From d797972c73b232963b8ad8dd1fb080bf48e21cd0 Mon Sep 17 00:00:00 2001
From: Christoph Groth
Date: Sat, 3 Mar 2018 14:54:24 +0100
Subject: sharpen divergence detection test

Instead of testing the divergence detection with functions that consist
of a single peak, use functions with multiple peaks and with the
exponent drawn randomly.
---
 vquad/test/test_core.py | 54 ++++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 26 deletions(-)

(limited to 'vquad')

diff --git a/vquad/test/test_core.py b/vquad/test/test_core.py
index f5fce6d..1e5c951 100644
--- a/vquad/test/test_core.py
+++ b/vquad/test/test_core.py
@@ -213,45 +213,47 @@ def test_interpolation():
     raises(ValueError, vquad, x)
 
 
-def f63(x, alpha, beta):
-    return abs(x - beta) ** alpha
-
-
-def F63(x, alpha, beta):
-    return (x - beta) * abs(x - beta) ** alpha / (alpha + 1)
-
-
-def test_analytic(n=200):
+def test_divergence_detection(n=200):
     def f(x):
-        return f63(x, alpha, beta)
+        x = x.reshape((-1, 1))
+        return (c * abs(x - λ) ** α).sum(1)
 
     def F(x):
-        return F63(x, alpha, beta)
+        x = x.reshape((-1, 1))
+        return (c * (x - λ) * abs(x - λ) ** α / (α + 1)).sum(1)
 
     old_settings = np.seterr(all='ignore')
-    np.random.seed(123)
-    params = np.empty((n, 2))
-    params[:, 0] = np.linspace(-0.5, -1.5, n)
-    params[:, 1] = np.random.random_sample(n)
-
+    rng = np.random.RandomState(123)
     false_negatives = 0
     false_positives = 0
-    for alpha, beta in params:
+    shape = (1, 7)
+    num_divergent = 0
+    for i in range(n):
+        α = rng.uniform(-0.1, -1.1, shape)
+        λ = rng.uniform(0, 1, shape)
+        c = np.exp(rng.uniform(-8, 0, shape)) * rng.choice((-1, 1), shape)
+        # The above is chosen such that the integral is divergent about 50% of
+        # the time.
+        divergent = (α <= -1).any()
+        num_divergent += divergent
         try:
-            igral, err = core.vquad(f, 0, 1, 1e-3)
+            vquad = core.Vquad(f, 0, 1)
+            igral, err = vquad.improve_until(1e-3)
         except core.DivergentIntegralError:
-            assert alpha < -0.8
-            false_negatives += alpha > -1
+            false_negatives += not divergent
         else:
-            if alpha <= -1:
+            if divergent:
                 false_positives += 1
-            else:
-                igral_exact = F(1) - F(0)
-                assert alpha < -0.7 or abs(igral - igral_exact) < err
 
-    assert false_negatives < 0.05 * n
-    assert false_positives < 0.05 * n
+    print("false negatives (non-divergent but failure):",
+          false_negatives, "/", n - num_divergent)
+    print("false positives (divergent but success):",
+          false_positives, "/", num_divergent)
+
+    # Hopefully we can improve on the following!
+    assert false_negatives < 0.3 * (n - num_divergent)
+    assert false_positives < 0.3 * num_divergent
 
     np.seterr(**old_settings)
 
 
-- 
cgit v1.2.3-74-g4815