diff options
| author | Christoph Groth <christoph.groth@cea.fr> | 2017-12-02 13:23:38 +0100 |
|---|---|---|
| committer | Christoph Groth <christoph.groth@cea.fr> | 2018-02-23 12:55:00 +0100 |
| commit | fe5dd3354ad2b5b48c104dca358a9c5e3119bcd1 (patch) | |
| tree | 3eaa0dc6e161c2b7fb104d7b3dad6ebde0b9842b /vquad/core.py | |
initial commit
Diffstat (limited to 'vquad/core.py')
| -rw-r--r-- | vquad/core.py | 240 |
1 file changed, 240 insertions, 0 deletions
# Copyright 2017 Christoph Groth (CEA).
#
# This file is part of Vquad. It is subject to the license terms in the file
# LICENSE.rst found in the top-level directory of this distribution.

"""Core of the Vquad adaptive quadrature integrator.

Implements the doubly-adaptive quadrature algorithm of P. Gonnet,
"Increasing the Reliability of Adaptive Quadrature Using Explicit
Interpolants", ACM Trans. Math. Software 37(3), 2008: each interval
carries an explicit polynomial interpolant of the integrand, the error
is estimated from the difference of two successive interpolants, and
intervals are either refined (higher-degree rule) or bisected.

The quadrature rule tables (nodes, Newton coefficients, inverse
Vandermonde matrices, ...) live in the sibling module ``tables``.
"""

import numpy as np
from scipy.linalg import norm

from . import tables as tbls

# Machine epsilon (spacing between 1.0 and the next float).
eps = np.spacing(1)

# If the relative difference between two consecutive approximations is
# lower than this value, the error estimate is considered reliable.
# See section 6.2 of Pedro Gonnet's thesis.
hint = 0.1

# Smallest acceptable relative difference of points in a rule. This was chosen
# such that no artifacts are apparent in plots of (i, log(a_i)), where a_i is
# the sequence of estimates of the integral value of an interval and all its
# ancestors.
min_sep = 16 * eps

# Lowest and highest quadrature-rule levels; these index the tables in
# ``tbls`` (nodes, sizes, newton_coeffs, inv_Vs, V_cond_nums).
min_level = 1
max_level = 4

# Maximum number of times an interval's c00 coefficient may grow by more
# than a factor 2 upon splitting before the integral is declared divergent
# (see Vquad.split).
ndiv_max = 20
# Maximum number of simultaneously active intervals (see Vquad.totals).
max_ivals = 200

_sqrt_one_half = np.sqrt(0.5)


def _calc_coeffs(vals, level):
    """Return interpolation coefficients for function values ``vals``
    sampled at the nodes of rule ``level``.

    Non-finite function values (NaN/inf) are zeroed out and the
    corresponding contributions are removed from the coefficient vector
    by downdating the interpolation, so a few bad evaluations do not
    poison the whole interval.
    """
    nans = np.flatnonzero(~np.isfinite(vals))
    if nans.size:
        # Replace vals by a copy and zero-out non-finite elements.
        vals = vals.copy()
        vals[nans] = 0
        # Prepare things for the loop further down.
        b = tbls.newton_coeffs[level].copy()
        m = len(b) - 2      # = len(tbls.nodes[level]) - 1
    # Coefficients via the (precomputed) inverse Vandermonde-like matrix.
    coeffs = tbls.inv_Vs[level] @ vals

    # This is a variant of Algorithm 7 from the thesis of Pedro Gonnet where no
    # linear system has to be solved explicitly. Instead, Algorithm 5 is used.
    # Each iteration removes the influence of one zeroed-out node ``i``
    # by dividing out the Newton-basis root at that node and projecting
    # the highest surviving coefficient away.
    for i in nans:
        b[m + 1] /= tbls.alpha[m]
        x = tbls.nodes[level][i]
        b[m] = (b[m] + x * b[m + 1]) / tbls.alpha[m - 1]
        for j in range(m - 1, 0, -1):
            b[j] = ((b[j] + x * b[j + 1] - tbls.gamma[j + 1] * b[j + 2])
                    / tbls.alpha[j - 1])
        b = b[1:]

        coeffs[:m] -= coeffs[m] / b[m] * b[:m]
        coeffs[m] = 0
        m -= 1

    return coeffs


class DivergentIntegralError(ValueError):
    """Raised when the integral is deemed (possibly) divergent.

    Carries the best available estimates so the caller can inspect them:
    ``igral`` (integral estimate, typically +-inf here), ``err`` (error
    estimate) and ``nr_points`` (number of function evaluations so far).
    """

    def __init__(self, msg, igral, err, nr_points):
        self.igral = igral
        self.err = err
        self.nr_points = nr_points
        super().__init__(msg)


class _Interval:
    # a, b            : interval endpoints
    # coeffs, vals    : interpolation coefficients / function values at nodes
    # igral, err      : integral and error estimates for this interval
    # level           : current quadrature-rule level (min_level..max_level)
    # depth           : number of bisections from the initial interval
    # ndiv            : divergence counter (see Vquad.split)
    # c00             : first coefficient at creation, used for the
    #                   divergence heuristic
    # unreliable_err  : True if the error estimate is not trusted (see
    #                   the ``hint`` threshold above)
    __slots__ = ['a', 'b', 'coeffs', 'vals', 'igral', 'err', 'level', 'depth',
                 'ndiv', 'c00', 'unreliable_err']

    def __init__(self, a, b, level, depth):
        self.a = a
        self.b = b
        self.level = level
        self.depth = depth

    def points(self):
        """Return the quadrature nodes of this interval's rule, mapped
        affinely from the reference interval onto [a, b]."""
        a = self.a
        b = self.b
        return (a + b) / 2 + (b - a) * tbls.nodes[self.level] / 2

    def interpolate(self, vals, coeffs_old=None):
        """Interpolate function values ``vals`` and update the integral
        and error estimates of this interval.

        If ``coeffs_old`` is given (coefficients of a previous
        interpolation of the same integrand), the error estimate is the
        norm of the coefficient difference; otherwise the norm of the
        new coefficients is used as a conservative first estimate.
        """
        self.vals = vals
        self.coeffs = coeffs = _calc_coeffs(self.vals, self.level)
        if self.level == min_level:
            # Freshly split interval: remember the lowest-order
            # coefficient for the divergence heuristic in Vquad.split.
            self.c00 = coeffs[0]
        if coeffs_old is None:
            coeffs_diff = norm(coeffs)
        else:
            # The two coefficient vectors may have different lengths
            # (different rule levels); pad with zeros before subtracting.
            coeffs_diff = np.zeros(max(len(coeffs_old), len(coeffs)))
            coeffs_diff[:len(coeffs_old)] = coeffs_old
            coeffs_diff[:len(coeffs)] -= coeffs
            coeffs_diff = norm(coeffs_diff)
        w = self.b - self.a
        # The integral is proportional to the zeroth coefficient; the
        # sqrt(1/2) factor presumably comes from the normalization of
        # the basis in ``tbls`` -- confirm against tables.py.
        self.igral = w * coeffs[0] * _sqrt_one_half
        self.err = w * coeffs_diff
        # Error estimate is only trusted if successive interpolations
        # are relatively close (section 6.2 of Gonnet's thesis).
        self.unreliable_err = coeffs_diff > hint * norm(coeffs)


class Vquad:
    """Evaluate an integral using adaptive quadrature.

    The algorithm uses Clenshaw-Curtis quadrature rules of increasing
    degree in each interval. The error estimate is
    sqrt(integrate((f0(x) - f1(x))**2)), where f0 and f1 are two
    successive interpolations of the integrand. To fall below the
    desired total error, intervals are worked on ranked by their own
    absolute error: either the degree of the rule is increased or the
    interval is split if either the function does not appear to be
    smooth or a rule of maximum degree has been reached.

    Reference: "Increasing the Reliability of Adaptive Quadrature
    Using Explicit Interpolants", P. Gonnet, ACM Transactions on
    Mathematical Software, 37 (3), art. no. 26, 2008.
    """

    def __init__(self, f, a, b, level=max_level - 1):
        # f must accept an array of points and return an array of values.
        ival = _Interval(a, b, level, 1)
        vals = f(ival.points())
        ival.interpolate(vals)
        ival.c00 = 0.0  # Will go away.
        ival.ndiv = 0

        self.ivals = [ival]          # active intervals (unordered)
        self.f = f
        self.nr_points = len(vals)   # total number of function evaluations
        # Integral/error contributions of intervals that were dropped
        # (too narrow, at numerical accuracy, or evicted in totals()).
        self.igral_excess = 0
        self.err_excess = 0
        self.i_max = 0               # index of interval with largest error

    def split(self, ival):
        """Bisect ``ival`` and return its two children.

        Function values at the endpoints and midpoint are reused; only
        the interior nodes of the two children are newly evaluated.

        Raises DivergentIntegralError if the lowest-order coefficient
        keeps growing over many generations of splits, which indicates
        a divergent (or strongly singular) integrand.
        """
        m = (ival.a + ival.b) / 2
        # Midpoint value; assumes the rule's node set is symmetric with
        # a node at the interval center -- property of the CC nodes.
        f_center = ival.vals[(len(ival.vals) - 1) // 2]

        depth = ival.depth + 1
        children = [_Interval(ival.a, m, min_level, depth),
                    _Interval(m, ival.b, min_level, depth)]
        points = np.concatenate([child.points()[1:-1] for child in children])
        self.nr_points += len(points)
        valss = np.empty((2, tbls.sizes[min_level]))
        # Endpoint columns come for free from the parent's values.
        valss[:, 0] = ival.vals[0], f_center
        valss[:, -1] = f_center, ival.vals[-1]
        valss[:, 1:-1] = self.f(points).reshape((2, -1))

        for child, vals, T in zip(children, valss, tbls.Ts):
            # T maps parent coefficients onto the child's subinterval,
            # providing the "old" interpolation for the error estimate.
            child.interpolate(vals, T[:, :ival.coeffs.shape[0]] @ ival.coeffs)
            # Count how often the zeroth coefficient more than doubled
            # from parent to child (True adds 1; the ival.c00 guard
            # avoids division by zero).
            child.ndiv = (ival.ndiv
                          + (ival.c00 and child.c00 / ival.c00 > 2))
            if child.ndiv > ndiv_max and 2*child.ndiv > child.depth:
                msg = ('Possibly divergent integral in the interval '
                       '[{}, {}]! (h={})')
                raise DivergentIntegralError(
                    msg.format(child.a, child.b, child.b - child.a),
                    child.igral * np.inf, None, self.nr_points)
        return children

    def refine(self, ival):
        """Increase degree of interval."""
        ival.level += 1
        points = ival.points()
        vals = np.empty(points.shape)
        # The nodes of the lower-level rule reappear at the even indices
        # of the higher-level rule (nested CC rules -- see tables), so
        # only the odd-index points need new function evaluations.
        vals[0::2] = ival.vals
        vals[1::2] = self.f(points[1::2])
        self.nr_points += (len(vals) - 1) // 2
        ival.interpolate(vals, ival.coeffs)
        return points

    def improve(self):
        """Process the interval with the largest error estimate: refine
        it, drop it if it cannot be improved further, or split it."""
        i_max = self.i_max

        if self.ivals[i_max].level == max_level:
            # Highest-degree rule reached: the only option is splitting.
            split = True
        else:
            points = self.refine(self.ivals[i_max])
            # Split instead of refining further if the interpolations
            # do not seem to be converging.
            split = self.ivals[i_max].unreliable_err

            if (points[1] - points[0] < points[0] * min_sep
                    or points[-1] - points[-2] < points[-2] * min_sep
                    or (self.ivals[i_max].err
                        < (abs(self.ivals[i_max].igral) * eps
                           * tbls.V_cond_nums[self.ivals[i_max].level]))):
                # Remove the interval (while remembering the excess integral
                # and error), since it is either too narrow, or the estimated
                # relative error is already at the limit of numerical accuracy
                # and cannot be reduced further.
                self.err_excess += self.ivals[i_max].err
                self.igral_excess += self.ivals[i_max].igral
                # O(1) removal: overwrite with the last interval and pop.
                self.ivals[i_max] = self.ivals[-1]
                self.ivals.pop()
                return

        if split:
            self.ivals.extend(self.split(self.ivals[i_max]))
            self.ivals[i_max] = self.ivals.pop()

    def totals(self):
        """Return the current (igral, err) totals over all intervals
        plus the excess from removed intervals, and update ``i_max``."""
        # Compute the total error and new max.
        i_max = 0
        i_min = 0
        err = self.err_excess
        igral = self.igral_excess
        for i in range(len(self.ivals)):
            if self.ivals[i].err > self.ivals[i_max].err:
                i_max = i
            elif self.ivals[i].err < self.ivals[i_min].err:
                i_min = i
            err += self.ivals[i].err
            igral += self.ivals[i].igral

        # If there are too many intervals, remove the one with smallest
        # contribution to the error.
        if len(self.ivals) > max_ivals:
            self.err_excess += self.ivals[i_min].err
            self.igral_excess += self.ivals[i_min].igral
            self.ivals[i_min] = self.ivals[-1]
            self.ivals.pop()
            if i_max == len(self.ivals):
                # i_max pointed at the popped slot; it now lives at i_min.
                i_max = i_min

        self.i_max = i_max
        return igral, err

    def improve_until(self, tol):
        """Iterate improve() until the relative tolerance ``tol`` is
        reached (or can provably not be reached); return
        (igral, err, nr_points)."""
        while True:
            self.improve()
            igral, err = self.totals()

            # Stop when the error vanishes, the tolerance is met, the
            # remaining reducible error (err minus the irreducible
            # excess) is below tolerance while the excess alone exceeds
            # it (i.e. further work cannot help), or no intervals remain.
            if (err == 0
                    or err < abs(igral) * tol
                    or err - self.err_excess < abs(igral) * tol < self.err_excess
                    or not self.ivals):
                return igral, err, self.nr_points


def vquad(f, a, b, tol):
    """Integrate ``f`` over [a, b] adaptively to relative tolerance
    ``tol``; return (igral, err, nr_points).

    May raise DivergentIntegralError if the integral appears divergent.
    """
    igrator = Vquad(f, a, b)
    return igrator.improve_until(tol)
