From 51f64b0e355b4c54cbfab9412ffd5441c10d23ca Mon Sep 17 00:00:00 2001
From: Michael BAUDIN
Date: Wed, 11 Dec 2024 12:57:08 +0100
Subject: [PATCH] Added comments to the examples. Added equations to the
 docstrings.

---
 doc/examples/plot_dumontet_vignes.py          | 167 ++++++++++++++----
 .../plot_gill_murray_saunders_wright.py       |   9 +
 doc/examples/plot_stepleman_winarsky.py       |  26 ++-
 numericalderivative/_DumontetVignes.py        |   4 +-
 numericalderivative/_SteplemanWinarsky.py     |  42 ++++-
 5 files changed, 197 insertions(+), 51 deletions(-)

diff --git a/doc/examples/plot_dumontet_vignes.py b/doc/examples/plot_dumontet_vignes.py
index d72fdcf..2825eac 100644
--- a/doc/examples/plot_dumontet_vignes.py
+++ b/doc/examples/plot_dumontet_vignes.py
@@ -23,6 +23,13 @@
 
 # %%
 # In the next example, we use the algorithm on the exponential function.
+# We create the :class:`~numericalderivative.DumontetVignes` algorithm using the function and the point x.
+# Then we use the :meth:`~numericalderivative.DumontetVignes.compute_step()` method to compute the step,
+# using an upper bound of the step as an initial point of the algorithm.
+# Finally, we use the :meth:`~numericalderivative.DumontetVignes.compute_first_derivative()` method to compute
+# an approximate value of the first derivative using finite differences.
+# The :meth:`~numericalderivative.DumontetVignes.get_number_of_function_evaluations()` method
+# can be used to get the number of function evaluations.
 
 # %%
 x = 1.0
@@ -43,17 +50,26 @@
 # Useful functions
 # ----------------
 
+# %%
+# These functions will be used later in this example.
+
 # %%
 def compute_ell(function, x, k, relative_precision):
+    """
+    Compute the L ratio for a given value of the step k.
+    """
     algorithm = nd.DumontetVignes(function, x, relative_precision=relative_precision)
     ell = algorithm.compute_ell(k)
     return ell
 
 
 def compute_f3_inf_sup(function, x, k, relative_precision):
+    """
+    Compute the lower and upper bounds of the third derivative for a given value of k.
+    """
     algorithm = nd.DumontetVignes(function, x, relative_precision=relative_precision)
-    ell, f3inf, f3sup = algorithm.compute_ell(k)
+    _, f3inf, f3sup = algorithm.compute_ell(k)
     return f3inf, f3sup
 
@@ -70,8 +86,9 @@ def plot_step_sensitivity(
     kmax=None,
 ):
     """
-    Compute the approximate derivative using central F.D. formula.
     Create a plot representing the absolute error depending on step.
+
+    Compute the approximate derivative using central F.D. formula.
     Plot the approximately optimal step computed by DumontetVignes.
 
     Parameters
@@ -145,7 +162,7 @@ def plot_step_sensitivity(
     pl.figure()
     pl.plot(step_array, error_array)
     pl.plot(
-        [estim_step] * 2, [minimum_error, maximum_error], "--", label=r"$\tilde{h}$"
+        [estim_step] * 2, [minimum_error, maximum_error], "--", label=r"$\widetilde{h}$"
     )
     pl.plot(optimal_step, optimal_error, "o", label=r"$h^\star$")
     pl.title("Finite difference for %s" % (name))
@@ -188,9 +205,7 @@ def plot_ell_ratio(
     k_array = np.logspace(np.log10(kmin), np.log10(kmax), number_of_points)
     ell_array = np.zeros((number_of_points))
     for i in range(number_of_points):
-        ell_array[i], f3inf, f3sup = compute_ell(
-            function, x, k_array[i], relative_precision
-        )
+        ell_array[i], _, _ = compute_ell(function, x, k_array[i], relative_precision)
 
     pl.figure()
     pl.plot(k_array, ell_array)
@@ -221,7 +236,17 @@
 # -------------------------------------
 
 # %%
-# 1. Consider the Exponential function.
+# The L ratio is the criterion used in the (Dumontet & Vignes, 1977) algorithm
+# to find a satisfactory step k.
+# The algorithm searches for a step k such that the L ratio is inside
+# a given interval.
+# The ratio is computed by the :meth:`~numericalderivative.DumontetVignes.compute_ell`
+# method.
+# In the next examples, we plot the L ratio depending on k
+# for different functions.
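+
+# %%
+# As a quick illustration (an editorial sketch, not part of the original
+# patch), we can evaluate the L ratio for a single step with the
+# ``compute_ell`` helper defined above; the point x, the step k and the
+# relative precision below are arbitrary choices.
+
+# %%
+sketch_ell, _, _ = compute_ell(np.exp, 1.0, 1.0e-4, 1.0e-15)
+print("L(k = 1.0e-4) = ", sketch_ell)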
+
+# %%
+# 1. Consider the :class:`~numericalderivative.ExponentialProblem` function.
 
 number_of_points = 1000
 relative_precision = 1.0e-15
@@ -342,20 +367,25 @@ def plot_ell_ratio(
 # Plot the error depending on the step
 # ------------------------------------
 
+# %%
+# In the next examples, we plot the error of the approximation of the
+# first derivative by the finite difference formula depending
+# on the step size.
+
 # %%
 x = 4.0
-benchmark = nd.ExponentialProblem()
-function = benchmark.get_function()
+problem = nd.ExponentialProblem()
+function = problem.get_function()
 absolute_precision = sys.float_info.epsilon * function(x)
 print("absolute_precision = %.3e" % (absolute_precision))
 
 # %%
 x = 4.1
-function = benchmark.get_function()
+function = problem.get_function()
 relative_precision = sys.float_info.epsilon
 name = "exp"
-function_derivative = benchmark.get_first_derivative()
-function_third_derivative = benchmark.get_third_derivative()
+function_derivative = problem.get_first_derivative()
+function_third_derivative = problem.get_third_derivative()
 number_of_points = 1000
 step_array = np.logspace(-15.0, 0.0, number_of_points)
 kmin = 1.0e-5
@@ -374,7 +404,16 @@ def plot_ell_ratio(
 )
 
 # %%
-# 2. Scaled exponential
+# In the previous figure, we see that the error reaches
+# a minimum, which is indicated by the green point labeled :math:`h^\star`.
+# The vertical dotted line represents the approximately optimal step :math:`\widetilde{h}`
+# returned by the :meth:`~numericalderivative.DumontetVignes.compute_step` method.
+# We see that the method correctly computes an approximation of the optimal step.
+
+
+# %%
+# 2. Consider the :class:`~numericalderivative.ScaledExponentialProblem`.
+# First, we plot the L ratio.
 
 x = 1.0
 relative_precision = 1.0e-14
@@ -395,13 +434,16 @@ def plot_ell_ratio(
     plot_L_constants=True,
 )
 
+# %%
+# Then we plot the error depending on the step size.
+
 # %%
 x = 1.0
 name = "scaled exp"
-benchmark = nd.ScaledExponentialProblem()
-function = benchmark.function
-function_derivative = benchmark.first_derivative
-function_third_derivative = benchmark.third_derivative
+problem = nd.ScaledExponentialProblem()
+function = problem.function
+function_derivative = problem.first_derivative
+function_third_derivative = problem.third_derivative
 number_of_points = 1000
 step_array = np.logspace(-7.0, 6.0, number_of_points)
 kmin = 1.0e-2
@@ -420,8 +462,12 @@ def plot_ell_ratio(
 )
 
 # %%
-#
-print("+ 3. Square root")
+# The previous figure shows that the optimal step is close to
+# :math:`10^1`, which is larger than what we might typically expect
+# as a step size for a finite difference formula.
+
+# %%
+# 3. Consider the :class:`~numericalderivative.SquareRootProblem`.
 
 x = 1.0
 relative_precision = 1.0e-14
@@ -443,34 +489,67 @@ def plot_ell_ratio(
 )
 pl.ylim(-20.0, 20.0)
 
+# %%
+# Compute the lower and upper bounds of the third derivative
+# ----------------------------------------------------------
+
+# %%
+# The algorithm is based on bounds of the third derivative, which are
+# computed by the :meth:`~numericalderivative.DumontetVignes.compute_ell` method.
+# These bounds are used to find an approximately optimal step of the
+# finite difference formula for the first derivative.
+# Hence, it is interesting to compare the bounds computed
+# by the (Dumontet & Vignes, 1977) algorithm with the
+# actual value of the third derivative.
+# To compute the true value of the third derivative,
+# we use two different methods (the stencil of the first one is sketched
+# after this list):
+#
+# - a finite difference formula, using :class:`~numericalderivative.ThirdDerivativeCentral`,
+# - the exact third derivative, using :meth:`~numericalderivative.SquareRootProblem.get_third_derivative`.
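+
+# %%
+# Editorial sketch, not part of the original patch: a standard five-point
+# central stencil for the third derivative is
+#
+# .. math::
+#
+#     d_3(k) = \frac{f(x + 2k) - 2 f(x + k) + 2 f(x - k) - f(x - 2k)}{2 k^3}.
+#
+# We assume, without having checked the source, that
+# :class:`~numericalderivative.ThirdDerivativeCentral` implements a formula
+# of this kind; the helper below is only illustrative.
+
+# %%
+def sketch_third_derivative_central(function, x, k):
+    """Editorial sketch of a five-point central third derivative formula."""
+    return (
+        function(x + 2.0 * k)
+        - 2.0 * function(x + k)
+        + 2.0 * function(x - k)
+        - function(x - 2.0 * k)
+    ) / (2.0 * k**3)
+
+
+# The exact third derivative of the square root function at x = 1 is 3 / 8.
+print("sketch f'''(1.0) = ", sketch_third_derivative_central(np.sqrt, 1.0, 1.0e-3))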
 
 # %%
 x = 1.0
-k = 1.0e-3
+k = 1.0e-3  # A first guess
 print("x = ", x)
 print("k = ", k)
-benchmark = nd.SquareRootProblem()
-finite_difference = nd.ThirdDerivativeCentral(benchmark.function, x)
+problem = nd.SquareRootProblem()
+function = problem.get_function()
+finite_difference = nd.ThirdDerivativeCentral(function, x)
 approx_f3d = finite_difference.compute(k)
 print("Approx. f'''(x) = ", approx_f3d)
-exact_f3d = benchmark.third_derivative(x)
+third_derivative = problem.get_third_derivative()
+exact_f3d = third_derivative(x)
 print("Exact f'''(x) = ", exact_f3d)
 
 # %%
 relative_precision = 1.0e-14
 print("relative_precision = ", relative_precision)
-f3inf, f3sup = compute_f3_inf_sup(benchmark.function, x, k, relative_precision)
+function = problem.get_function()
+f3inf, f3sup = compute_f3_inf_sup(function, x, k, relative_precision)
 print("f3inf = ", f3inf)
 print("f3sup = ", f3sup)
 
+# %%
+# The previous output shows that the lower and upper bounds
+# computed by the algorithm indeed contain the true value of the
+# third derivative in this case.
+
+# %%
+# The algorithm is based on finding an approximately optimal
+# step k to compute the third derivative.
+# The next script computes the error of the central finite difference
+# formula for the third derivative depending on the step k.
+
 # %%
 number_of_points = 1000
+third_derivative = problem.get_third_derivative()
 k_array = np.logspace(-6.0, -1.0, number_of_points)
 error_array = np.zeros((number_of_points))
 for i in range(number_of_points):
-    algorithm = nd.ThirdDerivativeCentral(benchmark.function, x)
+    algorithm = nd.ThirdDerivativeCentral(problem.function, x)
     f2nde_approx = algorithm.compute(k_array[i])
-    error_array[i] = abs(f2nde_approx - benchmark.third_derivative(x))
+    error_array[i] = abs(f2nde_approx - third_derivative(x))
 
 # %%
 pl.figure()
@@ -488,11 +567,20 @@ def plot_ell_ratio(
 # -------------------------------------------------------
 
 # %%
+# The next figure presents the sensitivity of the
+# lower and upper bounds of the third derivative to the step k.
+# Moreover, it presents the approximation of the third derivative
+# using the central finite difference formula.
+# This makes it possible to check that the lower and upper bounds
+# actually contain the approximation produced by the F.D. formula.
+
+# %%
+problem = nd.SquareRootProblem()
 number_of_points = 1000
 relative_precision = 1.0e-16
 k_array = np.logspace(-5.0, -4.0, number_of_points)
 f3_array = np.zeros((number_of_points, 3))
-function = benchmark.get_function()
+function = problem.get_function()
 for i in range(number_of_points):
     f3inf, f3sup = compute_f3_inf_sup(function, x, k_array[i], relative_precision)
     algorithm = nd.ThirdDerivativeCentral(function, x)
@@ -514,7 +602,7 @@ def plot_ell_ratio(
 # %%
 x = 1.0e-2
 relative_precision = 1.0e-14
-function = benchmark.get_function()
+function = problem.get_function()
 name = "sqrt"
 number_of_digits = 53
 kmin = 4.4e-7
@@ -533,13 +621,15 @@ def plot_ell_ratio(
 pl.ylim(-20.0, 20.0)
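+
+# %%
+# Editorial aside, not part of the original patch: the relative precision
+# passed to the algorithm models the accuracy of the function evaluations.
+# Following the convention used earlier in this example, the corresponding
+# absolute precision is the relative precision scaled by the magnitude
+# of the function value.
+
+# %%
+sketch_function = problem.get_function()
+sketch_absolute_precision = relative_precision * abs(sketch_function(1.0e-2))
+print("sketch absolute precision = %.3e" % (sketch_absolute_precision))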
 
 # %%
-# Search step
+# The next example searches for the optimal step for the square root function.
+
+# %%
 x = 1.0e-2
 relative_precision = 1.0e-14
 kmin = 1.0e-8
 kmax = 1.0e-3
 verbose = True
-function = benchmark.get_function()
+function = problem.get_function()
 algorithm = nd.DumontetVignes(
     function, x, relative_precision=relative_precision, verbose=verbose
 )
 
 print(f"number_of_feval = {number_of_feval}")
 f_prime_approx = algorithm.compute_first_derivative(h_optimal)
 feval = algorithm.get_number_of_function_evaluations()
-first_derivative = benchmark.get_first_derivative()
+first_derivative = problem.get_first_derivative()
 absolute_error = abs(f_prime_approx - first_derivative(x))
 print("Abs. error = %.3e" % (absolute_error))
 
 
 # %%
-#
-print("+ 4. sin")
+# 4. Consider the :class:`~numericalderivative.SinProblem`.
 x = 1.0
 relative_precision = 1.0e-14
-benchmark = nd.SinProblem()
-function = benchmark.function
+problem = nd.SinProblem()
+function = problem.get_function()
 name = "sin"
 number_of_digits = 53
 kmin = 1.0e-5
@@ -588,15 +677,17 @@ def plot_ell_ratio(
 k = 1.0e-3
 print("x = ", x)
 print("k = ", k)
-algorithm = nd.ThirdDerivativeCentral(benchmark.function, x)
+algorithm = nd.ThirdDerivativeCentral(problem.function, x)
 approx_f3d = algorithm.compute(k)
 print("Approx. f'''(x) = ", approx_f3d)
-exact_f3d = benchmark.third_derivative(x)
+third_derivative = problem.get_third_derivative()
+exact_f3d = third_derivative(x)
 print("Exact f'''(x) = ", exact_f3d)
 
 relative_precision = 1.0e-14
 print("relative_precision = ", relative_precision)
-f3inf, f3sup = compute_f3_inf_sup(benchmark.function, x, k, relative_precision)
+function = problem.get_function()
+f3inf, f3sup = compute_f3_inf_sup(function, x, k, relative_precision)
 print("f3inf = ", f3inf)
 print("f3sup = ", f3sup)
 
diff --git a/doc/examples/plot_gill_murray_saunders_wright.py b/doc/examples/plot_gill_murray_saunders_wright.py
index 08b0b26..b5cad3e 100644
--- a/doc/examples/plot_gill_murray_saunders_wright.py
+++ b/doc/examples/plot_gill_murray_saunders_wright.py
@@ -24,6 +24,15 @@
 
 # %%
 # In the next example, we use the algorithm on the exponential function.
+# We create the :class:`~numericalderivative.GillMurraySaundersWright` algorithm using the function and the point x.
+# Then we use the :meth:`~numericalderivative.GillMurraySaundersWright.compute_step()` method to compute the step,
+# using an upper bound of the step as an initial point of the algorithm.
+# Finally, we use the :meth:`~numericalderivative.GillMurraySaundersWright.compute_first_derivative()` method to compute
+# an approximate value of the first derivative using finite differences.
+# The :meth:`~numericalderivative.GillMurraySaundersWright.get_number_of_function_evaluations()` method
+# can be used to get the number of function evaluations.
+
+# %%
 x = 1.0
 algorithm = nd.GillMurraySaundersWright(np.exp, x, verbose=True)
 kmin = 1.0e-10
diff --git a/doc/examples/plot_stepleman_winarsky.py b/doc/examples/plot_stepleman_winarsky.py
index 6a2c448..3f0d925 100644
--- a/doc/examples/plot_stepleman_winarsky.py
+++ b/doc/examples/plot_stepleman_winarsky.py
@@ -25,6 +25,13 @@
 
 # %%
 # In the next example, we use the algorithm on the exponential function.
+# We create the :class:`~numericalderivative.SteplemanWinarsky` algorithm using the function and the point x.
+# Then we use the :meth:`~numericalderivative.SteplemanWinarsky.compute_step()` method to compute the step,
+# using an upper bound of the step as an initial point of the algorithm.
+# Finally, we use the :meth:`~numericalderivative.SteplemanWinarsky.compute_first_derivative()` method to compute
+# an approximate value of the first derivative using finite differences.
+# The :meth:`~numericalderivative.SteplemanWinarsky.get_number_of_function_evaluations()` method
+# can be used to get the number of function evaluations.
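+#
+# Editorial aside, not part of the original patch: according to the
+# :meth:`~numericalderivative.SteplemanWinarsky.compute_step` docstring,
+# the default initial step is :math:`\beta \epsilon_r^{1/3} |x|`.
+# The next cell evaluates this for the default :math:`\beta = 4` at
+# :math:`x = 1`, taking the machine epsilon as the relative error
+# :math:`\epsilon_r`, which is an assumption of this sketch.
+
+# %%
+import sys
+
+sketch_beta = 4.0  # The default reduction factor of compute_step()
+sketch_default_step = sketch_beta * sys.float_info.epsilon ** (1.0 / 3.0) * abs(1.0)
+print("sketch default initial step = %.3e" % (sketch_default_step))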
 
 # %%
 x = 1.0
@@ -207,6 +214,11 @@ def fd_difference(h1, h2, function, x):
 # Plot number of lost digits vs h
 # -------------------------------
 
+# %%
+# The :meth:`~numericalderivative.SteplemanWinarsky.number_of_lost_digits` method
+# computes the number of lost digits in the approximated derivative
+# depending on the step.
+
 # %%
 h = 1.0e4
 print("Starting h = ", h)
@@ -276,7 +288,9 @@ def fd_difference(h1, h2, function, x):
 # %%
 # In some cases, it is difficult to find the initial step.
 # In this case, we can use the bisection algorithm, which can produce
-# an initial guess for the step.
+# an initial guess for the step.
+# This algorithm is based on a search for a suitable step within
+# an interval.
 
 # %%
 # Test with single point and default parameters.
 
 # %%
 x = 1.0
 
 print("Func. eval = ", feval)
 
 # %%
-# Do not use the log scale (this can be slower).
+# See how the algorithm behaves if we use or do not use the log scale
+# when searching for the optimal step (this can be slower).
 
 # %%
 x = 1.0
 
 print("Initial step = ", h0, ", iterations = ", iterations)
 
 # %%
-# Test
+# In the next example, we search for an initial step using bisection,
+# then use this step as an initial guess for the algorithm.
+# Finally, we compute an approximation of the first derivative using
+# the finite difference formula.
+
+# %%
 benchmark = nd.ExponentialProblem()
 x = 1.0
 algorithm = nd.SteplemanWinarsky(benchmark.function, x, verbose=True)
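+
+# %%
+# Editorial sketch, not part of the original patch: the bisection search
+# looks for a step :math:`h_0` whose number of lost digits :math:`N(h_0)`
+# lies between 0 and :math:`T = \log_{10}(\epsilon_r^{-1/3} / \beta)`.
+# We evaluate T from its defining formula with the default beta = 4,
+# and N with the documented
+# :meth:`~numericalderivative.SteplemanWinarsky.number_of_lost_digits` method;
+# the step 1.0e-4 below is an arbitrary choice.
+
+# %%
+import math
+import sys
+
+sketch_T = math.log10(sys.float_info.epsilon ** (-1.0 / 3.0) / 4.0)
+print("T = ", sketch_T)
+print("N(1.0e-4) = ", algorithm.number_of_lost_digits(1.0e-4))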
 
diff --git a/numericalderivative/_DumontetVignes.py b/numericalderivative/_DumontetVignes.py
index e65967a..fce82b3 100644
--- a/numericalderivative/_DumontetVignes.py
+++ b/numericalderivative/_DumontetVignes.py
@@ -98,7 +98,7 @@ class DumontetVignes:
 
     .. math::
 
-        \ell(k) \in [L_1, L_2] \cup [L_3, L_4]
+        L(k) \in [L_1, L_2] \cup [L_3, L_4]
 
     where:
 
@@ -186,7 +186,7 @@ def __init__(
         self.x = x
 
     def get_ell_min_max(self):
-        """
+        r"""
         Return the minimum and maximum of the L ratio
 
         The parameters L1 and L2 can be computed from the equations:
diff --git a/numericalderivative/_SteplemanWinarsky.py b/numericalderivative/_SteplemanWinarsky.py
index f0afe22..7f843d0 100644
--- a/numericalderivative/_SteplemanWinarsky.py
+++ b/numericalderivative/_SteplemanWinarsky.py
@@ -19,10 +19,12 @@ class SteplemanWinarsky:
 
     .. math::
 
-        f'(x) \approx \frac{f(x + h) - f(x - h)}{2 h}
+        f'(x) \approx d(h) := \frac{f(x + h) - f(x - h)}{2 h}
 
     where :math:`f` is the function, :math:`x \in \mathbb{R}` is the
     input point and :math:`h > 0` is the step.
+    In the previous equation, :math:`d` is the central finite difference
+    formula considered in this method.
     The goal of the method is to compute an approximately optimal
     :math:`h^\star` for the central F.D. formula using function evaluations only.
@@ -119,7 +121,7 @@ def __init__(
         return
 
     def compute_step(self, initial_step=None, iteration_maximum=53, beta=4.0):
-        """
+        r"""
         Compute an approximate optimum step for central derivative using
         monotony properties.
 
         This function computes an approximate optimal step h for the central
         finite difference.
 
         Parameters
         ----------
         initial_step : float, > 0.0
             The initial value of the differentiation step.
-            The default initial step is beta * relative_error**(1/3) * abs(x)
+            The default initial step is :math:`\beta \epsilon_r^{1/3} |x|`,
+            where :math:`\beta > 0` is the reduction factor,
+            :math:`\epsilon_r` is the relative error and :math:`x` is the
+            current point.
+            The algorithm produces a sequence of decreasing steps.
+            Hence, the initial step should be an upper bound of the true
+            optimal step.
         iteration_maximum : int, optional
             The number of iterations. The default is 53.
         beta : float, > 1.0
@@ -205,9 +213,27 @@ def compute_step(self, initial_step=None, iteration_maximum=53, beta=4.0):
         return estim_step, number_of_iterations
 
     def number_of_lost_digits(self, h):
-        """
+        r"""
         Compute the number of (base 10) lost digits.
 
+        The loss of significant digits produced by the subtraction in the
+        finite difference formula is (see (Stepleman & Winarsky, 1979), eq. 3.10 page 1261):
+
+        .. math::
+
+            \delta(h) = \left|\frac{2 h d(h)}{f(x)}\right|
+
+        where :math:`h > 0` is the step and :math:`d(h)` is the central
+        finite difference formula.
+        The number of decimal digits lost in the subtraction is
+        (see (Stepleman & Winarsky, 1979), eq. 3.11 page 1261):
+
+        .. math::
+
+            N(h) = -\log_{10}(\delta(h))
+
+        where :math:`\log_{10}` is the base-10 logarithm.
+
         Parameters
         ----------
         h : float
 
         """
         d = self.first_derivative_central.compute(h)
         function_value = self.function(self.x)
-        # eq. 3.10
+        # eq. 3.10 page 1261
         if function_value == 0.0:
             delta = abs(2 * h * d)
         else:
@@ -248,12 +274,12 @@ def search_step_with_bisection(
             0 < N(h_0) < T := \log_{10}\left(\frac{\epsilon_r^{-1 / 3}}{\beta}\right)
 
         where :math:`N` is the number of lost digits (as computed by
-        `number_of_lost_digits()`), :math:`h_0` is the initial step and
+        :meth:`number_of_lost_digits()`), :math:`h_0` is the initial step and
         :math:`\epsilon_r` is the relative precision of the function
         evaluation.
-        This algorithm can be effective compared to `compute_step()`
+        This algorithm can be effective compared to :meth:`compute_step()`
         in the cases where it is difficult to find an initial step.
-        In this case, the step returned by `search_step_with_bisection()`
+        In this case, the step returned by :meth:`search_step_with_bisection()`
         can be used as the initial step for compute_step().
         This can require several extra function evaluations.
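+        For instance, for :math:`f(x) = \exp(x)` we have
+        :math:`d(h) \approx f'(x) = f(x)`, so that
+        :math:`\delta(h) \approx 2 h` and :math:`N(h) \approx -\log_{10}(2 h)`:
+        halving the step loses about 0.3 more decimal digits to
+        cancellation.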