Access the threshold in the algorithms. Clarify the examples. Plot the condition error in Gill, Murray, Saunders and Wright
mbaudin47 committed Dec 10, 2024
1 parent f5c109b commit 81d22c3
Showing 8 changed files with 480 additions and 119 deletions.
README.md: 4 additions & 0 deletions
@@ -68,6 +68,10 @@ pip install numericalderivative
RAIRO. Analyse numérique, 11 (1), 13-25.

## Roadmap
- Evaluate and plot the criterion c(hphi) for Gill, Murray, Saunders & Wright.
- Evaluate and plot the criterion ell(h) for Dumontet & Vignes.
- See why the Dumontet & Vignes method fails on the polynomial function.
- See why the Gill, Murray, Saunders & Wright method fails on the sin function.
- Implement the method of:

Shi, H. J. M., Xie, Y., Xuan, M. Q., & Nocedal, J. (2022). Adaptive finite-difference interval estimation for noisy derivative-free optimization. _SIAM Journal on Scientific Computing_, _44_(4), A2302-A2321.
doc/examples/plot_general_fd.py: 1 addition & 0 deletions
@@ -23,6 +23,7 @@
# Compute the first derivative using forward F.D. formula
# -------------------------------------------------------


# %%
# This is the function we want to compute the derivative of.
def scaled_exp(x):
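# %%
# A minimal sketch of the forward finite difference used in this example,
# assuming a body for scaled_exp (the definition above is truncated in the
# diff; exp(-x / 1.0e6) matches the package's scaled exponential problem):
import numpy as np


def scaled_exp_sketch(x):
    return np.exp(-x / 1.0e6)


x = 1.0
h = 1.0e-6  # an arbitrary step, not necessarily near-optimal
approx = (scaled_exp_sketch(x + h) - scaled_exp_sketch(x)) / h
exact = -np.exp(-x / 1.0e6) / 1.0e6  # exact derivative of the assumed body
print(f"abs. error = {abs(approx - exact):.3e}")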
doc/examples/plot_gill_murray_saunders_wright.py: 142 additions & 55 deletions
@@ -21,9 +21,9 @@

# %%
def compute_first_derivative_GMSW(
f,
function,
x,
f_prime,
first_derivative,
kmin,
kmax,
verbose=False,
@@ -33,11 +33,11 @@ def compute_first_derivative_GMSW(
Parameters
----------
f : function
function : function
The function.
x : float
The point where the derivative is to be evaluated.
f_prime : function
first_derivative : function
The exact first derivative of the function.
kmin : float, > 0
The minimum step k for the second derivative.
@@ -55,11 +55,11 @@ def compute_first_derivative_GMSW(
feval : int
The number of function evaluations.
"""
algorithm = nd.GillMurraySaundersWright(f, x, verbose=verbose)
algorithm = nd.GillMurraySaundersWright(function, x, verbose=verbose)
step, number_of_iterations = algorithm.compute_step(kmin, kmax)
f_prime_approx = algorithm.compute_first_derivative(step)
feval = algorithm.get_number_of_function_evaluations()
f_prime_exact = f_prime(x)
f_prime_exact = first_derivative(x)
if verbose:
print(f"Computed step = {step:.3e}")
print(f"Number of iterations = {number_of_iterations}")
@@ -74,8 +74,8 @@ def compute_first_derivative_GMSW(
kmin = 1.0e-15
kmax = 1.0e1
x = 1.0
benchmark = nd.ExponentialProblem()
second_derivative_value = benchmark.second_derivative(x)
problem = nd.ExponentialProblem()
second_derivative_value = problem.second_derivative(x)
optimal_step, absolute_error = nd.FirstDerivativeForward.compute_step(
second_derivative_value
)
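# %%
# A sketch of where the optimal step above comes from, assuming the
# textbook error model for the forward difference (the exact constants
# used by FirstDerivativeForward.compute_step may differ):
# e(h) = h * |f''(x)| / 2 + 2 * epsilon * |f(x)| / h,
# which is minimized at h* = 2 * sqrt(epsilon * |f(x)| / |f''(x)|).
epsilon = np.finfo(float).eps
h_star = 2.0 * np.sqrt(
    epsilon * abs(problem.function(x)) / abs(second_derivative_value)
)
print(f"h* from the textbook model = {h_star:.3e}")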
@@ -84,9 +84,9 @@ def compute_first_derivative_GMSW(
absolute_error,
number_of_function_evaluations,
) = compute_first_derivative_GMSW(
benchmark.function,
problem.function,
x,
benchmark.first_derivative,
problem.first_derivative,
kmin,
kmax,
verbose=True,
@@ -101,8 +101,9 @@ def compute_first_derivative_GMSW(
kmin = 1.0e-9
kmax = 1.0e8
x = 1.0
benchmark = nd.ScaledExponentialProblem()
second_derivative_value = benchmark.second_derivative(x)
problem = nd.ScaledExponentialProblem()
second_derivative = problem.get_second_derivative()
second_derivative_value = second_derivative(x)
optimal_step, absolute_error = nd.FirstDerivativeForward.compute_step(
second_derivative_value
)
@@ -111,9 +112,9 @@ def compute_first_derivative_GMSW(
absolute_error,
number_of_function_evaluations,
) = compute_first_derivative_GMSW(
benchmark.function,
problem.get_function(),
x,
benchmark.first_derivative,
problem.get_first_derivative(),
kmin,
kmax,
verbose=True,
@@ -126,7 +127,7 @@ def compute_first_derivative_GMSW(

# %%
def benchmark_method(
function, derivative_function, test_points, kmin, kmax, verbose=False
function, first_derivative, test_points, kmin, kmax, verbose=False
):
"""
Apply the Gill, Murray, Saunders & Wright method to compute the approximate first
@@ -136,10 +137,10 @@ def benchmark_method(
----------
function : function
The function.
derivative_function : function
first_derivative : function
The exact first derivative of the function.
test_points : list(float)
The list of x points where the benchmark must be performed.
The list of x points where the test must be performed.
kmin : float, > 0
The minimum step k for the second derivative.
kmax : float, > kmin
@@ -166,9 +167,9 @@ def benchmark_method(
absolute_error,
number_of_function_evaluations,
) = compute_first_derivative_GMSW(
function, x, derivative_function, kmin, kmax, verbose
function, x, first_derivative, kmin, kmax, verbose
)
relative_error = absolute_error / abs(derivative_function(x))
relative_error = absolute_error / abs(first_derivative(x))
if verbose:
print(
"x = %.3f, abs. error = %.3e, rel. error = %.3e, Func. eval. = %d"
@@ -188,30 +189,98 @@ def benchmark_method(
# %%
print("+ Benchmark on several points")
number_of_test_points = 100
test_points = np.linspace(0.01, 12.2, number_of_test_points)
kmin = 1.0e-13
kmax = 1.0e-1
benchmark = nd.ExponentialProblem()
problem = nd.ExponentialProblem()
interval = problem.get_interval()
test_points = np.linspace(interval[0], interval[1], number_of_test_points)
kmin = 1.0e-12
kmax = 1.0e1
average_error, average_feval = benchmark_method(
benchmark.function, benchmark.first_derivative, test_points, kmin, kmax, True
problem.get_function(),
problem.get_first_derivative(),
test_points,
kmin,
kmax,
True,
)

# %%
# Plot the condition error as a function of k.
number_of_points = 1000
problem = nd.ScaledExponentialProblem()
x = problem.get_x()
name = problem.get_name()
function = problem.get_function()
kmin = 1.0e-10
kmax = 1.0e5
k_array = np.logspace(np.log10(kmin), np.log10(kmax), number_of_points)
algorithm = nd.GillMurraySaundersWright(function, x)
c_min, c_max = algorithm.get_threshold_min_max()
condition_array = np.zeros((number_of_points))
for i in range(number_of_points):
condition_array[i] = algorithm.compute_condition(k_array[i])
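# %%
# Here :math:`c(h_\Phi)` is the condition error of the second derivative
# estimate :math:`\Phi(h_\Phi)`: roughly, the ratio of the rounding error
# of the central second difference to the magnitude of :math:`\Phi` itself
# (see Gill, Murray, Saunders & Wright for the exact definition).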

# %%
# The next plot presents the condition error :math:`c(h_\Phi)` as a function
# of :math:`h_\Phi`.
# The two horizontal lines represent the minimum and maximum threshold
# values.
# We search for the value of :math:`h_\Phi` such that the condition
# error is between these two limits.

# %%
pl.figure(figsize=(4.0, 3.0))
pl.title(f"Condition error of {name} at x = {x}")
pl.plot(k_array, condition_array)
pl.plot([kmin, kmax], [c_min] * 2, label=r"$c_{min}$")
pl.plot([kmin, kmax], [c_max] * 2, label=r"$c_{max}$")
pl.xlabel(r"$h_\Phi$")
pl.ylabel(r"$c(h_\Phi$)")
pl.xscale("log")
pl.yscale("log")
pl.legend(bbox_to_anchor=(1.0, 1.0))
pl.tight_layout()

# %%
# The previous plot shows that the condition error is a decreasing function
# of :math:`h_\Phi`.
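# %%
# This monotonicity is what makes the search tractable: a bisection on the
# step can locate a value whose condition error lies between the two
# thresholds. Below is a minimal sketch of such a search, not necessarily
# the strategy implemented by compute_step.
def bisection_search_sketch(algorithm, kmin, kmax, maxiter=60):
    c_min, c_max = algorithm.get_threshold_min_max()
    k = np.sqrt(kmin * kmax)
    for _ in range(maxiter):
        k = np.sqrt(kmin * kmax)  # geometric mid-point of [kmin, kmax]
        c = algorithm.compute_condition(k)
        if c > c_max:
            kmin = k  # c decreases with k: the step is too small
        elif c < c_min:
            kmax = k  # the step is too large
        else:
            break
    return k


k_sketch = bisection_search_sketch(algorithm, kmin, kmax)
print(f"step from the sketch = {k_sketch:.3e}")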

# %%
# For each problem, at a given point x, plot the error as a function of the
# step, and show the step computed by the method.


# %%
def plot_error_vs_h_with_GMSW_steps(
name, function, function_derivative, x, h_array, kmin, kmax, verbose=False
name, function, first_derivative, x, step_array, kmin, kmax, verbose=False
):
"""
Plot the computed error for an array of F.D. steps.

Parameters
----------
name : str
The name of the problem.
function : function
The function.
first_derivative : function
The exact first derivative of the function.
x : float
The input point where the test is done.
step_array : list(float)
The array of finite difference steps.
kmin : float, > 0
The minimum step k for the second derivative.
kmax : float, > kmin
The maximum step k for the second derivative.
verbose : bool, optional
Set to True to print intermediate messages. The default is False.
"""
algorithm = nd.GillMurraySaundersWright(function, x)
number_of_points = len(h_array)
number_of_points = len(step_array)
error_array = np.zeros((number_of_points))
for i in range(number_of_points):
h = h_array[i]
f_prime_approx = algorithm.compute_first_derivative(h)
error_array[i] = abs(f_prime_approx - function_derivative(x))
f_prime_approx = algorithm.compute_first_derivative(step_array[i])
error_array[i] = abs(f_prime_approx - first_derivative(x))

step, number_of_iterations = algorithm.compute_step(kmin, kmax)

@@ -223,7 +292,7 @@ def plot_error_vs_h_with_GMSW_steps(
maximum_error = np.nanmax(error_array)

pl.figure()
pl.plot(h_array, error_array)
pl.plot(step_array, error_array)
pl.plot(
[step] * 2,
[minimum_error, maximum_error],
@@ -241,71 +310,89 @@ def plot_error_vs_h_with_GMSW_steps(


# %%
def plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax, verbose=False):
def plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax, verbose=False):
"""
Plot the computed error for an array of F.D. steps.

Parameters
----------
problem : nd.BenchmarkProblem
The problem.
x : float
The input point where the test is done.
step_array : list(float)
The array of finite difference steps.
kmin : float, > 0
The minimum step k for the second derivative.
kmax : float, > kmin
The maximum step k for the second derivative.
verbose : bool, optional
Set to True to print intermediate messages. The default is False.
"""
plot_error_vs_h_with_GMSW_steps(
benchmark.name,
benchmark.function,
benchmark.first_derivative,
problem.get_name(),
problem.get_function(),
problem.get_first_derivative(),
x,
h_array,
step_array,
kmin,
kmax,
verbose,
)


# %%
benchmark = nd.ExponentialProblem()
problem = nd.ExponentialProblem()
x = 1.0
number_of_points = 1000
h_array = np.logspace(-15.0, -1.0, number_of_points)
step_array = np.logspace(-15.0, -1.0, number_of_points)
kmin = 1.0e-15
kmax = 1.0e-1
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax, True)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax, True)

# %%
x = 12.0
h_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax)
step_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax)

# %%
benchmark = nd.ScaledExponentialProblem()
problem = nd.ScaledExponentialProblem()
x = 1.0
kmin = 1.0e-10
kmax = 1.0e8
h_array = np.logspace(-10.0, 8.0, number_of_points)
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax)
step_array = np.logspace(-10.0, 8.0, number_of_points)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax)

# %%
benchmark = nd.LogarithmicProblem()
problem = nd.LogarithmicProblem()
x = 1.1
kmin = 1.0e-14
kmax = 1.0e-1
h_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax, True)
step_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax, True)

# %%
benchmark = nd.SinProblem()
problem = nd.SinProblem()
x = 1.0
kmin = 1.0e-15
kmax = 1.0e-1
h_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax)
step_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax)

# %%
benchmark = nd.SquareRootProblem()
problem = nd.SquareRootProblem()
x = 1.0
kmin = 1.0e-15
kmax = 1.0e-1
h_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax, True)
step_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax, True)

# %%
benchmark = nd.AtanProblem()
problem = nd.AtanProblem()
x = 1.1
kmin = 1.0e-15
kmax = 1.0e-1
h_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(benchmark, x, h_array, kmin, kmax)
step_array = np.logspace(-15.0, -1.0, number_of_points)
plot_error_vs_h_benchmark(problem, x, step_array, kmin, kmax)
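# %%
# The same study can be scripted over a collection of problems; a short
# sketch (the selection below is arbitrary, any nd.BenchmarkProblem works):
for problem in [nd.ExponentialProblem(), nd.SinProblem(), nd.AtanProblem()]:
    x = problem.get_x()
    step_array = np.logspace(-15.0, -1.0, number_of_points)
    plot_error_vs_h_benchmark(problem, x, step_array, 1.0e-15, 1.0e-1)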

# %%
(The diff of the remaining five changed files is not shown.)