From 1194c6fbc1aa90d7a39c93f30bc4d3de67eef8e2 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 19 Sep 2024 16:11:48 +0200 Subject: [PATCH] Doc: Try to run examples in parallel --- .github/workflows/build.yml | 4 +- doc/conf.py | 15 ++ doc/examples/plot_crosscut_distribution_2d.py | 7 - doc/examples/sensitivity_problems/README.txt | 2 + .../plot_convergence_ishigami.py | 171 ++++++++++++++++++ .../plot_print_problems.py | 56 ++++++ ...parsePolynomialChaosSensitivityAnalysis.py | 5 +- requirements.txt | 1 + 8 files changed, 250 insertions(+), 11 deletions(-) create mode 100644 doc/examples/sensitivity_problems/README.txt create mode 100644 doc/examples/sensitivity_problems/plot_convergence_ishigami.py create mode 100644 doc/examples/sensitivity_problems/plot_print_problems.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cbef763..2e3ca29 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ jobs: shell: bash -l {0} run: | black --check otbenchmark --config pyproject.toml - flake8 otbenchmark + flake8 otbenchmark doc/examples --max-line-length 120 - name: Test shell: bash -l {0} run: | @@ -25,7 +25,7 @@ jobs: shell: bash -l {0} run: | sudo apt install -y texlive-latex-recommended texlive-fonts-recommended texlive-latex-extra - make html -C doc + make html SPHINXOPTS="-W -T -j3" -C doc - name: Upload if: ${{ github.ref == 'refs/heads/master' }} run: | diff --git a/doc/conf.py b/doc/conf.py index d8f9648..d1f9622 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -15,6 +15,18 @@ import sys import subprocess +import sphinx_gallery +try: + from packaging.version import Version +except ImportError: + from pkg_resources import parse_version as Version + +try: + import joblib + have_joblib = True +except ImportError: + have_joblib = False + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
@@ -38,6 +50,9 @@
     'show_signature': False,
 }
 
+if Version(sphinx_gallery.__version__) >= Version("0.17.0"):
+    sphinx_gallery_conf["parallel"] = have_joblib
+
 autodoc_default_options = {
     "members": None,
     "inherited-members": None,
diff --git a/doc/examples/plot_crosscut_distribution_2d.py b/doc/examples/plot_crosscut_distribution_2d.py
index 2df8ccf..d906cde 100644
--- a/doc/examples/plot_crosscut_distribution_2d.py
+++ b/doc/examples/plot_crosscut_distribution_2d.py
@@ -5,11 +5,7 @@
 
 # %%
 import openturns as ot
-import numpy as np
 import otbenchmark as otb
-import openturns.viewer as otv
-import pylab as pl
-
 
 # %%
 # Create a Funky distribution
@@ -20,18 +16,15 @@
 x2 = ot.Normal(2.0, 1.0)
 x_funk = ot.ComposedDistribution([x1, x2], copula)
 
-
 # %%
 # Create a Punk distribution
 x1 = ot.Normal(1.0, 1.0)
 x2 = ot.Normal(-2.0, 1.0)
 x_punk = ot.ComposedDistribution([x1, x2], copula)
 
-
 # %%
 distribution = ot.Mixture([x_funk, x_punk], [0.5, 1.0])
 
-
 # %%
 referencePoint = distribution.getMean()
diff --git a/doc/examples/sensitivity_problems/README.txt b/doc/examples/sensitivity_problems/README.txt
new file mode 100644
index 0000000..793944c
--- /dev/null
+++ b/doc/examples/sensitivity_problems/README.txt
@@ -0,0 +1,2 @@
+Sensitivity examples
+====================
diff --git a/doc/examples/sensitivity_problems/plot_convergence_ishigami.py b/doc/examples/sensitivity_problems/plot_convergence_ishigami.py
new file mode 100644
index 0000000..48b4dbe
--- /dev/null
+++ b/doc/examples/sensitivity_problems/plot_convergence_ishigami.py
@@ -0,0 +1,171 @@
+"""
+Convergence of estimators on Ishigami
+=====================================
+"""
+
+# %%
+# In this example, we study the convergence of several estimators of the
+# Sobol' sensitivity indices on the Ishigami test function.
+#
+# We compare:
+#
+# * sampling methods with different estimators: Saltelli, Mauntz-Kucherenko, Martinez, Jansen and Janon;
+# * sampling methods with different designs of experiments: Monte-Carlo, LHS and Quasi-Monte-Carlo;
+# * polynomial chaos.
+#
+
+# %%
+import openturns as ot
+import otbenchmark as otb
+import openturns.viewer as otv
+import numpy as np
+
+
+# %%
+# When we estimate Sobol' indices, we may encounter the following warning messages::
+#
+#     WRN - The estimated first order Sobol index (2) is greater than its total order index...
+#     WRN - The estimated total order Sobol index (2) is lesser than first order index ...
+#
+# Many of these messages would be printed in this example.
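+# These warnings are harmless: the exact indices always satisfy S_i <= T_i,
+# but the sampling estimators are random, so an estimated first order index
+# can exceed the corresponding estimated total order index when the sample
+# size is small.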
+# To keep the output readable, we disable them:
+ot.Log.Show(ot.Log.NONE)
+
+
+# %%
+problem = otb.IshigamiSensitivity()
+print(problem)
+
+# %%
+distribution = problem.getInputDistribution()
+model = problem.getFunction()
+
+# %%
+# Exact first and total order indices
+exact_first_order = problem.getFirstOrderIndices()
+print(exact_first_order)
+exact_total_order = problem.getTotalOrderIndices()
+print(exact_total_order)
+
+# %%
+# Perform sensitivity analysis
+# ----------------------------
+
+# %%
+# Create the input and output samples.
+# With the default settings, the Sobol' design of experiments has
+# (d + 2) * size points, where d is the input dimension.
+ot.RandomGenerator.SetSeed(0)
+size = 10000
+inputDesign = ot.SobolIndicesExperiment(distribution, size).generate()
+outputDesign = model(inputDesign)
+
+# %%
+# Compute first and total order indices using the Saltelli estimator
+sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign, outputDesign, size)
+computed_first_order = sensitivityAnalysis.getFirstOrderIndices()
+computed_total_order = sensitivityAnalysis.getTotalOrderIndices()
+
+# %%
+# Compare with the exact results
+print("Sample size:", size)
+# First order
+print("Computed first order = ", computed_first_order)
+print("Exact first order = ", exact_first_order)
+# Total order
+print("Computed total order = ", computed_total_order)
+print("Exact total order = ", exact_total_order)
+
+# %%
+dimension = distribution.getDimension()
+
+# %%
+# Compute the componentwise absolute error: the logarithmic relative error
+# cannot be used, because some exact indices are zero.
+first_order_AE = ot.Point(np.abs(exact_first_order - computed_first_order))
+total_order_AE = ot.Point(np.abs(exact_total_order - computed_total_order))
+
+# %%
+print("Absolute error")
+for i in range(dimension):
+    print(
+        "AE(S%d) = %.4f, AE(T%d) = %.4f" % (i, first_order_AE[i], i, total_order_AE[i])
+    )
+
+# %%
+# For each estimator, plot the absolute error against the sample size.
+metaSAAlgorithm = otb.SensitivityBenchmarkMetaAlgorithm(problem)
+for estimator in ["Saltelli", "Martinez", "Jansen", "MauntzKucherenko", "Janon"]:
+    print("Estimator:", estimator)
+    benchmark = otb.SensitivityConvergence(
+        problem,
+        metaSAAlgorithm,
+        numberOfRepetitions=4,
+        maximum_elapsed_time=2.0,
+        sample_size_initial=20,
+        estimator=estimator,
+    )
+    grid = benchmark.plotConvergenceGrid(verbose=False)
+    view = otv.View(grid)
+    figure = view.getFigure()
+    _ = figure.suptitle("%s, %s" % (problem.getName(), estimator))
+    figure.set_figwidth(10.0)
+    figure.set_figheight(5.0)
+    figure.subplots_adjust(wspace=0.4, hspace=0.4)
+
+# %%
+# Plot the convergence of the Saltelli estimator with a Monte-Carlo design of experiments.
+benchmark = otb.SensitivityConvergence(
+    problem,
+    metaSAAlgorithm,
+    numberOfRepetitions=4,
+    maximum_elapsed_time=2.0,
+    sample_size_initial=20,
+    estimator="Saltelli",
+    sampling_method="MonteCarlo",
+)
+graph = benchmark.plotConvergenceCurve()
+_ = otv.View(graph)
+
+# %%
+# Compare the three designs of experiments.
+grid = ot.GridLayout(1, 3)
+maximum_absolute_error = 1.0
+minimum_absolute_error = 1.0e-5
+sampling_method_list = ["MonteCarlo", "LHS", "QMC"]
+for sampling_method_index in range(3):
+    sampling_method = sampling_method_list[sampling_method_index]
+    benchmark = otb.SensitivityConvergence(
+        problem,
+        metaSAAlgorithm,
+        numberOfRepetitions=4,
+        maximum_elapsed_time=2.0,
+        sample_size_initial=20,
+        estimator="Saltelli",
+        sampling_method=sampling_method,
+    )
+    graph = benchmark.plotConvergenceCurve()
+    # Set a common vertical range so that the three graphs can be compared
+    box = graph.getBoundingBox()
+    bound = box.getLowerBound()
+    bound[1] = minimum_absolute_error
+    box.setLowerBound(bound)
+    bound = box.getUpperBound()
+    bound[1] = maximum_absolute_error
+    box.setUpperBound(bound)
+    graph.setBoundingBox(box)
+    grid.setGraph(0, sampling_method_index, graph)
+_ = otv.View(grid)
+
+# %%
+# Use polynomial chaos
+# --------------------
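+#
+# A polynomial chaos expansion yields the Sobol' indices analytically, by
+# post-processing its coefficients: once the metamodel is fitted, no extra
+# sampling is needed, so the convergence is measured against the size of
+# the learning sample. In the call below, ``use_sampling=False`` selects
+# this metamodel estimator; ``total_degree`` bounds the polynomial degree,
+# and ``hyperbolic_quasinorm=1.0`` presumably corresponds to the linear
+# enumeration of the multi-indices (a quasi-norm lower than 1 would favor
+# terms with a low interaction order).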
+benchmark = otb.SensitivityConvergence(
+    problem,
+    metaSAAlgorithm,
+    numberOfExperiments=12,
+    numberOfRepetitions=1,
+    maximum_elapsed_time=5.0,
+    sample_size_initial=20,
+    use_sampling=False,
+    total_degree=20,
+    hyperbolic_quasinorm=1.0,
+)
+graph = benchmark.plotConvergenceCurve(verbose=True)
+graph.setLegendPosition("bottomleft")
+_ = otv.View(graph)
+
+# %%
+otv.View.ShowAll()
diff --git a/doc/examples/sensitivity_problems/plot_print_problems.py b/doc/examples/sensitivity_problems/plot_print_problems.py
new file mode 100644
index 0000000..1164ac3
--- /dev/null
+++ b/doc/examples/sensitivity_problems/plot_print_problems.py
@@ -0,0 +1,56 @@
+"""
+Print the sensitivity analysis problems
+=======================================
+"""
+
+# %%
+# In this example, we list the sensitivity benchmark problems
+# and print their exact Sobol' indices.
+
+# %%
+import otbenchmark as otb
+import pandas as pd
+
+# %%
+# We load the list of problems.
+benchmarkProblemList = otb.SensitivityBenchmarkProblemList()
+numberOfProblems = len(benchmarkProblemList)
+print(numberOfProblems)
+
+# %%
+# For each problem in the benchmark, print the problem name, the exact
+# Sobol' indices and the input dimension.
+for i in range(numberOfProblems):
+    problem = benchmarkProblemList[i]
+    name = problem.getName()
+    first_order_indices = problem.getFirstOrderIndices()
+    total_order_indices = problem.getTotalOrderIndices()
+    dimension = problem.getInputDistribution().getDimension()
+    print(
+        "#%d: %s, S = %s, T = %s, dimension = %d"
+        % (i, name, first_order_indices, total_order_indices, dimension)
+    )
+
+# %%
+# Gather the problem dimensions into a table.
+problem_names = [
+    benchmarkProblem.getName() for benchmarkProblem in benchmarkProblemList
+]
+columns = ["$d$"]
+df_problems_list = pd.DataFrame(index=problem_names, columns=columns)
+for problem in benchmarkProblemList:
+    name = problem.getName()
+    d = problem.getInputDistribution().getDimension()
+    df_problems_list.loc[name] = [int(d)]
+print(df_problems_list)
+
+# %%
+# Export the table to LaTeX.
+latex_code = df_problems_list.to_latex()
+with open("sensitivity_problems_list.tex", "w") as text_file:
+    text_file.write(latex_code)
diff --git a/otbenchmark/SparsePolynomialChaosSensitivityAnalysis.py b/otbenchmark/SparsePolynomialChaosSensitivityAnalysis.py
index 50093bb..e8ebfbc 100644
--- a/otbenchmark/SparsePolynomialChaosSensitivityAnalysis.py
+++ b/otbenchmark/SparsePolynomialChaosSensitivityAnalysis.py
@@ -145,8 +145,9 @@ def run(self, verbose=False):
         experiment = ot.MonteCarloExperiment(distribution, self.sample_size_test)
         inputTest = experiment.generate()
         outputTest = model(inputTest)
-        val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
-        predictivity_coefficient = val.computePredictivityFactor()[0]
+        predictions = metamodel(inputTest)
+        val = ot.MetaModelValidation(outputTest, predictions)
+        predictivity_coefficient = val.computeR2Score()[0]
         if verbose:
             print("Q2=%.2f%%" % (100 * predictivity_coefficient))
diff --git a/requirements.txt b/requirements.txt
index cc97f76..d65ee89 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,3 +13,4 @@ pandas<2.0
 tqdm
 shapely
 lxml-html-clean
+joblib