From 13940fc8fe103bb8ae7fef8a1b603ed9506bd001 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 7 Jan 2024 15:59:45 +0530 Subject: [PATCH 01/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 832603cc..c20902c1 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -1,6 +1,6 @@ # Tensor product function The tensor product function is defined as: -``f(x) = \prod_{i=1}^d \cos(a\pi x_i)`` +``\[ f(x) = ∏ᵢ=₁ᵈ cos(aπxᵢ) \]`` Let's import Surrogates and Plots: ```@example tensor From e75b13af12bd6279650ec18bcf154f0ac1431a40 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Wed, 10 Jan 2024 17:40:13 +0530 Subject: [PATCH 02/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 116 +++++++++++++++++++++++++++++++--------- 1 file changed, 91 insertions(+), 25 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index c20902c1..664a5c69 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -2,39 +2,105 @@ The tensor product function is defined as: ``\[ f(x) = ∏ᵢ=₁ᵈ cos(aπxᵢ) \]`` -Let's import Surrogates and Plots: -```@example tensor -using Surrogates -using Plots -default() +Where\ +d: Respresents the dimensionality of the input vector x\ +xi: Represents the ith components of the input vector\ +a: A constant parameter + +# Generating Data and Plotting + ``` +function tensor_product_function(x, a) + return prod(cos.(a * π * xi) for xi in x) +end -Define the 1D objective function: -```@example tensor -function f(x) - a = 0.5; - return cos(a*pi*x) +# Generate training and test data +function generate_data(n, lb, ub, a) + x_train = sample(n, lb, ub, SobolSample()) + y_train = tensor_product_function(x_train, a) + + x_test = sample(1000, lb, ub, SobolSample()) # Generating test data + y_test = 
tensor_product_function(x_test, a) # Generating test labels + + return x_train, y_train, x_test, y_test end -``` -```@example tensor +# Visualize training data and the true function +function plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) + xs = range(lb, ub, length=1000) + + scatter(x_train, y_train, label="Training points", xlims=(lb, ub), ylims=(-1, 1), legend=:top) + plot!(xs, tensor_product_function.(Ref(xs), a), label="True function", legend=:top) + scatter!(x_test, y_test, label="Test points") +end + +# Generate data and plot n = 30 lb = -5.0 ub = 5.0 a = 0.5 -x = sample(n, lb, ub, SobolSample()) -y = f.(x) -xs = lb:0.001:ub -scatter(x, y, label="Sampled points", xlims=(lb, ub), ylims=(-1, 1), legend=:top) -plot!(xs, f.(xs), label="True function", legend=:top) + +x_train, y_train, x_test, y_test = generate_data(n, lb, ub, a) +plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) +``` + +# Training various Surrogates +Now let's train various surrogate models and evaluate their performance on the test data + +``` +# Train different surrogate models +function train_surrogates(x_train, y_train) + loba = LobachevskySurrogate(x_train, y_train) + krig = Kriging(x_train, y_train) + return loba, krig +end + +# Evaluate and compare surrogate model performances +function evaluate_surrogates(loba, krig, x_test) + loba_pred = loba(x_test) + krig_pred = krig(x_test) + return loba_pred, krig_pred +end + +# Plot surrogate predictions against the true function +function plot_surrogate_predictions(loba_pred, krig_pred, y_test, a, lb, ub) + xs = range(lb, ub, length=1000) + + plot(xs, tensor_product_function.(Ref(xs), a), label="True function", legend=:top) + plot!(xs, loba_pred, label="Lobachevsky") + plot!(xs, krig_pred, label="Kriging") +end + +# Train surrogates and evaluate their performance +loba, krig = train_surrogates(x_train, y_train) +loba_pred, krig_pred = evaluate_surrogates(loba, krig, x_test) + +# Plot surrogate 
predictions against the true function +plot_surrogate_predictions(loba_pred, krig_pred, y_test, a, lb, ub) ``` -Fitting and plotting different surrogates: -```@example tensor -loba_1 = LobachevskySurrogate(x, y, lb, ub) -krig = Kriging(x, y, lb, ub) -scatter(x, y, label="Sampled points", xlims=(lb, ub), ylims=(-2.5, 2.5), legend=:bottom) -plot!(xs,f.(xs), label="True function", legend=:top) -plot!(xs, loba_1.(xs), label="Lobachevsky", legend=:top) -plot!(xs, krig.(xs), label="Kriging", legend=:top) +# Reporting the best Surrogate Model +To determine the best surrogate, you can compare their accuracy and performance metrics on the test data. For instance, you can calculate and compare the mean squared error (MSE) or any other relevant metric + ``` +using Statistics + +# Evaluate performance metrics +function calculate_performance_metrics(pred, true_vals) + return mean((pred .- true_vals).^2) +end + +# Compare surrogate model performances +mse_loba = calculate_performance_metrics(loba_pred, y_test) +mse_krig = calculate_performance_metrics(krig_pred, y_test) + +if mse_loba < mse_krig + println("Lobachevsky Surrogate is the best with MSE: ", mse_loba) +else + println("Kriging Surrogate is the best with MSE: ", mse_krig) +end +``` + +This structure provides a framework for generating data, training various +surrogate models, evaluating their performance on test data, and reporting +the best surrogate based on performance metrics like MSE. Adjustments can made to suit the specific evaluation criteria or additional surrogate models. 
From acc001b10d176d5851e512d096120ab60906324a Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Wed, 10 Jan 2024 17:42:19 +0530 Subject: [PATCH 03/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 664a5c69..57374a42 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -3,7 +3,7 @@ The tensor product function is defined as: ``\[ f(x) = ∏ᵢ=₁ᵈ cos(aπxᵢ) \]`` Where\ -d: Respresents the dimensionality of the input vector x\ +d: Represents the dimensionality of the input vector x\ xi: Represents the ith components of the input vector\ a: A constant parameter From 7849439072f8803fc02bfeb04f678b644117bc16 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sat, 13 Jan 2024 23:22:51 +0530 Subject: [PATCH 04/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 57374a42..472f5bc8 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -19,7 +19,7 @@ function generate_data(n, lb, ub, a) x_train = sample(n, lb, ub, SobolSample()) y_train = tensor_product_function(x_train, a) - x_test = sample(1000, lb, ub, SobolSample()) # Generating test data + x_test = sample(1000, lb, ub, RandomSample()) # Generating test data y_test = tensor_product_function(x_test, a) # Generating test labels return x_train, y_train, x_test, y_test @@ -28,10 +28,9 @@ end # Visualize training data and the true function function plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) xs = range(lb, ub, length=1000) - - scatter(x_train, y_train, label="Training points", xlims=(lb, ub), ylims=(-1, 1), legend=:top) - plot!(xs, tensor_product_function.(Ref(xs), a), label="True function", legend=:top) - scatter!(x_test, 
y_test, label="Test points") + plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) + scatter!(x_train, repeat([y_train], length(x_train)), label="Training Points", xlims=(lb,ub), ylims=(-1,1)) + scatter!(x_test, repeat([y_test], length(x_test)), label="Test Points") end # Generate data and plot From 301351819970edd5986753b058cfd0d5672a13d6 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 00:00:06 +0530 Subject: [PATCH 05/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 472f5bc8..015aef85 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -48,34 +48,35 @@ Now let's train various surrogate models and evaluate their performance on the t ``` # Train different surrogate models -function train_surrogates(x_train, y_train) - loba = LobachevskySurrogate(x_train, y_train) - krig = Kriging(x_train, y_train) +function train_surrogates(x_train, y_train, lb, ub, alpha=2.0, n=6) + loba = LobachevskySurrogate(x_train, y_train, lb, ub, alpha=alpha, n=n) + krig = Kriging(x_train, y_train, lb, ub) return loba, krig end # Evaluate and compare surrogate model performances function evaluate_surrogates(loba, krig, x_test) - loba_pred = loba(x_test) - krig_pred = krig(x_test) + loba_pred = loba.(x_test) + krig_pred = krig.(x_test) return loba_pred, krig_pred end # Plot surrogate predictions against the true function -function plot_surrogate_predictions(loba_pred, krig_pred, y_test, a, lb, ub) - xs = range(lb, ub, length=1000) - - plot(xs, tensor_product_function.(Ref(xs), a), label="True function", legend=:top) - plot!(xs, loba_pred, label="Lobachevsky") - plot!(xs, krig_pred, label="Kriging") +function plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, a, lb, ub) + xs = collect(x_test) # Convert x_test to an 
array + plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) + plot!(collect(x_test), loba_pred, seriestype=:scatter, label="Lobachevsky") + plot!(collect(x_test), krig_pred, seriestype=:scatter, label="Kriging") + plot!(collect(x_test), fill(y_test, length(x_test)), seriestype=:scatter, label="Sampled points") # Use fill to create an array of the same length as x_test end # Train surrogates and evaluate their performance -loba, krig = train_surrogates(x_train, y_train) +lb, ub = minimum(x_train), maximum(x_train) +loba, krig = train_surrogates(x_train, y_train, lb, ub) loba_pred, krig_pred = evaluate_surrogates(loba, krig, x_test) -# Plot surrogate predictions against the true function -plot_surrogate_predictions(loba_pred, krig_pred, y_test, a, lb, ub) +# Plotting Results +plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, 2.0, lb, ub) ``` # Reporting the best Surrogate Model From 0e7bf665f157b8d04222c15be591aa1e930d61f5 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 07:13:11 +0530 Subject: [PATCH 06/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 015aef85..27b395a2 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -9,7 +9,7 @@ a: A constant parameter # Generating Data and Plotting -``` +```@example function tensor_product_function(x, a) return prod(cos.(a * π * xi) for xi in x) end @@ -46,7 +46,7 @@ plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) # Training various Surrogates Now let's train various surrogate models and evaluate their performance on the test data -``` +```@example # Train different surrogate models function train_surrogates(x_train, y_train, lb, ub, alpha=2.0, n=6) loba = LobachevskySurrogate(x_train, y_train, lb, ub, alpha=alpha, n=n) @@ -82,7 +82,7 @@ 
plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, 2.0, lb, ub) # Reporting the best Surrogate Model To determine the best surrogate, you can compare their accuracy and performance metrics on the test data. For instance, you can calculate and compare the mean squared error (MSE) or any other relevant metric -``` +```@example using Statistics # Evaluate performance metrics From c605735adf51fb6416b786f5fe131ec690fd087d Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 07:15:06 +0530 Subject: [PATCH 07/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 27b395a2..6fd79f01 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -9,7 +9,7 @@ a: A constant parameter # Generating Data and Plotting -```@example +```@example tensor function tensor_product_function(x, a) return prod(cos.(a * π * xi) for xi in x) end @@ -46,7 +46,7 @@ plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) # Training various Surrogates Now let's train various surrogate models and evaluate their performance on the test data -```@example +```@example tensor # Train different surrogate models function train_surrogates(x_train, y_train, lb, ub, alpha=2.0, n=6) loba = LobachevskySurrogate(x_train, y_train, lb, ub, alpha=alpha, n=n) @@ -82,7 +82,7 @@ plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, 2.0, lb, ub) # Reporting the best Surrogate Model To determine the best surrogate, you can compare their accuracy and performance metrics on the test data. 
For instance, you can calculate and compare the mean squared error (MSE) or any other relevant metric -```@example +```@example tensor using Statistics # Evaluate performance metrics From 1307c4ecc5b927f54866a8ca5a7405d558a96cde Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 10:32:22 +0530 Subject: [PATCH 08/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 6fd79f01..edb50ff3 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -7,6 +7,13 @@ d: Represents the dimensionality of the input vector x\ xi: Represents the ith components of the input vector\ a: A constant parameter +Let's import Surrogates and Plots +``` +using Surrogates +using Plots +default() +``` + # Generating Data and Plotting ```@example tensor From 3cab1bfc0c0add6614ac12879eab8408edc2d52e Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 10:50:55 +0530 Subject: [PATCH 09/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index edb50ff3..e6518e17 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -53,7 +53,7 @@ plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) # Training various Surrogates Now let's train various surrogate models and evaluate their performance on the test data -```@example tensor +``` # Train different surrogate models function train_surrogates(x_train, y_train, lb, ub, alpha=2.0, n=6) loba = LobachevskySurrogate(x_train, y_train, lb, ub, alpha=alpha, n=n) @@ -89,7 +89,7 @@ plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, 2.0, lb, ub) # Reporting the best Surrogate Model To determine the best surrogate, you can compare their accuracy and 
performance metrics on the test data. For instance, you can calculate and compare the mean squared error (MSE) or any other relevant metric -```@example tensor +``` using Statistics # Evaluate performance metrics From 93b10cea14d6414344e5287a6b07c776edfd476a Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 11:13:16 +0530 Subject: [PATCH 10/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 44 +++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index e6518e17..b6e56f51 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -14,14 +14,16 @@ using Plots default() ``` -# Generating Data and Plotting +Generating Data and Plotting ```@example tensor function tensor_product_function(x, a) return prod(cos.(a * π * xi) for xi in x) end +``` -# Generate training and test data +Generate training and test data +```@example tensor function generate_data(n, lb, ub, a) x_train = sample(n, lb, ub, SobolSample()) y_train = tensor_product_function(x_train, a) @@ -31,16 +33,20 @@ function generate_data(n, lb, ub, a) return x_train, y_train, x_test, y_test end +``` -# Visualize training data and the true function +Visualize training data and the true function +```example tensor function plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) xs = range(lb, ub, length=1000) plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) scatter!(x_train, repeat([y_train], length(x_train)), label="Training Points", xlims=(lb,ub), ylims=(-1,1)) scatter!(x_test, repeat([y_test], length(x_test)), label="Test Points") end +``` -# Generate data and plot +Generate data and plot +```@example tensor n = 30 lb = -5.0 ub = 5.0 @@ -50,25 +56,27 @@ x_train, y_train, x_test, y_test = generate_data(n, lb, ub, a) plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) 
``` -# Training various Surrogates -Now let's train various surrogate models and evaluate their performance on the test data +Training various Surrogates -``` -# Train different surrogate models +```@example tensor function train_surrogates(x_train, y_train, lb, ub, alpha=2.0, n=6) loba = LobachevskySurrogate(x_train, y_train, lb, ub, alpha=alpha, n=n) krig = Kriging(x_train, y_train, lb, ub) return loba, krig end +``` -# Evaluate and compare surrogate model performances +Evaluate and compare surrogate model performances +```@example tensor function evaluate_surrogates(loba, krig, x_test) loba_pred = loba.(x_test) krig_pred = krig.(x_test) return loba_pred, krig_pred end +``` -# Plot surrogate predictions against the true function +Plot surrogate predictions against the true function +```@example tensor function plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, a, lb, ub) xs = collect(x_test) # Convert x_test to an array plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) @@ -76,28 +84,34 @@ function plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, a, lb, plot!(collect(x_test), krig_pred, seriestype=:scatter, label="Kriging") plot!(collect(x_test), fill(y_test, length(x_test)), seriestype=:scatter, label="Sampled points") # Use fill to create an array of the same length as x_test end +``` -# Train surrogates and evaluate their performance +Train surrogates and evaluate their performance +```@example tensor lb, ub = minimum(x_train), maximum(x_train) loba, krig = train_surrogates(x_train, y_train, lb, ub) loba_pred, krig_pred = evaluate_surrogates(loba, krig, x_test) +``` -# Plotting Results +Plotting Results +```@example tensor plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, 2.0, lb, ub) ``` -# Reporting the best Surrogate Model +Reporting the best Surrogate Model To determine the best surrogate, you can compare their accuracy and performance metrics on the test data. 
For instance, you can calculate and compare the mean squared error (MSE) or any other relevant metric -``` +```@example tensor using Statistics # Evaluate performance metrics function calculate_performance_metrics(pred, true_vals) return mean((pred .- true_vals).^2) end +``` -# Compare surrogate model performances +Compare surrogate model performances +```@example tensor mse_loba = calculate_performance_metrics(loba_pred, y_test) mse_krig = calculate_performance_metrics(krig_pred, y_test) From 7fec363028e1759eea4c450122f1ec457b2983e4 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sun, 14 Jan 2024 17:35:43 +0530 Subject: [PATCH 11/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index b6e56f51..4ee861e8 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -8,7 +8,7 @@ xi: Represents the ith components of the input vector\ a: A constant parameter Let's import Surrogates and Plots -``` +```@example tensor using Surrogates using Plots default() From 6b079439265e0f63bde7edf50a8dcc5842ab531e Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Mon, 15 Jan 2024 09:17:27 +0530 Subject: [PATCH 12/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 4ee861e8..6287d700 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -36,7 +36,7 @@ end ``` Visualize training data and the true function -```example tensor +```@example tensor function plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) xs = range(lb, ub, length=1000) plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) From 3265f6c85e50e471786f568a573b2df1abab457c Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN 
<105979087+Spinachboul@users.noreply.github.com> Date: Mon, 15 Jan 2024 13:44:51 +0530 Subject: [PATCH 13/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 102 ++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 68 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 6287d700..9dd9d2b5 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -11,28 +11,24 @@ Let's import Surrogates and Plots ```@example tensor using Surrogates using Plots +using Statistics default() ``` Generating Data and Plotting ```@example tensor -function tensor_product_function(x, a) - return prod(cos.(a * π * xi) for xi in x) +function tensor_product_function(x) + return prod(cos.(a*pi*x)) end ``` -Generate training and test data +Sampling parameters for training and test data ```@example tensor -function generate_data(n, lb, ub, a) - x_train = sample(n, lb, ub, SobolSample()) - y_train = tensor_product_function(x_train, a) - - x_test = sample(1000, lb, ub, RandomSample()) # Generating test data - y_test = tensor_product_function(x_test, a) # Generating test labels - - return x_train, y_train, x_test, y_test -end +n = 30 # Number of training points +lb = -5.0 # Lower bound of sampling range +ub = 5.0 # Upper bound of sampling range + ``` Visualize training data and the true function @@ -45,76 +41,41 @@ function plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub end ``` -Generate data and plot +Generate training and test data ```@example tensor -n = 30 -lb = -5.0 -ub = 5.0 -a = 0.5 - -x_train, y_train, x_test, y_test = generate_data(n, lb, ub, a) -plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) +x_train = sample(n, lb, ub, SobolSample()) # Sample training data points +y_train = f.(x_train) # Calculate corresponding function values +x_test = sample(1000, lb, ub, RandomSample()) # Sample larger test data set +y_test = f.(x_test) # Calculate corresponding true function values ``` 
-Training various Surrogates - +Train two surrogates: Lobachevsky and Kriging ```@example tensor -function train_surrogates(x_train, y_train, lb, ub, alpha=2.0, n=6) - loba = LobachevskySurrogate(x_train, y_train, lb, ub, alpha=alpha, n=n) - krig = Kriging(x_train, y_train, lb, ub) - return loba, krig -end +loba_surrogate = LobachevskySurrogate(x_train, y_train, lb, ub) # Train Lobachevsky surrogate +krig_surrogate = Kriging(x_train, y_train, lb, ub) # Train Kriging surrogate ``` -Evaluate and compare surrogate model performances +Obtain predictions from both surrogates for the test data ```@example tensor -function evaluate_surrogates(loba, krig, x_test) - loba_pred = loba.(x_test) - krig_pred = krig.(x_test) - return loba_pred, krig_pred -end +loba_pred = loba_surrogate.(x_test) # Predict using Lobachevsky surrogate +krig_pred = krig_surrogate.(x_test) # Predict using Kriging surrogate ``` -Plot surrogate predictions against the true function +Define a function to calculate Mean Squared Error (MSE) ```@example tensor -function plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, a, lb, ub) - xs = collect(x_test) # Convert x_test to an array - plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) - plot!(collect(x_test), loba_pred, seriestype=:scatter, label="Lobachevsky") - plot!(collect(x_test), krig_pred, seriestype=:scatter, label="Kriging") - plot!(collect(x_test), fill(y_test, length(x_test)), seriestype=:scatter, label="Sampled points") # Use fill to create an array of the same length as x_test +function calculate_mse(predictions, true_values) + return mean((predictions .- true_values).^2) # Calculate mean of squared errors end ``` -Train surrogates and evaluate their performance +Calculate MSE for both surrogates ```@example tensor -lb, ub = minimum(x_train), maximum(x_train) -loba, krig = train_surrogates(x_train, y_train, lb, ub) -loba_pred, krig_pred = evaluate_surrogates(loba, krig, x_test) +mse_loba = 
calculate_mse(loba_pred, y_test) # Calculate Lobachevsky's MSE +mse_krig = calculate_mse(krig_pred, y_test) # Calculate Kriging's MSE ``` -Plotting Results +Compare performance and print best-performing surrogate based on MSE ```@example tensor -plot_surrogate_predictions(loba_pred, krig_pred, x_test, y_test, 2.0, lb, ub) -``` - -Reporting the best Surrogate Model -To determine the best surrogate, you can compare their accuracy and performance metrics on the test data. For instance, you can calculate and compare the mean squared error (MSE) or any other relevant metric - -```@example tensor -using Statistics - -# Evaluate performance metrics -function calculate_performance_metrics(pred, true_vals) - return mean((pred .- true_vals).^2) -end -``` - -Compare surrogate model performances -```@example tensor -mse_loba = calculate_performance_metrics(loba_pred, y_test) -mse_krig = calculate_performance_metrics(krig_pred, y_test) - if mse_loba < mse_krig println("Lobachevsky Surrogate is the best with MSE: ", mse_loba) else @@ -122,6 +83,11 @@ else end ``` -This structure provides a framework for generating data, training various -surrogate models, evaluating their performance on test data, and reporting -the best surrogate based on performance metrics like MSE. Adjustments can made to suit the specific evaluation criteria or additional surrogate models. +Plot true function vs. model predictions +```@example tensor +xs = lb:0.01:ub +plot(xs, f.(xs), label="True function", legend=:top, color=:black) +plot!(xs, loba_surrogate.(xs), label="Lobachevsky", legend=:top, color=:red) +plot!(xs, krig_surrogate.(xs), label="Kriging", legend=:top, color=:blue) +``` +This structure provides a framework for generating data, training various surrogate models, evaluating their performance on test data, and reporting the best surrogate based on performance metrics like MSE. Adjustments can made to suit the specific evaluation criteria or additional surrogate models. 
From fe33b1dc1457e117b7fee00cf4299c3e138bff50 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Mon, 15 Jan 2024 14:05:04 +0530 Subject: [PATCH 14/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 9dd9d2b5..9ab69f0f 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -25,10 +25,9 @@ end Sampling parameters for training and test data ```@example tensor -n = 30 # Number of training points lb = -5.0 # Lower bound of sampling range ub = 5.0 # Upper bound of sampling range - +n = 30 # Number of training points ``` Visualize training data and the true function @@ -44,9 +43,9 @@ end Generate training and test data ```@example tensor x_train = sample(n, lb, ub, SobolSample()) # Sample training data points -y_train = f.(x_train) # Calculate corresponding function values +y_train = tensor_product_function.(x_train) # Calculate corresponding function values x_test = sample(1000, lb, ub, RandomSample()) # Sample larger test data set -y_test = f.(x_test) # Calculate corresponding true function values +y_test = tensor_product_function.(x_test) # Calculate corresponding true function values ``` Train two surrogates: Lobachevsky and Kriging From 33880bf34a6360da6e1b5299db0315a08fde16cd Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Mon, 15 Jan 2024 14:25:59 +0530 Subject: [PATCH 15/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 9ab69f0f..622e1f1f 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -19,6 +19,7 @@ Generating Data and Plotting ```@example tensor function tensor_product_function(x) + a = 0.5 return prod(cos.(a*pi*x)) end ``` @@ -30,16 +31,6 @@ ub = 5.0 # Upper 
bound of sampling range n = 30 # Number of training points ``` -Visualize training data and the true function -```@example tensor -function plot_data_and_true_function(x_train, y_train, x_test, y_test, a, lb, ub) - xs = range(lb, ub, length=1000) - plot(xs, tensor_product_function.(xs, a), label="True Function", legend=:top) - scatter!(x_train, repeat([y_train], length(x_train)), label="Training Points", xlims=(lb,ub), ylims=(-1,1)) - scatter!(x_test, repeat([y_test], length(x_test)), label="Test Points") -end -``` - Generate training and test data ```@example tensor x_train = sample(n, lb, ub, SobolSample()) # Sample training data points @@ -85,7 +76,7 @@ end Plot true function vs. model predictions ```@example tensor xs = lb:0.01:ub -plot(xs, f.(xs), label="True function", legend=:top, color=:black) +plot(xs, tensor_product_function.(xs), label="True function", legend=:top, color=:black) plot!(xs, loba_surrogate.(xs), label="Lobachevsky", legend=:top, color=:red) plot!(xs, krig_surrogate.(xs), label="Kriging", legend=:top, color=:blue) ``` From af820a3f2e6af557fece28e3579a768eda80ea43 Mon Sep 17 00:00:00 2001 From: MRIDUL JAIN <105979087+Spinachboul@users.noreply.github.com> Date: Sat, 20 Jan 2024 10:22:35 +0530 Subject: [PATCH 16/16] Update tensor_prod.md --- docs/src/tensor_prod.md | 87 ++++++++++++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 22 deletions(-) diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md index 622e1f1f..0f6dd25d 100644 --- a/docs/src/tensor_prod.md +++ b/docs/src/tensor_prod.md @@ -7,15 +7,17 @@ d: Represents the dimensionality of the input vector x\ xi: Represents the ith components of the input vector\ a: A constant parameter -Let's import Surrogates and Plots +# Let's import Surrogates and Plots ```@example tensor using Surrogates using Plots using Statistics +using SurrogatesPolyChaos +using SurrogatesRandomForest default() ``` -Generating Data and Plotting +# Define the function ```@example tensor 
function tensor_product_function(x) @@ -24,14 +26,14 @@ function tensor_product_function(x) end ``` -Sampling parameters for training and test data +# Sampling parameters for training and test data ```@example tensor lb = -5.0 # Lower bound of sampling range ub = 5.0 # Upper bound of sampling range n = 30 # Number of training points ``` -Generate training and test data +# Generate training and test data ```@example tensor x_train = sample(n, lb, ub, SobolSample()) # Sample training data points y_train = tensor_product_function.(x_train) # Calculate corresponding function values @@ -39,45 +41,86 @@ x_test = sample(1000, lb, ub, RandomSample()) # Sample larger test data set y_test = tensor_product_function.(x_test) # Calculate corresponding true function values ``` -Train two surrogates: Lobachevsky and Kriging +# Plot training and testing points ```@example tensor -loba_surrogate = LobachevskySurrogate(x_train, y_train, lb, ub) # Train Lobachevsky surrogate -krig_surrogate = Kriging(x_train, y_train, lb, ub) # Train Kriging surrogate +scatter(x_train, y_train, label="Training Points", xlabel="X-axis", ylabel="Y-axis", legend=:topright) +scatter!(x_test, y_test, label="Testing Points") ``` -Obtain predictions from both surrogates for the test data +# Train the following Surrogates: +## Kriging | Lobachevsky | Radial Basis | RandomForest | Polynomial Chaos ```@example tensor -loba_pred = loba_surrogate.(x_test) # Predict using Lobachevsky surrogate -krig_pred = krig_surrogate.(x_test) # Predict using Kriging surrogate +num_round = 2 +alpha = 2.0 +n = 6 +randomforest_surrogate = RandomForestSurrogate(x_train ,y_train ,lb, ub, num_round = 2) +radial_surrogate = RadialBasis(x_train, y_train, lb, ub) +lobachevsky_surrogate = LobachevskySurrogate(x_train, y_train, lb, ub, alpha = 2.0, n = 6) +kriging_surrogate = Kriging(x_train, y_train, lb, ub) +poly1 = PolynomialChaosSurrogate(x_train,y_train,lb,ub) +poly2 = PolynomialChaosSurrogate(x_train,y_train,lb,ub, op = 
SurrogatesPolyChaos.GaussOrthoPoly(5)) ``` -Define a function to calculate Mean Squared Error (MSE) +# Obtain predictions from all surrogates for the test data +```@example tensor +loba_pred = lobachevsky_surrogate.(x_test) +radial_pred = radial_surrogate.(x_test) +kriging_pred = kriging_surrogate.(x_test) +random_forest_pred = randomforest_surrogate.(x_test) +poly1_pred = poly1.(x_test) +poly2_pred = poly2.(x_test) +``` + +# Define a function to calculate Mean Squared Error (MSE) ```@example tensor function calculate_mse(predictions, true_values) return mean((predictions .- true_values).^2) # Calculate mean of squared errors end ``` -Calculate MSE for both surrogates +# Calculate MSE for all Surrogate Models ```@example tensor -mse_loba = calculate_mse(loba_pred, y_test) # Calculate Lobachevsky's MSE -mse_krig = calculate_mse(krig_pred, y_test) # Calculate Kriging's MSE +mse_loba = calculate_mse(loba_pred, y_test) +mse_krig = calculate_mse(kriging_pred, y_test) +mse_radial = calculate_mse(radial_pred, y_test) +mse_rf = calculate_mse(random_forest_pred, y_test) +mse_poly1 = calculate_mse(poly1_pred, y_test) +mse_poly2 = calculate_mse(poly2_pred, y_test) ``` -Compare performance and print best-performing surrogate based on MSE +# Compare the performance of all Surrogate Models ```@example tensor -if mse_loba < mse_krig - println("Lobachevsky Surrogate is the best with MSE: ", mse_loba) -else - println("Kriging Surrogate is the best with MSE: ", mse_krig) +mse_values = Dict("loba" => mse_loba, "krig" => mse_krig, "radial" => mse_radial, "rf" => mse_rf, "poly1" => mse_poly1, "poly2" => mse_poly2) + +# Sort the MSE values in ascending order and display them +sorted_mse = sort(collect(mse_values), by=x->x[2]) +for (model, mse) in sorted_mse + println("$model : $mse") end ``` -Plot true function vs. model predictions +# Plot true function vs. 
model predictions
```@example tensor
xs = lb:0.01:ub
plot(xs, tensor_product_function.(xs), label="True function", legend=:top, color=:black)
-plot!(xs, loba_surrogate.(xs), label="Lobachevsky", legend=:top, color=:red)
-plot!(xs, krig_surrogate.(xs), label="Kriging", legend=:top, color=:blue)
+plot!(xs, lobachevsky_surrogate.(xs), label="Lobachevsky", legend=:top, color=:red)
+plot!(xs, kriging_surrogate.(xs), label="Kriging", legend=:top, color=:blue)
+plot!(xs, randomforest_surrogate.(xs), label="Random Forest", legend=:top, color=:green)
+plot!(xs, poly1.(xs), label="Polynomial Chaos", legend=:top, color=:purple)
+plot!(xs, poly2.(xs), label="Polynomial Chaos", legend=:top, color=:purple)
+plot!(xs, radial_surrogate.(xs), label="Radials", legend=:top, color=:orange)
```
+
+# Tabular Representation of all Surrogates and their MSE Scores
+
+| Surrogate Model | MSE Score |
+|-------------------|----------------------|
+| Kriging | 4.70493378010316e-5 |
+| Lobachevsky | 7.967792682690972e-5|
+| Radial Basis | 0.004972603698976124 |
+| RandomForest | 0.2023233139232778 |
+| Poly1 | 0.4124881232761028 |
+| Poly2 | 0.42166909818265136 |
+
+ This structure provides a framework for generating data, training various surrogate models, evaluating their performance on test data, and reporting the best surrogate based on performance metrics like MSE. Adjustments can be made to suit the specific evaluation criteria or additional surrogate models.