From 5bd39ae80518890d4aedf1fbd70a25a1d0304331 Mon Sep 17 00:00:00 2001 From: partev Date: Sat, 30 Dec 2023 20:53:44 -0500 Subject: [PATCH 01/11] fix a typo: cannonical -> canonical fix a typo: cannonical -> canonical --- examples/01_Exact_GPs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/01_Exact_GPs/index.rst b/examples/01_Exact_GPs/index.rst index ce91fd315..720795a4f 100644 --- a/examples/01_Exact_GPs/index.rst +++ b/examples/01_Exact_GPs/index.rst @@ -1,7 +1,7 @@ Exact GPs (Regression) ======================== -Regression with a Gaussian noise model is the cannonical example of Gaussian processes. +Regression with a Gaussian noise model is the canonical example of Gaussian processes. These examples will work for small to medium sized datasets (~2,000 data points). All examples here use exact GP inference. From 99a21310ca58156bba6c022023550f1580e53053 Mon Sep 17 00:00:00 2001 From: partev Date: Sun, 31 Dec 2023 01:00:30 -0500 Subject: [PATCH 02/11] DOC: fix broken formatting in leave_one_out_pseudo_likelihood.py --- gpytorch/mlls/leave_one_out_pseudo_likelihood.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpytorch/mlls/leave_one_out_pseudo_likelihood.py b/gpytorch/mlls/leave_one_out_pseudo_likelihood.py index f71c819e8..becb3b06e 100644 --- a/gpytorch/mlls/leave_one_out_pseudo_likelihood.py +++ b/gpytorch/mlls/leave_one_out_pseudo_likelihood.py @@ -47,7 +47,7 @@ def __init__(self, likelihood, model): def forward(self, function_dist: MultivariateNormal, target: Tensor, *params) -> Tensor: r""" - Computes the leave one out likelihood given :math:`p(\mathbf f)` and `\mathbf y` + Computes the leave one out likelihood given :math:`p(\mathbf f)` and :math:`\mathbf y` :param ~gpytorch.distributions.MultivariateNormal output: the outputs of the latent function (the :obj:`~gpytorch.models.GP`) From fe6171a81d4324f89d4af9c0c794eeb7bda1c972 Mon Sep 17 00:00:00 2001 From: partev Date: Mon, 1 Jan 2024 01:49:05 -0500 Subject: [PATCH 03/11] DOC: fix formatting issue in RFFKernel documentation --- gpytorch/kernels/rff_kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpytorch/kernels/rff_kernel.py b/gpytorch/kernels/rff_kernel.py index 68455d220..9ea0aa29c 100644 --- a/gpytorch/kernels/rff_kernel.py +++ b/gpytorch/kernels/rff_kernel.py @@ -35,7 +35,7 @@ class RFFKernel(Kernel): .. math:: \begin{equation} - k(\Delta) = \exp{(-\frac{\Delta^2}{2\sigma^2})}$ and $p(\omega) = \exp{(-\frac{\sigma^2\omega^2}{2})} + k(\Delta) = \exp{(-\frac{\Delta^2}{2\sigma^2})} \text{and} p(\omega) = \exp{(-\frac{\sigma^2\omega^2}{2})} \end{equation} where :math:`\Delta = x - x'`. From ebecc74b1fb9346f5d584747ddaf35e95eed7770 Mon Sep 17 00:00:00 2001 From: partev Date: Tue, 2 Jan 2024 23:05:05 -0500 Subject: [PATCH 04/11] fix a typo --- docs/source/kernels.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/kernels.rst b/docs/source/kernels.rst index 714e46a6c..5c7ae0945 100644 --- a/docs/source/kernels.rst +++ b/docs/source/kernels.rst @@ -9,7 +9,7 @@ gpytorch.kernels If you don't know what kernel to use, we recommend that you start out with a -:code:`gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel)`. +:code:`gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())`. 
Kernel From 4d5bb80a77377cc0d7c0ffd1a1a99869ef60cf1a Mon Sep 17 00:00:00 2001 From: partev Date: Thu, 4 Jan 2024 23:54:03 -0500 Subject: [PATCH 05/11] add extra spaces around "and" --- gpytorch/kernels/rff_kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpytorch/kernels/rff_kernel.py b/gpytorch/kernels/rff_kernel.py index 9ea0aa29c..c6b5e4ccd 100644 --- a/gpytorch/kernels/rff_kernel.py +++ b/gpytorch/kernels/rff_kernel.py @@ -35,7 +35,7 @@ class RFFKernel(Kernel): .. math:: \begin{equation} - k(\Delta) = \exp{(-\frac{\Delta^2}{2\sigma^2})} \text{and} p(\omega) = \exp{(-\frac{\sigma^2\omega^2}{2})} + k(\Delta) = \exp{(-\frac{\Delta^2}{2\sigma^2})} \text{ and } p(\omega) = \exp{(-\frac{\sigma^2\omega^2}{2})} \end{equation} where :math:`\Delta = x - x'`. From ecca3bee5a84b3979962445a093da162f442b7c4 Mon Sep 17 00:00:00 2001 From: Christopher Yeh Date: Fri, 1 Mar 2024 18:22:41 -0800 Subject: [PATCH 06/11] Update distributions.rst Fix typo --- docs/source/distributions.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/distributions.rst b/docs/source/distributions.rst index a98bd862b..e8ceeb9ee 100644 --- a/docs/source/distributions.rst +++ b/docs/source/distributions.rst @@ -5,7 +5,7 @@ gpytorch.distributions =================================== GPyTorch distribution objects are essentially the same as torch distribution objects. -For the most part, GpyTorch relies on torch's distribution library. +For the most part, GPyTorch relies on torch's distribution library. However, we offer two custom distributions. We implement a custom :obj:`~gpytorch.distributions.MultivariateNormal` that accepts From 10f8c7a2adcaa1a93fce45cf5118a901609be2f1 Mon Sep 17 00:00:00 2001 From: Johannes Kopton Date: Sun, 10 Mar 2024 23:38:37 +0100 Subject: [PATCH 07/11] Fix typo in docstring. --- gpytorch/kernels/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpytorch/kernels/kernel.py b/gpytorch/kernels/kernel.py index b8e5f34ec..69dab421c 100644 --- a/gpytorch/kernels/kernel.py +++ b/gpytorch/kernels/kernel.py @@ -236,7 +236,7 @@ def forward( ) -> Union[Tensor, LinearOperator]: r""" Computes the covariance between :math:`\mathbf x_1` and :math:`\mathbf x_2`. - This method should be imlemented by all Kernel subclasses. + This method should be implemented by all Kernel subclasses. :param x1: First set of data (... x N x D). :param x2: Second set of data (... x M x D). 
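A note for readers of this series: patch 04's recommendation in kernels.rst refers to the canonical exact-GP regression setup. A minimal sketch of that setup using the public GPyTorch API follows; the data and names are illustrative only, not code from any of these patches:

    import math
    import torch
    import gpytorch

    class ExactGPModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super().__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.ConstantMean()
            # The default covariance module recommended by docs/source/kernels.rst
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

    # Toy data, for illustration only
    train_x = torch.linspace(0, 1, 100)
    train_y = torch.sin(train_x * (2 * math.pi)) + 0.1 * torch.randn(train_x.size(0))
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    model = ExactGPModel(train_x, train_y, likelihood)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

This is the model family that the "Exact GPs (Regression)" examples touched by patch 01 describe.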
From fcbf685685b25dd54312999cb82a8a92799aa75c Mon Sep 17 00:00:00 2001 From: Geoff Pleiss <824157+gpleiss@users.noreply.github.com> Date: Mon, 18 Mar 2024 19:47:04 +0100 Subject: [PATCH 08/11] Fix flaky SVGP classification test (#2495) --- gpytorch/test/base_keops_test_case.py | 6 +++--- setup.py | 1 + test/examples/test_svgp_gp_classification.py | 8 ++++---- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/gpytorch/test/base_keops_test_case.py b/gpytorch/test/base_keops_test_case.py index fb261c860..ca32b4d64 100644 --- a/gpytorch/test/base_keops_test_case.py +++ b/gpytorch/test/base_keops_test_case.py @@ -66,7 +66,7 @@ def test_forward_x1_neq_x2(self, use_keops=True, ard=False, **kwargs): # The patch makes sure that we're actually using KeOps k1 = kern1(x1, x2).to_dense() k2 = kern2(x1, x2).to_dense() - self.assertLess(torch.norm(k1 - k2), 1e-4) + self.assertLess(torch.norm(k1 - k2), 1e-3) if use_keops: self.assertTrue(keops_mock.called) @@ -86,7 +86,7 @@ def test_batch_matmul(self, use_keops=True, **kwargs): # The patch makes sure that we're actually using KeOps res1 = kern1(x1, x1).matmul(rhs) res2 = kern2(x1, x1).matmul(rhs) - self.assertLess(torch.norm(res1 - res2), 1e-4) + self.assertLess(torch.norm(res1 - res2), 1e-3) if use_keops: self.assertTrue(keops_mock.called) @@ -115,7 +115,7 @@ def test_gradient(self, use_keops=True, ard=False, **kwargs): # stack all gradients into a tensor grad_s1 = torch.vstack(torch.autograd.grad(s1, [*kern1.hyperparameters()])) grad_s2 = torch.vstack(torch.autograd.grad(s2, [*kern2.hyperparameters()])) - self.assertAllClose(grad_s1, grad_s2, rtol=1e-4, atol=1e-5) + self.assertAllClose(grad_s1, grad_s2, rtol=1e-3, atol=1e-3) if use_keops: self.assertTrue(keops_mock.called) diff --git a/setup.py b/setup.py index f86a41a7c..df580c8aa 100644 --- a/setup.py +++ b/setup.py @@ -39,6 +39,7 @@ def find_version(*file_paths): torch_min = "1.11" install_requires = [ + "mpmath>=0.19,<=1.3", # avoid incompatibility of torch+sympy with mpmath 1.4 "scikit-learn", "scipy", "linear_operator>=0.5.2", diff --git a/test/examples/test_svgp_gp_classification.py b/test/examples/test_svgp_gp_classification.py index 1645b8c70..8a6efe689 100644 --- a/test/examples/test_svgp_gp_classification.py +++ b/test/examples/test_svgp_gp_classification.py @@ -16,7 +16,7 @@ def train_data(cuda=False): - train_x = torch.linspace(0, 1, 260) + train_x = torch.linspace(0, 1, 150) train_y = torch.cos(train_x * (2 * math.pi)).gt(0).float() if cuda: return train_x.cuda(), train_y.cuda() @@ -49,7 +49,7 @@ class TestSVGPClassification(BaseTestCase, unittest.TestCase): def test_classification_error(self, cuda=False, mll_cls=gpytorch.mlls.VariationalELBO): train_x, train_y = train_data(cuda=cuda) likelihood = BernoulliLikelihood() - model = SVGPClassificationModel(torch.linspace(0, 1, 25)) + model = SVGPClassificationModel(torch.linspace(0, 1, 64)) mll = mll_cls(likelihood, model, num_data=len(train_y)) if cuda: likelihood = likelihood.cuda() @@ -59,12 +59,12 @@ def test_classification_error(self, cuda=False, mll_cls=gpytorch.mlls.Variationa # Find optimal model hyperparameters model.train() likelihood.train() - optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.1) + optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.03) _wrapped_cg = MagicMock(wraps=linear_operator.utils.linear_cg) _cg_mock = patch("linear_operator.utils.linear_cg", new=_wrapped_cg) with _cg_mock as cg_mock: - for _ in range(400): + for _ in range(100): optimizer.zero_grad() output = model(train_x) loss = -mll(output, train_y)
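Patch 08 stabilizes a flaky test by shrinking the problem size and loosening tolerances; it does not change library behavior. The SVGPClassificationModel class that the test instantiates is not shown in the diff. As context, a standard construction of such a model with GPyTorch's variational API is sketched below; this is an assumption about the elided class, not its actual definition:

    import gpytorch
    from gpytorch.models import ApproximateGP
    from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy

    class SVGPClassificationModel(ApproximateGP):
        # Sketch only: the test's real model class is elided from the diff above;
        # this is the standard SVGP construction it most likely follows.
        def __init__(self, inducing_points):
            # One inducing point per row; the test passes torch.linspace(0, 1, 64)
            variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0))
            variational_strategy = VariationalStrategy(
                self, inducing_points, variational_distribution, learn_inducing_locations=True
            )
            super().__init__(variational_strategy)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

The test then pairs such a model with a BernoulliLikelihood and a VariationalELBO objective, as shown in the diff.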
From 89dfb46655affd12ed60d7a94ec9afa355b15e4c Mon Sep 17 00:00:00 2001 From: Sebastian Ament Date: Thu, 18 Apr 2024 15:56:49 -0400 Subject: [PATCH 09/11] ConstantKernel --- docs/source/kernels.rst | 9 +- gpytorch/kernels/__init__.py | 2 + gpytorch/kernels/constant_kernel.py | 123 +++++++++++++++++++++++++ gpytorch/test/base_kernel_test_case.py | 10 +- setup.py | 1 + test/kernels/test_constant_kernel.py | 113 +++++++++++++++++++++++ 6 files changed, 251 insertions(+), 7 deletions(-) create mode 100644 gpytorch/kernels/constant_kernel.py create mode 100644 test/kernels/test_constant_kernel.py diff --git a/docs/source/kernels.rst b/docs/source/kernels.rst index 5c7ae0945..5fa89b916 100644 --- a/docs/source/kernels.rst +++ b/docs/source/kernels.rst @@ -9,7 +9,7 @@ gpytorch.kernels If you don't know what kernel to use, we recommend that you start out with a -:code:`gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())`. +:code:`gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()) + gpytorch.kernels.ConstantKernel()`. Kernel @@ -22,6 +22,13 @@ Kernel Standard Kernels ----------------------------- +:hidden:`ConstantKernel` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ConstantKernel + :members: + + :hidden:`CosineKernel` ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/gpytorch/kernels/__init__.py b/gpytorch/kernels/__init__.py index cc85fe624..1d87e764b 100644 --- a/gpytorch/kernels/__init__.py +++ b/gpytorch/kernels/__init__.py @@ -2,6 +2,7 @@ from . import keops from .additive_structure_kernel import AdditiveStructureKernel from .arc_kernel import ArcKernel +from .constant_kernel import ConstantKernel from .cosine_kernel import CosineKernel from .cylindrical_kernel import CylindricalKernel from .distributional_input_kernel import DistributionalInputKernel @@ -38,6 +39,7 @@ "ArcKernel", "AdditiveKernel", "AdditiveStructureKernel", + "ConstantKernel", "CylindricalKernel", "MultiDeviceKernel", "CosineKernel", diff --git a/gpytorch/kernels/constant_kernel.py b/gpytorch/kernels/constant_kernel.py new file mode 100644 index 000000000..98a3560e2 --- /dev/null +++ b/gpytorch/kernels/constant_kernel.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 + +from typing import Optional, Tuple + +import torch +from torch import Tensor + +from ..constraints import Interval, Positive +from ..priors import Prior +from .kernel import Kernel + + +class ConstantKernel(Kernel): + """ + Constant covariance kernel for the probabilistic inference of constant coefficients. + + ConstantKernel represents the prior variance `k(x1, x2) = var(c)` of a constant `c`. + The prior variance of the constant is optimized during the GP hyper-parameter + optimization stage. The actual value of the constant is computed (implicitly) using + the linear algebraic approaches for the computation of GP samples and posteriors. + + The constant kernel `k_constant` is most useful as a modification of an arbitrary + base kernel `k_base`: + 1) Additive constants: The modification `k_base + k_constant` allows the GP to + infer a non-zero asymptotic value far from the training data, which generally + leads to more accurate extrapolation. Notably, the uncertainty in this constant + value affects the posterior covariances through the posterior inference equations.
+ This is not the case when a constant prior mean is used instead, since the prior mean + does not show up in the posterior covariance and is regularized by the log-determinant + during the optimization of the marginal likelihood. + 2) Multiplicative constants: The modification `k_base * k_constant` allows the GP to + modulate the variance of the kernel `k_base`, and is mathematically identical to + `ScaleKernel(base_kernel)` with the same constant. + """ + + has_lengthscale = False + + def __init__( + self, + batch_shape: Optional[torch.Size] = None, + constant_prior: Optional[Prior] = None, + constant_constraint: Optional[Interval] = None, + active_dims: Optional[Tuple[int, ...]] = None, + ): + """Constructor of ConstantKernel. + + Args: + batch_shape: The batch shape of the kernel. + constant_prior: Prior over the constant parameter. + constant_constraint: Constraint to place on constant parameter. + active_dims: The dimensions of the input with which to evaluate the kernel. + This is moot for the constant kernel, but added for compatibility with + the Kernel API. + """ + super().__init__(batch_shape=batch_shape, active_dims=active_dims) + + self.register_parameter( + name="raw_constant", + parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)), + ) + + if constant_prior is not None: + if not isinstance(constant_prior, Prior): + raise TypeError("Expected gpytorch.priors.Prior but got " + type(constant_prior).__name__) + self.register_prior( + "constant_prior", + constant_prior, + lambda m: m.constant, + lambda m, v: m._set_constant(v), + ) + + if constant_constraint is None: + constant_constraint = Positive() + self.register_constraint("raw_constant", constant_constraint) + + @property + def constant(self) -> Tensor: + return self.raw_constant_constraint.transform(self.raw_constant) + + @constant.setter + def constant(self, value: Tensor) -> None: + self._set_constant(value) + + def _set_constant(self, value: Tensor) -> None: + value = value.view(*self.batch_shape, 1) + self.initialize(raw_constant=self.raw_constant_constraint.inverse_transform(value)) + + def forward( + self, + x1: Tensor, + x2: Tensor, + diag: Optional[bool] = False, + last_dim_is_batch: Optional[bool] = False, + ) -> Tensor: + """Evaluates the constant kernel. + + Args: + x1: First input tensor of shape (batch_shape x n1 x d). + x2: Second input tensor of shape (batch_shape x n2 x d). + diag: If True, returns the diagonal of the covariance matrix. + last_dim_is_batch: If True, the last dimension of size `d` of the input + tensors is treated as a batch dimension. + + Returns: + A (batch_shape x n1 x n2)-dim, resp. (batch_shape x n1)-dim, tensor of + constant covariance values if diag is False, resp. True.
+ """ + if last_dim_is_batch: + x1 = x1.transpose(-1, -2).unsqueeze(-1) + x2 = x2.transpose(-1, -2).unsqueeze(-1) + + dtype = torch.promote_types(x1.dtype, x2.dtype) + batch_shape = torch.broadcast_shapes(x1.shape[:-2], x2.shape[:-2]) + shape = batch_shape + (x1.shape[-2],) + (() if diag else (x2.shape[-2],)) + constant = self.constant.to(dtype=dtype, device=x1.device) + + if not diag: + constant = constant.unsqueeze(-1) + + if last_dim_is_batch: + constant = constant.unsqueeze(-1) + + return constant.expand(shape) diff --git a/gpytorch/test/base_kernel_test_case.py b/gpytorch/test/base_kernel_test_case.py index 5301ce2d9..88f6afbd5 100644 --- a/gpytorch/test/base_kernel_test_case.py +++ b/gpytorch/test/base_kernel_test_case.py @@ -122,23 +122,21 @@ def test_no_batch_kernel_double_batch_x_ard(self): actual_diag = actual_covar_mat.diagonal(dim1=-1, dim2=-2) self.assertAllClose(kernel_diag, actual_diag, rtol=1e-3, atol=1e-5) - def test_smoke_double_batch_kernel_double_batch_x_no_ard(self): + def test_smoke_double_batch_kernel_double_batch_x_no_ard(self) -> None: kernel = self.create_kernel_no_ard(batch_shape=torch.Size([3, 2])) x = self.create_data_double_batch() - batch_covar_mat = kernel(x).evaluate_kernel().to_dense() + kernel(x).evaluate_kernel().to_dense() kernel(x, diag=True) - return batch_covar_mat - def test_smoke_double_batch_kernel_double_batch_x_ard(self): + def test_smoke_double_batch_kernel_double_batch_x_ard(self) -> None: try: kernel = self.create_kernel_ard(num_dims=2, batch_shape=torch.Size([3, 2])) except NotImplementedError: return x = self.create_data_double_batch() - batch_covar_mat = kernel(x).evaluate_kernel().to_dense() + kernel(x).evaluate_kernel().to_dense() kernel(x, diag=True) - return batch_covar_mat def test_kernel_getitem_single_batch(self): kernel = self.create_kernel_no_ard(batch_shape=torch.Size([2])) diff --git a/setup.py b/setup.py index df580c8aa..d5a05fbe9 100644 --- a/setup.py +++ b/setup.py @@ -82,6 +82,7 @@ def find_version(*file_paths): "nbclient<=0.7.3", "nbformat<=5.8.0", "nbsphinx<=0.9.1", + "lxml_html_clean", "platformdirs<=3.2.0", "setuptools_scm<=7.1.0", "sphinx<=6.2.1", diff --git a/test/kernels/test_constant_kernel.py b/test/kernels/test_constant_kernel.py new file mode 100644 index 000000000..849ec3996 --- /dev/null +++ b/test/kernels/test_constant_kernel.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 + +import itertools +import unittest + +import torch + +from torch import Tensor + +from gpytorch.kernels import AdditiveKernel, ConstantKernel, MaternKernel, ProductKernel, ScaleKernel +from gpytorch.lazy import LazyEvaluatedKernelTensor +from gpytorch.priors.torch_priors import GammaPrior +from gpytorch.test.base_kernel_test_case import BaseKernelTestCase + + +class TestConstantKernel(unittest.TestCase, BaseKernelTestCase): + def create_kernel_no_ard(self, **kwargs): + return ConstantKernel(**kwargs) + + def test_constant_kernel(self): + with self.subTest(device="cpu"): + self._test_constant_kernel(torch.device("cpu")) + + if torch.cuda.is_available(): + with self.subTest(device="cuda"): + self._test_constant_kernel(torch.device("cuda")) + + def _test_constant_kernel(self, device: torch.device): + n, d = 3, 5 + dtypes = [torch.float, torch.double] + batch_shapes = [(), (2,), (7, 2)] + torch.manual_seed(123) + for dtype, batch_shape in itertools.product(dtypes, batch_shapes): + tkwargs = {"dtype": dtype, "device": device} + places = 6 if dtype == torch.float else 12 + X = torch.rand(*batch_shape, n, d, **tkwargs) + + constant_kernel = 
+ constant_kernel = ConstantKernel(batch_shape=batch_shape) + KL = constant_kernel(X) + self.assertIsInstance(KL, LazyEvaluatedKernelTensor) + KM = KL.to_dense() + self.assertIsInstance(KM, Tensor) + self.assertEqual(KM.shape, (*batch_shape, n, n)) + self.assertEqual(KM.dtype, dtype) + self.assertEqual(KM.device.type, device.type) + # standard deviation is zero iff KM is constant + self.assertAlmostEqual(KM.std().item(), 0, places=places) + + # testing last_dim_is_batch + with self.subTest(last_dim_is_batch=True): + KD = constant_kernel(X, last_dim_is_batch=True).to(device=device) + self.assertIsInstance(KD, LazyEvaluatedKernelTensor) + KM = KD.to_dense() + self.assertIsInstance(KM, Tensor) + self.assertEqual(KM.shape, (*batch_shape, d, n, n)) + self.assertAlmostEqual(KM.std().item(), 0, places=places) + self.assertEqual(KM.dtype, dtype) + self.assertEqual(KM.device.type, device.type) + + # testing diag + with self.subTest(diag=True): + KD = constant_kernel(X, diag=True) + self.assertIsInstance(KD, Tensor) + self.assertEqual(KD.shape, (*batch_shape, n)) + self.assertAlmostEqual(KD.std().item(), 0, places=places) + self.assertEqual(KD.dtype, dtype) + self.assertEqual(KD.device.type, device.type) + + # testing diag and last_dim_is_batch + with self.subTest(diag=True, last_dim_is_batch=True): + KD = constant_kernel(X, diag=True, last_dim_is_batch=True) + self.assertIsInstance(KD, Tensor) + self.assertEqual(KD.shape, (*batch_shape, d, n)) + self.assertAlmostEqual(KD.std().item(), 0, places=places) + self.assertEqual(KD.dtype, dtype) + self.assertEqual(KD.device.type, device.type) + + # testing AD + with self.subTest(requires_grad=True): + X.requires_grad = True + constant_kernel(X).to_dense().sum().backward() + self.assertIsNone(X.grad) # constant kernel is not dependent on X + + # testing algebraic combinations with another kernel + base_kernel = MaternKernel().to(device=device) + + with self.subTest(additive=True): + sum_kernel = base_kernel + constant_kernel + self.assertIsInstance(sum_kernel, AdditiveKernel) + self.assertAllClose( + sum_kernel(X).to_dense(), + base_kernel(X).to_dense() + constant_kernel.constant.unsqueeze(-1), + ) + + # product with constant is equivalent to scale kernel + with self.subTest(product=True): + product_kernel = base_kernel * constant_kernel + self.assertIsInstance(product_kernel, ProductKernel) + + scale_kernel = ScaleKernel(base_kernel, batch_shape=batch_shape) + scale_kernel.to(device=device) + self.assertAllClose(scale_kernel(X).to_dense(), product_kernel(X).to_dense()) + + # setting constant + pies = torch.full_like(constant_kernel.constant, torch.pi) + constant_kernel.constant = pies + self.assertAllClose(constant_kernel.constant, pies) + + # specifying prior + constant_kernel = ConstantKernel(constant_prior=GammaPrior(concentration=2.4, rate=2.7)) + + with self.assertRaisesRegex(TypeError, "Expected gpytorch.priors.Prior but got"): + ConstantKernel(constant_prior=1) From 489cc1c3d91c7d63eaa82cda7a95826c7ebd01e4 Mon Sep 17 00:00:00 2001 From: partev Date: Sat, 20 Apr 2024 12:01:35 -0400 Subject: [PATCH 10/11] Update periodic_kernel.py fix broken URL to an article --- gpytorch/kernels/periodic_kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py index 1232b96ae..e32401f0e 100644 --- a/gpytorch/kernels/periodic_kernel.py +++ b/gpytorch/kernels/periodic_kernel.py @@ -78,7 +78,7 @@ class PeriodicKernel(Kernel): >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)
.. _David Mackay's Introduction to Gaussian Processes equation 47: - http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.1927&rep=rep1&type=pdf + https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=e045b76dc5daf9f4656ac10b456c5d1d9de5bc84 """ has_lengthscale = True From cc49c499647df13e904dd728a70dfa5cf730f9eb Mon Sep 17 00:00:00 2001 From: partev Date: Sun, 21 Apr 2024 14:15:25 -0400 Subject: [PATCH 11/11] Update periodic_kernel.py use original author's URL per suggestion --- gpytorch/kernels/periodic_kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py index e32401f0e..2972b523a 100644 --- a/gpytorch/kernels/periodic_kernel.py +++ b/gpytorch/kernels/periodic_kernel.py @@ -78,7 +78,7 @@ class PeriodicKernel(Kernel): >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10) .. _David Mackay's Introduction to Gaussian Processes equation 47: - https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=e045b76dc5daf9f4656ac10b456c5d1d9de5bc84 + https://inference.org.uk/mackay/gpB.pdf """ has_lengthscale = True
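Closing note on patch 09: the two algebraic properties stated in the ConstantKernel docstring (an additive constant shifts every covariance entry; a multiplicative constant is equivalent to ScaleKernel) can be verified directly, mirroring the checks in test_constant_kernel.py above. A short sketch with toy inputs, illustrative only:

    import torch
    from gpytorch.kernels import ConstantKernel, MaternKernel, ScaleKernel

    x = torch.rand(10, 3)  # toy inputs, for illustration only
    base = MaternKernel()
    const = ConstantKernel()

    # Additive constant: k_base + k_constant shifts each covariance entry
    # by the (positive) learned constant
    k_sum = base + const
    assert torch.allclose(
        k_sum(x).to_dense(),
        base(x).to_dense() + const.constant.unsqueeze(-1),
    )

    # Multiplicative constant: k_base * k_constant matches ScaleKernel(k_base)
    # when the outputscale equals the constant
    k_prod = base * const
    scaled = ScaleKernel(base)
    scaled.outputscale = const.constant.item()
    assert torch.allclose(k_prod(x).to_dense(), scaled(x).to_dense())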