Uploaded Test files
This commit is contained in:
parent
f584ad9d97
commit
2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,51 @@
|
|||
from sklearn.gaussian_process.kernels import Kernel, Hyperparameter
|
||||
from sklearn.gaussian_process.kernels import GenericKernelMixin
|
||||
from sklearn.gaussian_process.kernels import StationaryKernelMixin
|
||||
import numpy as np
|
||||
from sklearn.base import clone
|
||||
|
||||
|
||||
class MiniSeqKernel(GenericKernelMixin,
                    StationaryKernelMixin,
                    Kernel):
    """A minimal (but valid) convolutional kernel for sequences of
    variable length.
    """

    def __init__(self,
                 baseline_similarity=0.5,
                 baseline_similarity_bounds=(1e-5, 1)):
        self.baseline_similarity = baseline_similarity
        self.baseline_similarity_bounds = baseline_similarity_bounds

    @property
    def hyperparameter_baseline_similarity(self):
        # Expose the single tunable parameter to the GP machinery.
        return Hyperparameter("baseline_similarity",
                              "numeric",
                              self.baseline_similarity_bounds)

    def _f(self, s1, s2):
        # Kernel value for one pair of sequences: each character pair
        # contributes 1.0 on a match and ``baseline_similarity`` otherwise.
        return sum(1.0 if a == b else self.baseline_similarity
                   for a in s1
                   for b in s2)

    def _g(self, s1, s2):
        # Gradient contribution for one pair of sequences w.r.t. the
        # baseline similarity: the number of mismatching character pairs.
        return sum(0.0 if a == b else 1.0 for a in s1 for b in s2)

    def __call__(self, X, Y=None, eval_gradient=False):
        if Y is None:
            Y = X

        kernel_matrix = np.array([[self._f(x, y) for y in Y] for x in X])
        if not eval_gradient:
            return kernel_matrix

        # Gradient has a trailing singleton axis: one hyperparameter.
        gradient = np.array([[[self._g(x, y)] for y in Y] for x in X])
        return kernel_matrix, gradient

    def diag(self, X):
        # Diagonal of the kernel matrix without computing the full matrix.
        return np.array([self._f(x, x) for x in X])

    def clone_with_theta(self, theta):
        cloned = clone(self)
        cloned.theta = theta
        return cloned
|
|
@ -0,0 +1,182 @@
|
|||
"""Testing for Gaussian process classification """
|
||||
|
||||
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
|
||||
# License: BSD 3 clause
|
||||
|
||||
import numpy as np
|
||||
|
||||
from scipy.optimize import approx_fprime
|
||||
|
||||
import pytest
|
||||
|
||||
from sklearn.gaussian_process import GaussianProcessClassifier
|
||||
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
|
||||
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
|
||||
|
||||
from sklearn.utils._testing import assert_almost_equal, assert_array_equal
|
||||
|
||||
|
||||
def f(x):
    """Latent function whose sign / level sets define the test targets."""
    return np.sin(x)
|
||||
|
||||
|
||||
# 30 evenly spaced 1-D training inputs on [0, 10] (column vector) and a
# separate set of 5 held-out test inputs.
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
# Binary targets: sign of sin(x).
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
# Three-class targets obtained by thresholding sin(x) at +/- 0.35.
y_mc = np.empty(y.shape, dtype=int)  # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
# NOTE(review): a value of exactly 0.35 would be left uninitialized by
# np.empty; presumably no grid point hits it exactly — confirm.
y_mc[fX > 0.35] = 2
|
||||
|
||||
|
||||
# Kernels under test: a "fixed" kernel (no free hyperparameters), plain
# RBF kernels, and a product of a constant kernel with an RBF.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
# Subset whose hyperparameters can actually be optimized.
non_fixed_kernels = [kernel for kernel in kernels
                     if kernel != fixed_kernel]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_predict_consistent(kernel):
    """Binary predict decision agrees with predicted probability >= 0.5."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_array_equal(gpc.predict(X),
                       gpc.predict_proba(X)[:, 1] >= 0.5)
|
||||
|
||||
|
||||
def test_predict_consistent_structured():
    """Same predict/predict_proba consistency check on string inputs."""
    # Check binary predict decision has also predicted probability above 0.5.
    X = ['A', 'AB', 'B']
    y = np.array([True, False, True])
    kernel = MiniSeqKernel(baseline_similarity_bounds='fixed')
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_array_equal(gpc.predict(X),
                       gpc.predict_proba(X)[:, 1] >= 0.5)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_lml_improving(kernel):
    """Hyperparameter tuning improves the log-marginal likelihood."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert (gpc.log_marginal_likelihood(gpc.kernel_.theta) >
            gpc.log_marginal_likelihood(kernel.theta))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_lml_precomputed(kernel):
    """LML of the optimized kernel is stored and returned by default."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
                        gpc.log_marginal_likelihood(), 7)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_lml_without_cloning_kernel(kernel):
    """clone_kernel=False writes theta back into the fitted kernel."""
    # Test that clone_kernel=False has side-effects of kernel.theta.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)

    gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
    assert_almost_equal(gpc.kernel_.theta, input_theta, 7)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    """After optimization the LML gradient is ~0, or theta is at a bound."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = \
        gpc.log_marginal_likelihood(gpc.kernel_.theta, True)

    # Each component is either a stationary point or pinned to a bound.
    assert np.all((np.abs(lml_gradient) < 1e-4) |
                  (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
                  (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_lml_gradient(kernel):
    """Analytic LML gradient matches a finite-difference approximation."""
    # Compare analytic and numeric gradient of log marginal likelihood.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = \
        approx_fprime(kernel.theta,
                      lambda theta: gpc.log_marginal_likelihood(theta,
                                                                False),
                      1e-10)

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
|
||||
|
||||
|
||||
def test_random_starts():
    """More random restarts never decrease the chosen theta's LML."""
    # Test that an increasing number of random-starts of GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0

    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1e-3] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features)
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessClassifier(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # Small tolerance absorbs float32-level rounding noise.
        assert lml > last_lml - np.finfo(np.float32).eps
        last_lml = lml
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_custom_optimizer(kernel):
    """GPC accepts an externally defined optimizer callable."""
    # Define a dummy optimizer that simply tests 10 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(10):
            # Sample within the (clipped) bounds of each hyperparameter.
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
    gpc.fit(X, y_mc)
    # Checks that optimizer improved marginal likelihood
    assert (gpc.log_marginal_likelihood(gpc.kernel_.theta) >
            gpc.log_marginal_likelihood(kernel.theta))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_multi_class(kernel):
    """Multi-class probabilities sum to one; argmax matches predict."""
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    assert_almost_equal(y_prob.sum(1), 1)

    y_pred = gpc.predict(X2)
    assert_array_equal(np.argmax(y_prob, 1), y_pred)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_multi_class_n_jobs(kernel):
    """Multi-class GPC produces identical probabilities with n_jobs > 1."""
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
    gpc_2.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    y_prob_2 = gpc_2.predict_proba(X2)
    assert_almost_equal(y_prob, y_prob_2)
|
|
@ -0,0 +1,469 @@
|
|||
"""Testing for Gaussian process regression """
|
||||
|
||||
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
|
||||
# Modified by: Pete Green <p.l.green@liverpool.ac.uk>
|
||||
# License: BSD 3 clause
|
||||
|
||||
import sys
|
||||
import numpy as np
|
||||
|
||||
from scipy.optimize import approx_fprime
|
||||
|
||||
import pytest
|
||||
|
||||
from sklearn.gaussian_process import GaussianProcessRegressor
|
||||
from sklearn.gaussian_process.kernels \
|
||||
import RBF, ConstantKernel as C, WhiteKernel
|
||||
from sklearn.gaussian_process.kernels import DotProduct
|
||||
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
|
||||
|
||||
from sklearn.utils._testing \
|
||||
import (assert_array_less,
|
||||
assert_almost_equal, assert_raise_message,
|
||||
assert_array_almost_equal, assert_array_equal,
|
||||
assert_allclose)
|
||||
|
||||
|
||||
def f(x):
    """Noise-free regression target: x * sin(x)."""
    return x * np.sin(x)
|
||||
|
||||
|
||||
# Six 1-D training inputs, five held-out test inputs, and noise-free
# targets y = f(X) = X * sin(X).
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
|
||||
|
||||
# Kernels under test: a "fixed" kernel (no free hyperparameters), plain
# RBF kernels, and products/sums with constant kernels.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2)),
           C(0.1, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2))]
# Subset whose hyperparameters can actually be optimized.
non_fixed_kernels = [kernel for kernel in kernels
                     if kernel != fixed_kernel]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_gpr_interpolation(kernel):
    """A noise-free GP interpolates its training data exactly."""
    if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
        pytest.xfail("This test may fail on 32bit Py3.6")

    # Test the interpolating property for different kernels.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.)
|
||||
|
||||
|
||||
def test_gpr_interpolation_structured():
    """Interpolation also holds for structured (string) inputs."""
    # Test the interpolating property for different kernels.
    kernel = MiniSeqKernel(baseline_similarity_bounds='fixed')
    X = ['A', 'B', 'C']
    y = np.array([1, 2, 3])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    # Gradient of the kernel w.r.t. baseline similarity counts mismatches:
    # zero on the diagonal, one elsewhere for single-character inputs.
    assert_almost_equal(kernel(X, eval_gradient=True)[1].ravel(),
                        (1 - np.eye(len(X))).ravel())
    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_lml_improving(kernel):
    """Hyperparameter tuning improves the log-marginal likelihood."""
    if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
        pytest.xfail("This test may fail on 32bit Py3.6")

    # Test that hyperparameter-tuning improves log-marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) >
            gpr.log_marginal_likelihood(kernel.theta))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_lml_precomputed(kernel):
    """LML of the optimized kernel is stored and returned by default."""
    # Test that lml of optimized kernel is stored correctly.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    # Exact equality is intentional: the stored value should be the very
    # same float, not merely close.
    assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) ==
            gpr.log_marginal_likelihood())
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_lml_without_cloning_kernel(kernel):
    """clone_kernel=False writes theta back into the fitted kernel."""
    # Test that clone_kernel=False has side-effects on kernel_.theta.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)

    gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
    assert_almost_equal(gpr.kernel_.theta, input_theta, 7)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    """After optimization the LML gradient is ~0, or theta is at a bound."""
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = \
        gpr.log_marginal_likelihood(gpr.kernel_.theta, True)

    # Each component is either a stationary point or pinned to a bound.
    assert np.all((np.abs(lml_gradient) < 1e-4) |
                  (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
                  (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1]))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_solution_inside_bounds(kernel):
    """The optimized hyperparameters stay inside their declared bounds."""
    # Test that hyperparameter-optimization remains in bounds.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    bounds = gpr.kernel_.bounds
    max_ = np.finfo(gpr.kernel_.theta.dtype).max
    tiny = 1e-10
    # Replace infinite upper bounds so the comparisons below stay finite.
    bounds[~np.isfinite(bounds[:, 1]), 1] = max_

    assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
    assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_lml_gradient(kernel):
    """Analytic LML gradient matches a finite-difference approximation."""
    # Compare analytic and numeric gradient of log marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = \
        approx_fprime(kernel.theta,
                      lambda theta: gpr.log_marginal_likelihood(theta,
                                                                False),
                      1e-10)

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_prior(kernel):
    """The GP prior (no fit) has mean 0 and identical variances."""
    gpr = GaussianProcessRegressor(kernel=kernel)

    y_mean, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_mean, 0, 5)
    if len(gpr.kernel.theta) > 1:
        # XXX: quite hacky, works only for current kernels
        assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
    else:
        assert_almost_equal(np.diag(y_cov), 1, 5)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_sample_statistics(kernel):
    """Sample mean/variance of GP draws match the predictive moments."""
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    y_mean, y_cov = gpr.predict(X2, return_cov=True)

    samples = gpr.sample_y(X2, 300000)

    # More digits accuracy would require many more samples
    assert_almost_equal(y_mean, np.mean(samples, 1), 1)
    # Variances are compared on a normalized scale to tolerate very
    # different magnitudes across kernels.
    assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
                        np.var(samples, 1) / np.diag(y_cov).max(), 1)
|
||||
|
||||
|
||||
def test_no_optimizer():
    """Kernel parameters are left untouched when optimizer is None."""
    kernel = RBF(1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
    assert np.exp(gpr.kernel_.theta) == 1.0
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_predict_cov_vs_std(kernel):
    """Predicted std.-dev. equals the sqrt of the covariance diagonal."""
    if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
        pytest.xfail("This test may fail on 32bit Py3.6")

    # Test that predicted std.-dev. is consistent with cov's diagonal.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_mean, y_cov = gpr.predict(X2, return_cov=True)
    y_mean, y_std = gpr.predict(X2, return_std=True)
    assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
|
||||
|
||||
|
||||
def test_anisotropic_kernel():
    """GPR identifies meaningful anisotropic length-scales."""
    # We learn a function which varies in one dimension ten-times slower
    # than in the other. The corresponding length-scales should differ by at
    # least a factor 5
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, (50, 2))
    y = X[:, 0] + 0.1 * X[:, 1]

    kernel = RBF([1.0, 1.0])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    # theta holds log-length-scales; the slow dimension (index 1) should
    # get the larger length-scale.
    assert (np.exp(gpr.kernel_.theta[1]) >
            np.exp(gpr.kernel_.theta[0]) * 5)
|
||||
|
||||
|
||||
def test_random_starts():
    """More random restarts never decrease the chosen theta's LML."""
    # Test that an increasing number of random-starts of GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
        + rng.normal(scale=0.1, size=n_samples)

    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1.0] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessRegressor(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # Small tolerance absorbs float32-level rounding noise.
        assert lml > last_lml - np.finfo(np.float32).eps
        last_lml = lml
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_y_normalization(kernel):
    """
    Test normalization of the target values in GP

    Fitting non-normalizing GP on normalized y and fitting normalizing GP
    on unnormalized y should yield identical results. Note that, here,
    'normalized y' refers to y that has been made zero mean and unit
    variance.

    """

    y_mean = np.mean(y)
    y_std = np.std(y)
    y_norm = (y - y_mean) / y_std

    # Fit non-normalizing GP on normalized y
    gpr = GaussianProcessRegressor(kernel=kernel)
    gpr.fit(X, y_norm)

    # Fit normalizing GP on unnormalized y
    gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr_norm.fit(X, y)

    # Compare predicted mean, std-devs and covariances
    # (un-normalize the first model's predictions by hand first).
    y_pred, y_pred_std = gpr.predict(X2, return_std=True)
    y_pred = y_pred * y_std + y_mean
    y_pred_std = y_pred_std * y_std
    y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)

    assert_almost_equal(y_pred, y_pred_norm)
    assert_almost_equal(y_pred_std, y_pred_std_norm)

    # Covariance scales with the square of the target scale.
    _, y_cov = gpr.predict(X2, return_cov=True)
    y_cov = y_cov * y_std**2
    _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)

    assert_almost_equal(y_cov, y_cov_norm)
|
||||
|
||||
|
||||
def test_large_variance_y():
    """
    Here we test that, when normalize_y=True, our GP can produce a
    sensible fit to training data whose variance is significantly
    larger than unity. This test was made in response to issue #15612.

    GP predictions are verified against predictions that were made
    using GPy which, here, is treated as the 'gold standard'. Note that we
    only investigate the RBF kernel here, as that is what was used in the
    GPy implementation.

    The following code can be used to recreate the GPy data:

    --------------------------------------------------------------------------
    import GPy

    kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
    gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
    gpy.optimize()
    y_pred_gpy, y_var_gpy = gpy.predict(X2)
    y_pred_std_gpy = np.sqrt(y_var_gpy)
    --------------------------------------------------------------------------
    """

    # Here we utilise a larger variance version of the training data
    y_large = 10 * y

    # Standard GP with normalize_y=True
    RBF_params = {'length_scale': 1.0}
    kernel = RBF(**RBF_params)
    gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr.fit(X, y_large)
    y_pred, y_pred_std = gpr.predict(X2, return_std=True)

    # 'Gold standard' mean predictions from GPy
    y_pred_gpy = np.array([15.16918303,
                           -27.98707845,
                           -39.31636019,
                           14.52605515,
                           69.18503589])

    # 'Gold standard' std predictions from GPy
    y_pred_std_gpy = np.array([7.78860962,
                               3.83179178,
                               0.63149951,
                               0.52745188,
                               0.86170042])

    # Based on numerical experiments, it's reasonable to expect our
    # GP's mean predictions to get within 7% of predictions of those
    # made by GPy.
    assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)

    # Based on numerical experiments, it's reasonable to expect our
    # GP's std predictions to get within 15% of predictions of those
    # made by GPy.
    assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
|
||||
|
||||
|
||||
def test_y_multioutput():
    """GPR handles multi-dimensional targets consistently with 1-D fits."""
    # Test that GPR can deal with multi-dimensional target values
    y_2d = np.vstack((y, y * 2)).T

    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                   normalize_y=False)
    gpr.fit(X, y)

    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                      normalize_y=False)
    gpr_2d.fit(X, y_2d)

    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)

    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)

    # Standard deviation and covariance do not depend on output
    assert_almost_equal(y_std_1d, y_std_2d)
    assert_almost_equal(y_cov_1d, y_cov_2d)

    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])

    # Test hyperparameter optimization: duplicating the target column
    # should not change the optimized hyperparameters.
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)

        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)

        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_custom_optimizer(kernel):
    """GPR accepts an externally defined optimizer callable."""
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            # Sample within the (clipped) bounds of each hyperparameter.
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
    gpr.fit(X, y)
    # Checks that optimizer improved marginal likelihood
    assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) >
            gpr.log_marginal_likelihood(gpr.kernel.theta))
|
||||
|
||||
|
||||
def test_gpr_correct_error_message():
    """A non-PSD kernel matrix raises LinAlgError with a helpful message."""
    # DotProduct on collinear-ish integer data with alpha=0 yields a
    # singular kernel matrix.
    X = np.arange(12).reshape(6, -1)
    y = np.ones(6)
    kernel = DotProduct()
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
    assert_raise_message(np.linalg.LinAlgError,
                         "The kernel, %s, is not returning a "
                         "positive definite matrix. Try gradually increasing "
                         "the 'alpha' parameter of your "
                         "GaussianProcessRegressor estimator."
                         % kernel, gpr.fit, X, y)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_duplicate_input(kernel):
    """GPR handles two different target values for the same input."""
    # alpha > 0 regularizes the otherwise singular kernel matrix.
    gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
    gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)

    # Exactly duplicated input point with conflicting targets ...
    X_ = np.vstack((X, X[0]))
    y_ = np.hstack((y, y[0] + 1))
    gpr_equal_inputs.fit(X_, y_)

    # ... should behave like a nearly-duplicated one.
    X_ = np.vstack((X, X[0] + 1e-15))
    y_ = np.hstack((y, y[0] + 1))
    gpr_similar_inputs.fit(X_, y_)

    X_test = np.linspace(0, 10, 100)[:, None]
    y_pred_equal, y_std_equal = \
        gpr_equal_inputs.predict(X_test, return_std=True)
    y_pred_similar, y_std_similar = \
        gpr_similar_inputs.predict(X_test, return_std=True)

    assert_almost_equal(y_pred_equal, y_pred_similar)
    assert_almost_equal(y_std_equal, y_std_similar)
|
||||
|
||||
|
||||
def test_no_fit_default_predict():
    """Predicting before fit works and matches the explicit default kernel."""
    # Test that GPR predictions without fit does not break by default.
    default_kernel = (C(1.0, constant_value_bounds="fixed") *
                      RBF(1.0, length_scale_bounds="fixed"))
    gpr1 = GaussianProcessRegressor()
    _, y_std1 = gpr1.predict(X, return_std=True)
    _, y_cov1 = gpr1.predict(X, return_cov=True)

    gpr2 = GaussianProcessRegressor(kernel=default_kernel)
    _, y_std2 = gpr2.predict(X, return_std=True)
    _, y_cov2 = gpr2.predict(X, return_cov=True)

    assert_array_almost_equal(y_std1, y_std2)
    assert_array_almost_equal(y_cov1, y_cov2)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_K_inv_reset(kernel):
    """The cached _K_inv is invalidated by a new fit and then recomputed."""
    y2 = f(X2).ravel()

    # Test that self._K_inv is reset after a new fit
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert hasattr(gpr, '_K_inv')
    assert gpr._K_inv is None
    # predict(return_std=True) lazily computes and caches _K_inv.
    gpr.predict(X, return_std=True)
    assert gpr._K_inv is not None
    gpr.fit(X2, y2)
    assert gpr._K_inv is None
    gpr.predict(X2, return_std=True)
    gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
    gpr2.predict(X2, return_std=True)
    # the value of K_inv should be independent of the first fit
    assert_array_equal(gpr._K_inv, gpr2._K_inv)
|
|
@ -0,0 +1,385 @@
|
|||
"""Testing for kernels for Gaussian processes."""
|
||||
|
||||
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
|
||||
# License: BSD 3 clause
|
||||
|
||||
import pytest
|
||||
import numpy as np
|
||||
from inspect import signature
|
||||
|
||||
from sklearn.gaussian_process.kernels import _approx_fprime
|
||||
|
||||
from sklearn.metrics.pairwise \
|
||||
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
|
||||
from sklearn.gaussian_process.kernels \
|
||||
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
|
||||
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
|
||||
Exponentiation, Kernel, CompoundKernel)
|
||||
from sklearn.base import clone
|
||||
|
||||
from sklearn.utils._testing import (assert_almost_equal, assert_array_equal,
|
||||
assert_array_almost_equal,
|
||||
assert_allclose,
|
||||
assert_raise_message)
|
||||
|
||||
|
||||
# Random 2-D inputs for kernel evaluation (X) and cross-evaluation (Y).
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))

kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
# A broad collection of kernels: stationary, non-stationary, isotropic,
# anisotropic, fixed, composed, and exponentiated.
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
           ConstantKernel(constant_value=10.0),
           2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * RBF(length_scale=0.5), kernel_rbf_plus_white,
           2.0 * RBF(length_scale=[0.5, 2.0]),
           2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * Matern(length_scale=0.5, nu=0.5),
           2.0 * Matern(length_scale=1.5, nu=1.5),
           2.0 * Matern(length_scale=2.5, nu=2.5),
           2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
           3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
           4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
           RationalQuadratic(length_scale=0.5, alpha=1.5),
           ExpSineSquared(length_scale=0.5, periodicity=1.5),
           DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
           RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
    if metric in ["additive_chi2", "chi2"]:
        # Skipped — presumably because chi2 metrics require non-negative
        # input, while X/Y here are Gaussian; confirm before relying on it.
        continue
    kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_gradient(kernel):
    """Analytic kernel gradient matches a finite-difference approximation."""
    # Compare analytic and numeric gradient of kernels.
    K, K_gradient = kernel(X, eval_gradient=True)

    # Gradient shape: (n_samples, n_samples, n_hyperparameters).
    assert K_gradient.shape[0] == X.shape[0]
    assert K_gradient.shape[1] == X.shape[0]
    assert K_gradient.shape[2] == kernel.theta.shape[0]

    def eval_kernel_for_theta(theta):
        # Evaluate the kernel matrix at an arbitrary theta.
        kernel_clone = kernel.clone_with_theta(theta)
        K = kernel_clone(X, eval_gradient=False)
        return K

    K_gradient_approx = \
        _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)

    assert_almost_equal(K_gradient, K_gradient_approx, 4)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    'kernel',
    [kernel for kernel in kernels
     # skip non-basic kernels
     if not (isinstance(kernel, KernelOperator)
             or isinstance(kernel, Exponentiation))])
def test_kernel_theta(kernel):
    # Check that parameter vector theta of kernel is set correctly.
    theta = kernel.theta
    _, K_gradient = kernel(X, eval_gradient=True)

    # Determine kernel parameters that contribute to theta: every
    # constructor argument with a matching "*_bounds" argument.
    init_sign = signature(kernel.__class__.__init__).parameters.values()
    args = [p.name for p in init_sign if p.name != 'self']
    theta_vars = map(lambda s: s[0:-len("_bounds")],
                     filter(lambda s: s.endswith("_bounds"), args))
    assert (
        set(hyperparameter.name
            for hyperparameter in kernel.hyperparameters) ==
        set(theta_vars))

    # Check that values returned in theta are consistent with
    # hyperparameter values (being their logarithms).
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        assert (theta[i] == np.log(getattr(kernel, hyperparameter.name)))

    # Fixed kernel parameters must be excluded from theta and gradient.
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        # create copy with certain hyperparameter fixed
        params = kernel.get_params()
        params[hyperparameter.name + "_bounds"] = "fixed"
        kernel_class = kernel.__class__
        new_kernel = kernel_class(**params)
        # Check that theta and K_gradient are identical with the fixed
        # dimension left out.
        _, K_gradient_new = new_kernel(X, eval_gradient=True)
        assert theta.shape[0] == new_kernel.theta.shape[0] + 1
        assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
        if i > 0:
            # Use assert_array_equal instead of a bare ``==`` assert:
            # comparing numpy arrays with ``==`` yields an array whose
            # truth value is ambiguous once the slice holds more than
            # one element, so the bare assert would raise ValueError
            # instead of checking equality.
            assert_array_equal(theta[:i], new_kernel.theta[:i])
            assert_array_equal(K_gradient[..., :i],
                               K_gradient_new[..., :i])
        if i + 1 < len(kernel.hyperparameters):
            assert_array_equal(theta[i + 1:], new_kernel.theta[i:])
            assert_array_equal(K_gradient[..., i + 1:],
                               K_gradient_new[..., i:])

    # Check that values of theta are modified correctly.
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        theta[i] = np.log(42)
        kernel.theta = theta
        assert_almost_equal(getattr(kernel, hyperparameter.name), 42)

        setattr(kernel, hyperparameter.name, 43)
        assert_almost_equal(kernel.theta[i], np.log(43))


@pytest.mark.parametrize('kernel',
                         [kernel for kernel in kernels
                          # Identity is not satisfied on diagonal
                          if kernel != kernel_rbf_plus_white])
def test_auto_vs_cross(kernel):
    # k(X) and k(X, X) must agree; kernel_rbf_plus_white is excluded above
    # because WhiteKernel adds noise only in the auto-correlation case.
    K_auto = kernel(X)
    K_cross = kernel(X, X)
    assert_almost_equal(K_auto, K_cross, 5)


@pytest.mark.parametrize('kernel', kernels)
def test_kernel_diag(kernel):
    # kernel.diag(X) must match the diagonal of the full kernel matrix.
    full_matrix_diag = np.diag(kernel(X))
    shortcut_diag = kernel.diag(X)
    assert_almost_equal(full_matrix_diag, shortcut_diag, 5)


def test_kernel_operator_commutative():
    # Kernel addition and multiplication must be commutative.
    sum_left = (RBF(2.0) + 1.0)(X)
    sum_right = (1.0 + RBF(2.0))(X)
    assert_almost_equal(sum_left, sum_right)

    prod_left = (3.0 * RBF(2.0))(X)
    prod_right = (RBF(2.0) * 3.0)(X)
    assert_almost_equal(prod_left, prod_right)


def test_kernel_anisotropic():
    # An anisotropic kernel must agree with the isotropic kernel obtained
    # by rescaling the corresponding input dimension.
    kernel = 3.0 * RBF([0.5, 2.0])
    K = kernel(X)

    # Stretching dimension 0 by 4 maps length_scale 0.5 onto 2.0.
    X1 = np.array(X)
    X1[:, 0] *= 4
    assert_almost_equal(K, 3.0 * RBF(2.0)(X1))

    # Shrinking dimension 1 by 4 maps length_scale 2.0 onto 0.5.
    X2 = np.array(X)
    X2[:, 1] /= 4
    assert_almost_equal(K, 3.0 * RBF(0.5)(X2))

    # Check getting and setting via theta (log-space: constant factor 3,
    # then the two length scales halved/doubled by the log(2) shift).
    kernel.theta = kernel.theta + np.log(2)
    assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
    assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])


@pytest.mark.parametrize('kernel',
                         [kernel for kernel in kernels
                          if kernel.is_stationary()])
def test_kernel_stationary(kernel):
    # A stationary kernel depends only on the difference of its inputs,
    # so k(x, x + c) is the same value for every sample.
    K = kernel(X, X + 1)
    assert_almost_equal(K[0, 0], np.diag(K))


@pytest.mark.parametrize('kernel', kernels)
def test_kernel_input_type(kernel):
    # requires_vector_input of a composite kernel must be derived from
    # its constituent kernels.
    if isinstance(kernel, Exponentiation):
        assert (kernel.requires_vector_input ==
                kernel.kernel.requires_vector_input)
    if isinstance(kernel, KernelOperator):
        expected = (kernel.k1.requires_vector_input or
                    kernel.k2.requires_vector_input)
        assert kernel.requires_vector_input == expected


def test_compound_kernel_input_type():
    # A CompoundKernel requires vector input iff any sub-kernel does.
    white_only = CompoundKernel([WhiteKernel(noise_level=3.0)])
    assert not white_only.requires_vector_input

    with_rbf = CompoundKernel([WhiteKernel(noise_level=3.0),
                               RBF(length_scale=2.0)])
    assert with_rbf.requires_vector_input


def check_hyperparameters_equal(kernel1, kernel2):
    # Assert that the two kernels expose identical hyperparameter
    # descriptors (every ``hyperparameter_*`` attribute of either one).
    for attr in set(dir(kernel1) + dir(kernel2)):
        if not attr.startswith("hyperparameter_"):
            continue
        assert getattr(kernel1, attr) == getattr(kernel2, attr)


@pytest.mark.parametrize("kernel", kernels)
def test_kernel_clone(kernel):
    # Test that sklearn's clone works correctly on kernels.
    kernel_cloned = clone(kernel)

    # XXX: Should this be fixed?
    # This differs from the sklearn's estimators equality check.
    assert kernel == kernel_cloned
    assert id(kernel) != id(kernel_cloned)

    # Constructor parameters and hyperparameters must all carry over.
    assert kernel.get_params() == kernel_cloned.get_params()
    check_hyperparameters_equal(kernel, kernel_cloned)


@pytest.mark.parametrize('kernel', kernels)
def test_kernel_clone_after_set_params(kernel):
    # Verify that using set_params does not break clone on kernels.
    # This used to break because non-trivial length-scale logic lived in
    # the RBF constructor; see
    # https://github.com/scikit-learn/scikit-learn/issues/6961
    # for more details.
    bounds = (1e-5, 1e5)
    kernel_cloned = clone(kernel)
    params = kernel.get_params()
    # RationalQuadratic kernel is isotropic.
    isotropic_kernels = (ExpSineSquared, RationalQuadratic)
    if 'length_scale' in params and not isinstance(kernel,
                                                   isotropic_kernels):
        length_scale = params['length_scale']
        if np.iterable(length_scale):
            # XXX unreached code as of v0.22
            params['length_scale'] = length_scale[0]
            params['length_scale_bounds'] = bounds
        else:
            # Promote the scalar length scale to a 2-d anisotropic one.
            params['length_scale'] = [length_scale] * 2
            params['length_scale_bounds'] = bounds * 2
        kernel_cloned.set_params(**params)
        kernel_cloned_clone = clone(kernel_cloned)
        assert (kernel_cloned_clone.get_params() ==
                kernel_cloned.get_params())
        assert id(kernel_cloned_clone) != id(kernel_cloned)
        check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)


def test_matern_kernel():
    # Test consistency of Matern kernel for special values of nu.
    K = Matern(nu=1.5, length_scale=1.0)(X)
    # The diagonal of a Matern kernel is all ones.
    assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))

    # nu == 0.5 reduces to the absolute-exponential kernel.
    K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
    K = Matern(nu=0.5, length_scale=1.0)(X)
    assert_array_almost_equal(K, K_absexp)

    # nu == inf reduces to the RBF kernel.
    K_rbf = RBF(length_scale=1.0)(X)
    K = Matern(nu=np.inf, length_scale=1.0)(X)
    assert_array_almost_equal(K, K_rbf)
    assert_allclose(K, K_rbf)

    # The closed-form special cases (nu in [0.5, 1.5, 2.5]) must nearly
    # match the generic implementation evaluated at nu + tiny.
    tiny = 1e-10
    for nu in [0.5, 1.5, 2.5]:
        K1 = Matern(nu=nu, length_scale=1.0)(X)
        K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
        assert_array_almost_equal(K1, K2)

    # Large nu approaches the RBF kernel (loose tolerance).
    large = 100
    K1 = Matern(nu=large, length_scale=1.0)(X)
    K2 = RBF(length_scale=1.0)(X)
    assert_array_almost_equal(K1, K2, decimal=2)


@pytest.mark.parametrize("kernel", kernels)
def test_kernel_versus_pairwise(kernel):
    # GP kernels must be usable as callable metrics in pairwise_kernels.

    # Auto-kernel. kernel_rbf_plus_white is skipped: for WhiteKernel
    # k(X) != k(X, X), whereas pairwise_kernels always evaluates the
    # two-argument form.
    if kernel != kernel_rbf_plus_white:
        assert_array_almost_equal(kernel(X),
                                  pairwise_kernels(X, metric=kernel))

    # Cross-kernel.
    assert_array_almost_equal(kernel(X, Y),
                              pairwise_kernels(X, Y, metric=kernel))


@pytest.mark.parametrize("kernel", kernels)
def test_set_get_params(kernel):
    # Check that set_params()/get_params() is consistent with kernel.theta.

    # Test get_params()
    index = 0
    params = kernel.get_params()
    for hyperparameter in kernel.hyperparameters:
        # ``isinstance(hyperparameter.bounds, str)`` replaces the original
        # inverted ``isinstance("string", type(...))`` construct; both
        # detect string-valued bounds (the only string value used is
        # "fixed"), but this form is the idiomatic one.
        if isinstance(hyperparameter.bounds, str):
            if hyperparameter.bounds == "fixed":
                # Fixed hyperparameters do not appear in theta.
                continue
        size = hyperparameter.n_elements
        if size > 1:  # anisotropic kernels
            assert_almost_equal(np.exp(kernel.theta[index:index + size]),
                                params[hyperparameter.name])
            index += size
        else:
            assert_almost_equal(np.exp(kernel.theta[index]),
                                params[hyperparameter.name])
            index += 1
    # Test set_params()
    index = 0
    value = 10  # arbitrary value
    for hyperparameter in kernel.hyperparameters:
        if isinstance(hyperparameter.bounds, str):
            if hyperparameter.bounds == "fixed":
                continue
        size = hyperparameter.n_elements
        if size > 1:  # anisotropic kernels
            kernel.set_params(**{hyperparameter.name: [value] * size})
            assert_almost_equal(np.exp(kernel.theta[index:index + size]),
                                [value] * size)
            index += size
        else:
            kernel.set_params(**{hyperparameter.name: value})
            assert_almost_equal(np.exp(kernel.theta[index]), value)
            index += 1


@pytest.mark.parametrize("kernel", kernels)
def test_repr_kernels(kernel):
    # Smoke-test: repr() must not raise for any kernel.
    repr(kernel)


def test_warns_on_get_params_non_attribute():
    # get_params() must warn (FutureWarning mentioning AttributeError)
    # rather than raise when a constructor parameter was never stored as
    # an attribute, and must report that parameter as None.
    class MyKernel(Kernel):
        def __init__(self, param=5):
            # Deliberately does NOT store ``param`` on the instance.
            pass

        def __call__(self, X, Y=None, eval_gradient=False):
            return X

        def diag(self, X):
            return np.ones(X.shape[0])

        def is_stationary(self):
            return False

    broken = MyKernel()
    with pytest.warns(FutureWarning, match='AttributeError'):
        params = broken.get_params()

    assert params['param'] is None


def test_rational_quadratic_kernel():
    # RationalQuadratic supports only a scalar (isotropic) length scale;
    # evaluating one built with a vector must raise an informative error.
    kernel = RationalQuadratic(length_scale=[1., 1.])
    expected_message = ("RationalQuadratic kernel only supports isotropic "
                        "version, please use a single scalar for "
                        "length_scale")
    assert_raise_message(AttributeError, expected_message, kernel, X)
Loading…
Add table
Add a link
Reference in a new issue