diff --git a/.gitignore b/.gitignore index 72364f9..97dc56b 100644 --- a/.gitignore +++ b/.gitignore @@ -87,3 +87,6 @@ ENV/ # Rope project settings .ropeproject + +# Pycharm IDE directory +.idea diff --git a/.travis.yml b/.travis.yml index 21797f3..eac4622 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,6 +9,6 @@ install: - pip install tensorflow==1.0.1 numpy scipy nose pandas codecov coverage - python setup.py install script: - - nosetests --nologcapture --with-coverage --cover-package=testing testing + - nosetests --nologcapture --with-coverage --cover-package=GPflowOpt testing after_success: - codecov diff --git a/GPflowOpt/__init__.py b/GPflowOpt/__init__.py index e69de29..d342abf 100644 --- a/GPflowOpt/__init__.py +++ b/GPflowOpt/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2017 Joachim van der Herten +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import acquisition +from . import domain +from .bo import BayesianOptimizer +from . import optim +from . import design diff --git a/GPflowOpt/acquisition.py b/GPflowOpt/acquisition.py new file mode 100644 index 0000000..adcd209 --- /dev/null +++ b/GPflowOpt/acquisition.py @@ -0,0 +1,214 @@ +# Copyright 2017 Joachim van der Herten +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from GPflow.param import Parameterized, AutoFlow, ParamList, DataHolder +from GPflow.model import Model +from GPflow._settings import settings + +import numpy as np + +import tensorflow as tf + +float_type = settings.dtypes.float_type +stability = settings.numerics.jitter_level + + +class Acquisition(Parameterized): + """ + Class representing acquisition functions which map the predictive distribution of a Bayesian model into a score. In + Bayesian Optimization this function is typically optimized over the optimization domain to determine the next + point for evaluation. + + The acquisition function object maintains a list of models. Subclasses should implement a build_acquisition + function which computes the acquisition function using tensorflow. 
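+
+    A hypothetical subclass sketch (MyAcquisition is not part of the library):
+
+        class MyAcquisition(Acquisition):
+            def build_acquisition(self, Xcand):
+                mean, _ = self.models[0].build_predict(Xcand)
+                return -mean  # e.g., greedily prefer the lowest posterior mean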
+
+    Acquisition functions can be added or multiplied to construct joint criteria
+    (for instance for constrained optimization).
+    """
+
+    def __init__(self, models=[], optimize_restarts=5):
+        super(Acquisition, self).__init__()
+        self.models = ParamList(np.atleast_1d(models).tolist())
+        # Wrap in list() so the default hyperparameter states survive repeated iteration (map() is single-pass in Python 3)
+        self._default_params = list(map(lambda m: m.get_free_state(), self.models))
+
+        assert(optimize_restarts >= 0)
+        self._optimize_restarts = optimize_restarts
+        self._optimize_all()
+
+    def _optimize_all(self):
+        for model, hypers in zip(self.models, self._default_params):
+            runs = []
+            # Start from supplied hyperparameters
+            model.set_state(hypers)
+            for i in range(self._optimize_restarts):
+                if i > 0:
+                    model.randomize()
+                try:
+                    result = model.optimize()
+                    runs.append(result)
+                except tf.errors.InvalidArgumentError:
+                    print("Warning - optimization restart {0}/{1} failed".format(i + 1, self._optimize_restarts))
+            best_idx = np.argmin([r.fun for r in runs])
+            model.set_state(runs[best_idx].x)
+
+    def _build_acquisition_wrapper(self, Xcand, gradients=True):
+        acq = self.build_acquisition(Xcand)
+        if gradients:
+            return acq, tf.gradients(acq, [Xcand], name="acquisition_gradient")[0]
+        else:
+            return acq
+
+    def set_data(self, X, Y):
+        num_outputs_sum = 0
+        for model in self.models:
+            num_outputs = model.Y.shape[1]
+            Ypart = Y[:, num_outputs_sum:num_outputs_sum + num_outputs]
+            num_outputs_sum += num_outputs
+
+            model.X = X
+            model.Y = Ypart
+
+        self._optimize_all()
+        self.setup()
+        return num_outputs_sum
+
+    @property
+    def data(self):
+        if self._tf_mode:
+            return self.models[0].X, tf.concat(list(map(lambda model: model.Y, self.models)), 1)
+        else:
+            return self.models[0].X.value, np.hstack(list(map(lambda model: model.Y.value, self.models)))
+
+    def constraint_indices(self):
+        return np.empty((0,), dtype=int)
+
+    def objective_indices(self):
+        return np.setdiff1d(np.arange(self.data[1].shape[1]), self.constraint_indices())
+
+    def setup(self):
+        """
+        Method triggered after calling set_data().
Override for pre-calculation of quantities used later in + the evaluation of the acquisition function for candidate points + """ + pass + + @AutoFlow((float_type, [None, None])) + def evaluate_with_gradients(self, Xcand): + return self._build_acquisition_wrapper(Xcand, gradients=True) + + @AutoFlow((float_type, [None, None])) + def evaluate(self, Xcand): + return self._build_acquisition_wrapper(Xcand, gradients=False) + + def __add__(self, other): + return AcquisitionSum(self, other) + + def __mul__(self, other): + return AcquisitionProduct(self, other) + + +class ExpectedImprovement(Acquisition): + """ + Implementation of the Expected Improvement acquisition function for single-objective global optimization + (Mockus et al, 1975) + """ + + def __init__(self, model): + super(ExpectedImprovement, self).__init__(model) + assert (isinstance(model, Model)) + self.fmin = DataHolder(np.zeros(1)) + self.setup() + + def setup(self): + super(ExpectedImprovement, self).setup() + # Obtain the lowest posterior mean for the previous evaluations + samples_mean, _ = self.models[0].predict_f(self.data[0]) + self.fmin.set_data(np.min(samples_mean, axis=0)) + # samples_mean, _ = self.models[0].build_predict(self.data[0]) + # self.fmin = tf.reduce_min(samples_mean, axis=0) + + def build_acquisition(self, Xcand): + # Obtain predictive distributions for candidates + candidate_mean, candidate_var = self.models[0].build_predict(Xcand) + candidate_var = tf.maximum(candidate_var, stability) + + # Compute EI + normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var)) + t1 = (self.fmin - candidate_mean) * normal.cdf(self.fmin) + t2 = candidate_var * normal.pdf(self.fmin) + return tf.add(t1, t2, name=self.__class__.__name__) + + +class ProbabilityOfFeasibility(Acquisition): + """ + Probability of Feasibility acquisition function for learning feasible regions + """ + + def __init__(self, model): + super(ProbabilityOfFeasibility, self).__init__(model) + + def constraint_indices(self): + return np.arange(self.data[1].shape[1]) + + def build_acquisition(self, Xcand): + candidate_mean, candidate_var = self.models[0].build_predict(Xcand) + candidate_var = tf.maximum(candidate_var, stability) + normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var)) + return normal.cdf(tf.constant(0.0, dtype=float_type), name=self.__class__.__name__) + + +class AcquisitionBinaryOperator(Acquisition): + """ + Base class for implementing binary operators for acquisition functions, for defining joint acquisition functions + """ + + def __init__(self, lhs, rhs, oper): + super(AcquisitionBinaryOperator, self).__init__() + assert isinstance(lhs, Acquisition) + assert isinstance(rhs, Acquisition) + self.lhs = lhs + self.rhs = rhs + self._oper = oper + + @Acquisition.data.getter + def data(self): + lhsX, lhsY = self.lhs.data + rhsX, rhsY = self.rhs.data + if self._tf_mode: + return lhsX, tf.concat((lhsY, rhsY), 1) + else: + return lhsX, np.hstack((lhsY, rhsY)) + + def set_data(self, X, Y): + offset_lhs = self.lhs.set_data(X, Y) + offset_rhs = self.rhs.set_data(X, Y[:, offset_lhs:]) + return offset_lhs + offset_rhs + + def constraint_indices(self): + offset = self.lhs.data[1].shape[1] + return np.hstack((self.lhs.constraint_indices(), offset + self.rhs.constraint_indices())) + + def build_acquisition(self, Xcand): + return self._oper(self.lhs.build_acquisition(Xcand), self.rhs.build_acquisition(Xcand), + name=self.__class__.__name__) + + +class AcquisitionSum(AcquisitionBinaryOperator): + def 
__init__(self, lhs, rhs):
+        super(AcquisitionSum, self).__init__(lhs, rhs, tf.add)
+
+
+class AcquisitionProduct(AcquisitionBinaryOperator):
+    def __init__(self, lhs, rhs):
+        super(AcquisitionProduct, self).__init__(lhs, rhs, tf.multiply)
diff --git a/GPflowOpt/bo.py b/GPflowOpt/bo.py
new file mode 100644
index 0000000..18f58e7
--- /dev/null
+++ b/GPflowOpt/bo.py
@@ -0,0 +1,135 @@
+# Copyright 2017 Joachim van der Herten
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from scipy.optimize import OptimizeResult
+
+from .acquisition import Acquisition
+from .optim import Optimizer, SciPyOptimizer
+from .design import EmptyDesign
+
+
+class BayesianOptimizer(Optimizer):
+    """
+    Specific optimizer representing Bayesian optimization. Takes a domain, an acquisition function and an optimization
+    strategy for the acquisition function.
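+
+    A minimal usage sketch (model, domain and fx are hypothetical; fx maps candidate points to an (n x 1) array):
+
+        acquisition = ExpectedImprovement(model)
+        optimizer = BayesianOptimizer(domain, acquisition)
+        result = optimizer.optimize(fx, n_iter=15)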
+    """
+
+    def __init__(self, domain, acquisition, optimizer=None, initial=None):
+        assert isinstance(acquisition, Acquisition)
+        super(BayesianOptimizer, self).__init__(domain, exclude_gradient=True)
+        self.acquisition = acquisition
+        if initial is None:
+            initial = EmptyDesign(domain)
+        self.set_initial(initial.generate())
+        self.optimizer = SciPyOptimizer(domain) if optimizer is None else optimizer
+
+    def _update_model_data(self, newX, newY):
+        """
+        Update the underlying models of the acquisition function with new data.
+        :param newX: samples (# new samples x indim)
+        :param newY: values obtained by evaluating the objective and constraint functions (# new samples x # targets)
+        """
+        assert self.acquisition.data[0].shape[1] == newX.shape[-1]
+        assert self.acquisition.data[1].shape[1] == newY.shape[-1]
+        assert newX.shape[0] == newY.shape[0]
+        X = np.vstack((self.acquisition.data[0], newX))
+        Y = np.vstack((self.acquisition.data[1], newY))
+        self.acquisition.set_data(X, Y)
+
+    def _acquisition_wrapper(self, x):
+        """
+        Negation of the acquisition score/gradient.
+        :param x: candidate points (# candidates x indim)
+        :return: negated score and gradient (the acquisition function is maximized, the underlying algorithm minimizes)
+        """
+        scores, grad = self.acquisition.evaluate_with_gradients(np.atleast_2d(x))
+        return -scores, -grad
+
+    def _evaluate(self, X, fxs):
+        fxs = np.atleast_1d(fxs)
+        if X.size > 0:
+            evaluations = np.hstack([f(X) for f in fxs])
+            assert evaluations.shape[1] == self.acquisition.data[1].shape[1]
+            return evaluations
+        else:
+            return np.empty((0, self.acquisition.data[1].shape[1]))
+
+    def _create_result(self, success, message):
+        """
+        Given all data evaluated during the optimization, analyse it and return an OptimizeResult.
+        :param success: Optimization successful? (True/False)
+        :param message: return message
+        :return: OptimizeResult object
+        """
+        X, Y = self.acquisition.data
+
+        # Filter on constraints
+        # Extend with a valid column - in case there are no constraints
+        Yext = np.hstack((-np.ones((Y.shape[0], 1)), Y[:, self.acquisition.constraint_indices()]))
+        valid = np.all(Yext < 0, axis=1)
+
+        if not np.any(valid):
+            return OptimizeResult(success=False,
+                                  message="No evaluations satisfied the constraints")
+
+        valid_X = X[valid, :]
+        valid_Y = Y[valid, :]
+        valid_Yo = valid_Y[:, self.acquisition.objective_indices()]
+
+        # Here is the place to plug in the Pareto front if valid_Y.shape[1] > 1
+        # else
+        idx = np.argmin(valid_Yo)
+
+        return OptimizeResult(x=valid_X[idx, :],
+                              success=success,
+                              fun=valid_Yo[idx, :],
+                              message=message)
+
+    def optimize(self, objectivefx, n_iter=20):
+        """
+        Run Bayesian optimization for a number of iterations. In each iteration a new data point is selected by
+        optimizing an acquisition function. This point is evaluated on the objective and black-box constraints,
+        and the retrieved information is used to update the models.
+        :param objectivefx: (list of) expensive black-box objective and constraint functions
+        :param n_iter: number of iterations to run
+        :return: OptimizeResult object
+        """
+
+        # Bayesian optimization is gradient-free: provide a wrapper. Also anticipate lists and pass on a function
+        # which satisfies the optimizer.optimize interface
+        fx = lambda X: (self._evaluate(X, objectivefx), np.zeros((X.shape[0], 0)))
+        return super(BayesianOptimizer, self).optimize(fx, n_iter=n_iter)
+
+    def _optimize(self, fx, n_iter):
+        """
+        Internal optimization function. Receives an ObjectiveWrapper.
+        :param fx: ObjectiveWrapper object wrapping expensive black-box objective and constraint functions
+        :param n_iter: number of iterations to run
+        :return: OptimizeResult object
+        """
+
+        # Evaluate and add the initial design (if any)
+        initial = self.get_initial()
+        values = fx(initial)
+        self._update_model_data(initial, values)
+        # Remove the initial design so additional calls to optimize continue from the current data
+        self.set_initial(EmptyDesign(self.domain).generate())
+
+        # Optimization loop
+        for i in range(n_iter):
+            result = self.optimizer.optimize(self._acquisition_wrapper)
+            self._update_model_data(result.x, fx(result.x))
+
+        return self._create_result(True, "OK")
diff --git a/GPflowOpt/design.py b/GPflowOpt/design.py
new file mode 100644
index 0000000..d01994e
--- /dev/null
+++ b/GPflowOpt/design.py
@@ -0,0 +1,69 @@
+# Copyright 2017 Joachim van der Herten
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+
+class Design(object):
+    """
+    Space-filling designs generated within a domain.
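+
+    A short usage sketch (RandomDesign is defined below; the domain is hypothetical):
+
+        design = RandomDesign(10, domain)
+        X = design.generate()  # (10 x domain.size) array of points inside the domain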
+ """ + + def __init__(self, size, domain): + super(Design, self).__init__() + self.size = size + self.domain = domain + + def generate(self): + pass + + +class RandomDesign(Design): + """ + Random space-filling design + """ + + def __init__(self, size, domain): + super(RandomDesign, self).__init__(size, domain) + + def generate(self): + X = np.random.rand(self.size, self.domain.size) + return X * (self.domain.upper - self.domain.lower) + self.domain.lower + + +class FactorialDesign(Design): + """ + Grid-based design + """ + + def __init__(self, levels, domain): + self.levels = levels + size = levels ** domain.size + super(FactorialDesign, self).__init__(size, domain) + + def generate(self): + Xs = np.meshgrid(*[np.linspace(l, u, self.levels) for l, u in zip(self.domain.lower, self.domain.upper)]) + return np.vstack(map(lambda X: X.ravel(), Xs)).T + + +class EmptyDesign(Design): + """ + No design, used as placeholder + """ + + def __init__(self, domain): + super(EmptyDesign, self).__init__(0, domain) + + def generate(self): + return np.empty((0, self.domain.size)) diff --git a/GPflowOpt/domain.py b/GPflowOpt/domain.py new file mode 100644 index 0000000..55024a7 --- /dev/null +++ b/GPflowOpt/domain.py @@ -0,0 +1,140 @@ +# Copyright 2017 Joachim van der Herten +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from itertools import chain + +from GPflow.param import Parentable + + +class Domain(Parentable): + """ + Basic class, representing an optimization domain by aggregating several parameters. 
+ """ + + def __init__(self, parameters): + super(Domain, self).__init__() + self._parameters = parameters + + @property + def lower(self): + return np.array(list(map(lambda param: param.lower, self._parameters))).flatten() + + @property + def upper(self): + return np.array(list(map(lambda param: param.upper, self._parameters))).flatten() + + # def optimize(self, optimizer, objectivefx): + # optimizer.domain = self + # result = optimizer.optimize(objectivefx) + + def __add__(self, other): + assert isinstance(other, Domain) + return Domain(self._parameters + other._parameters) + + @property + def size(self): + return sum(map(lambda param: param.size, self._parameters)) + + def __setattr__(self, key, value): + super(Domain, self).__setattr__(key, value) + if key is not '_parent': + if isinstance(value, Parentable): + value._parent = self + if isinstance(value, list): + for val in (x for x in value if isinstance(x, Parentable)): + val._parent = self + + def __eq__(self, other): + return self._parameters == other._parameters + + def __contains__(self, X): + X = np.atleast_2d(X) + if X.shape[1] is not self.size: + return False + return np.all(np.logical_and((self.lower <= X), (X <= self.upper))) + + def __iter__(self): + for v in chain(*map(iter, self._parameters)): + yield v + + def __getitem__(self, item): + return self._parameters[item] + + @property + def value(self): + return np.vstack(map(lambda p: p.value, self._parameters)).T + + @value.setter + def value(self, x): + x = np.atleast_2d(x) + assert (len(x.shape) == 2) + assert (x.shape[1] == self.size) + offset = 0 + for p in self._parameters: + p.value = x[:, offset:offset + p.size] + offset += p.size + + +class Parameter(Domain): + """ + Abstract class representing a parameter (which corresponds to a one-dimensional domain) + This class can be derived for continuous, discrete and categorical parameters + """ + + def __init__(self, label, xinit): + super(Parameter, self).__init__([self]) + self.label = label + self._x = np.atleast_1d(xinit) + + @Domain.size.getter + def size(self): + return 1 + + def __iter__(self): + yield self + + @Domain.value.getter + def value(self): + return self._x + + @value.setter + def value(self, x): + x = np.atleast_1d(x) + self._x = x.ravel() + + +class ContinuousParameter(Parameter): + def __init__(self, label, lb, ub, xinit=None): + self._range = [lb, ub] + super(ContinuousParameter, self).__init__(label, xinit or ((ub + lb) / 2.0)) + + @Parameter.lower.getter + def lower(self): + return self._range[0] + + @Parameter.upper.getter + def upper(self): + return self._range[1] + + @lower.setter + def lower(self, value): + self._range[0] = value + + @upper.setter + def upper(self, value): + self._range[1] = value + + def __eq__(self, other): + return isinstance(other, ContinuousParameter) and self.lower == other.lower and self.upper == other.upper diff --git a/GPflowOpt/optim.py b/GPflowOpt/optim.py new file mode 100644 index 0000000..15dfe49 --- /dev/null +++ b/GPflowOpt/optim.py @@ -0,0 +1,169 @@ +# Copyright 2017 Joachim van der Herten +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from scipy.optimize import OptimizeResult, minimize +from GPflow import model +from GPflow._settings import settings + +from .design import RandomDesign + + +class ObjectiveWrapper(model.ObjectiveWrapper): + def __init__(self, objective, exclude_gradient): + super(ObjectiveWrapper, self).__init__(objective) + self._no_gradient = exclude_gradient + self.counter = 0 + + def __call__(self, x): + x = np.atleast_2d(x) + f, g = super(ObjectiveWrapper, self).__call__(x) + self.counter += x.shape[0] + if self._no_gradient: + return f + return f, g + + +class Optimizer(object): + """ + Generic class representing an optimizer. Wraps an optimization algorithm over a domain, starting from an initial + (set of) point(s). + """ + + def __init__(self, domain, exclude_gradient=False): + super(Optimizer, self).__init__() + self.domain = domain + self._wrapper_args = dict(exclude_gradient=exclude_gradient) + self._initial = domain.value + + def optimize(self, objectivefx, **kwargs): + objective = ObjectiveWrapper(objectivefx, **self._wrapper_args) + try: + result = self._optimize(objective, **kwargs) + except KeyboardInterrupt: + result = OptimizeResult(x=objective._previous_x, + success=False, + message="Caught KeyboardInterrupt, returning last good value.") + result.x = np.atleast_2d(result.x) + result.nfev = objective.counter + return result + + def get_initial(self): + return self._initial + + def set_initial(self, initial): + initial = np.atleast_2d(initial) + assert (initial.shape[1] == self.domain.size) + self._initial = initial + + def gradient_enabled(self): + return not self._wrapper_args['exclude_gradient'] + + +class CandidateOptimizer(Optimizer): + """ + This optimizer optimizes an objective function by evaluating of a set of candidate points (and returning the point + with minimal objective value. + """ + + def __init__(self, domain, candidates, batch=False): + super(CandidateOptimizer, self).__init__(domain, exclude_gradient=True) + self.candidates = candidates + self._batch_mode = batch + + def get_initial(self): + return np.vstack((super(CandidateOptimizer, self).get_initial(), self.candidates)) + + def _evaluate_one_by_one(self, objective, X): + return np.vstack(map(lambda x: objective(x), X)) + + def _optimize(self, objective): + points = self.get_initial() + evaluations = objective(points) if self._batch_mode else self._evaluate_one_by_one(objective, points) + idx_best = np.argmin(evaluations, axis=0) + + return OptimizeResult(x=points[idx_best, :], + success=True, + fun=evaluations[idx_best, :], + nfev=points.shape[0], + message="OK") + + +class MCOptimizer(CandidateOptimizer): + """ + This class represents optimization of an objective function by evaluating a set of random points. Each call to + optimize, a different set of random points is evaluated. + """ + + def __init__(self, domain, nsamples, batch=False): + super(MCOptimizer, self).__init__(domain, np.empty((0, domain.size)), batch=batch) + self._design = RandomDesign(nsamples, domain) + + def _optimize(self, objective): + self.candidates = self._design.generate() + return super(MCOptimizer, self)._optimize(objective) + + +class SciPyOptimizer(Optimizer): + """ + Wraps optimization with scipy's minimize function. 
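+
+    A hedged usage sketch (objectivefx is a hypothetical function returning values and gradients):
+
+        optimizer = SciPyOptimizer(domain, method='L-BFGS-B', maxiter=50)
+        result = optimizer.optimize(objectivefx)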
+ """ + + def __init__(self, domain, method='L-BFGS-B', tol=None, maxiter=1000): + super(SciPyOptimizer, self).__init__(domain) + options = dict(disp=settings.verbosity.optimisation_verb, + maxiter=maxiter) + self.config = dict(tol=tol, + method=method, + options=options) + + def _optimize(self, objective): + objective1d = lambda X: tuple(map(lambda arr: arr.ravel(), objective(X))) + result = minimize(fun=objective1d, + x0=self.get_initial(), + jac=self.gradient_enabled(), + bounds=list(zip(self.domain.lower, self.domain.upper)), + **self.config) + return result + + +class StagedOptimizer(Optimizer): + """ + Represents an optimization pipeline. A list of optimizers can be specified (all on the same domain). The optimal + solution of the an optimizer is used as an initial point for the next optimizer. + """ + + def __init__(self, optimizers): + assert all(map(lambda opt: optimizers[0].domain == opt.domain, optimizers)) + no_gradient = any(map(lambda opt: not opt.gradient_enabled(), optimizers)) + super(StagedOptimizer, self).__init__(optimizers[0].domain, exclude_gradient=no_gradient) + self.optimizers = optimizers + + def optimize(self, objectivefx): + self.optimizers[0].set_initial(self.get_initial()) + fun_evals = [] + for current, next in zip(self.optimizers[:-1], self.optimizers[1:]): + result = current.optimize(objectivefx) + fun_evals.append(result.nfev) + if not result.success: + result.message += " StagedOptimizer interrupted after {0}.".format(current.__class__.__name__) + break + next.set_initial(result.x) + + if result.success: + result = self.optimizers[-1].optimize(objectivefx) + fun_evals.append(result.nfev) + result.nfev = sum(fun_evals) + result.nstages = len(fun_evals) + return result diff --git a/README.md b/README.md index 5a69383..213cce0 100644 --- a/README.md +++ b/README.md @@ -8,3 +8,4 @@ If you're interested in helping shape GPflowOpt, let us know via an issue on thi Bayesian Optimization using GPflow [![Build Status](https://travis-ci.org/GPflow/GPflowOpt.svg?branch=master)](https://travis-ci.org/GPflow/GPflowOpt) +[![Coverage Status](https://codecov.io/gh/GPflow/GPflowOpt/branch/master/graph/badge.svg)](https://codecov.io/gh/GPflow/GPflowOpt) diff --git a/testing/test_acquisition.py b/testing/test_acquisition.py new file mode 100644 index 0000000..e6afb6d --- /dev/null +++ b/testing/test_acquisition.py @@ -0,0 +1,248 @@ +import GPflowOpt +import unittest +import numpy as np +import GPflow +import tensorflow as tf + + +def parabola2d(X): + return np.atleast_2d(np.sum(X ** 2, axis=1)).T + + +def plane(X): + return X[:, [0]] - 0.5 + + +class _TestAcquisition(object): + """ + Defines some basic verifications for all acquisition functions. 
Test classes can derive from this + """ + + @property + def domain(self): + return np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)]) + + def create_parabola_model(self, design=None): + if design is None: + design = GPflowOpt.design.FactorialDesign(4, self.domain) + X, Y = design.generate(), parabola2d(design.generate()) + m = GPflow.gpr.GPR(X, Y, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.var(axis=0))) + m.kern.variance.prior = GPflow.priors.Gamma(3.0, 1.0 / 3.0) + return m + + def create_plane_model(self, design=None): + if design is None: + design = GPflowOpt.design.FactorialDesign(5, self.domain) + X, Y = design.generate(), plane(design.generate()) + m = GPflow.gpr.GPR(X, Y, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.std(axis=0))) + return m + + def setUp(self): + self.acquisition = None + + def test_result_shape(self): + # Verify the returned shape of evaluate + design = GPflowOpt.design.RandomDesign(50, self.domain) + + with tf.Graph().as_default(): + free_vars = tf.placeholder(tf.float64, [None]) + l = self.acquisition.make_tf_array(free_vars) + x_tf = tf.placeholder(tf.float64, shape=(50, 2)) + with self.acquisition.tf_mode(): + tens = self.acquisition.build_acquisition(x_tf) + self.assertTrue(isinstance(tens, tf.Tensor), msg="no Tensor was returned") + self.assertListEqual(tens.get_shape().as_list(), [50, None], msg="Tensor of incorrect shape returned") + + res = self.acquisition.evaluate(design.generate()) + self.assertTupleEqual(res.shape, (50, 1), + msg="Incorrect shape returned for evaluate of {0}".format(self.__class__.__name__)) + res = self.acquisition.evaluate_with_gradients(design.generate()) + self.assertTrue(isinstance(res, tuple)) + self.assertTrue(len(res), 2) + self.assertTupleEqual(res[0].shape, (50, 1), + msg="Incorrect shape returned for evaluate of {0}".format(self.__class__.__name__)) + self.assertTupleEqual(res[1].shape, (50, self.domain.size), + msg="Incorrect shape returned for gradient of {0}".format(self.__class__.__name__)) + + def test_data(self): + # Test the data property + with tf.Graph().as_default(): + free_vars = tf.placeholder(tf.float64, [None]) + l = self.acquisition.make_tf_array(free_vars) + with self.acquisition.tf_mode(): + self.assertTrue(isinstance(self.acquisition.data[0], tf.Tensor), + msg="data property should return Tensors") + self.assertTrue(isinstance(self.acquisition.data[1], tf.Tensor), + msg="data property should return Tensors") + + def test_data_update(self): + # Verify a data update + design = GPflowOpt.design.RandomDesign(10, self.domain) + X = np.vstack((self.acquisition.data[0], design.generate())) + Y = np.hstack([parabola2d(X)] * self.acquisition.data[1].shape[1]) + self.acquisition.set_data(X, Y) + np.testing.assert_allclose(self.acquisition.data[0], X, err_msg="Samples not updated") + np.testing.assert_allclose(self.acquisition.data[1], Y, err_msg="Values not updated") + + +class TestExpectedImprovement(_TestAcquisition, unittest.TestCase): + def setUp(self): + super(TestExpectedImprovement, self).setUp() + self.model = self.create_parabola_model() + self.acquisition = GPflowOpt.acquisition.ExpectedImprovement(self.model) + + def test_object_integrity(self): + self.assertEqual(len(self.acquisition.models), 1, msg="Model list has incorrect length.") + self.assertEqual(self.acquisition.models[0], self.model, msg="Incorrect model stored in ExpectedImprovement") + self.assertEqual(self.acquisition.objective_indices(), np.arange(1, dtype=int), + msg="ExpectedImprovement 
returns all objectives") + + def test_setup(self): + fmin = np.min(self.acquisition.data[1]) + self.assertGreater(self.acquisition.fmin.value, 0, msg="The minimum (0) is not amongst the design.") + self.assertTrue(np.allclose(self.acquisition.fmin.value, fmin, atol=1e-2), msg="fmin computed incorrectly") + + # Now add the actual minimum + p = np.array([[0.0, 0.0]]) + self.acquisition.set_data(np.vstack((self.model.X.value, p)), + np.vstack((self.model.Y.value, parabola2d(p)))) + self.assertTrue(np.allclose(self.acquisition.fmin.value, 0, atol=1e-1), msg="fmin not updated") + + def test_EI_validity(self): + Xcenter = np.random.rand(20, 2) * 0.25 - 0.125 + X = np.random.rand(100, 2) * 2 - 1 + hor_idx = np.abs(X[:, 0]) > 0.8 + ver_idx = np.abs(X[:, 1]) > 0.8 + Xborder = np.vstack((X[hor_idx, :], X[ver_idx, :])) + ei1 = self.acquisition.evaluate(Xborder) + ei2 = self.acquisition.evaluate(Xcenter) + self.assertGreater(np.min(ei2), np.max(ei1)) + + +class TestProbabilityOfFeasibility(_TestAcquisition, unittest.TestCase): + def setUp(self): + super(TestProbabilityOfFeasibility, self).setUp() + self.model = self.create_plane_model() + self.acquisition = GPflowOpt.acquisition.ProbabilityOfFeasibility(self.model) + + def test_object_integrity(self): + self.assertEqual(len(self.acquisition.models), 1, msg="Model list has incorrect length.") + self.assertEqual(self.acquisition.models[0], self.model, msg="Incorrect model stored in PoF") + self.assertEqual(self.acquisition.constraint_indices(), np.arange(1, dtype=int), + msg="PoF returns all constraints") + + def test_PoF_validity(self): + X1 = np.random.rand(10, 2) / 2 + X2 = np.random.rand(10, 2) / 2 + 0.5 + self.assertTrue(np.allclose(self.acquisition.evaluate(X1), 1), msg="Left half of plane is feasible") + self.assertTrue(np.allclose(self.acquisition.evaluate(X2), 0), msg="Right half of plane is not feasible") + + +class _TestAcquisitionBinaryOperator(_TestAcquisition): + def test_object_integrity(self): + self.assertTrue(isinstance(self.acquisition.lhs, GPflowOpt.acquisition.Acquisition), + msg="Left hand operand should be an acquisition object") + self.assertTrue(isinstance(self.acquisition.rhs, GPflowOpt.acquisition.Acquisition), + msg="Right hand operand should be an acquisition object") + + def test_data(self): + super(_TestAcquisitionBinaryOperator, self).test_data() + np.testing.assert_allclose(self.acquisition.data[0], self.acquisition.lhs.data[0], + err_msg="Samples should be equal for all operands") + np.testing.assert_allclose(self.acquisition.data[0], self.acquisition.rhs.data[0], + err_msg="Samples should be equal for all operands") + + Y = np.hstack((self.acquisition.lhs.data[1], self.acquisition.rhs.data[1])) + np.testing.assert_allclose(self.acquisition.data[1], Y, + err_msg="Value should be horizontally concatenated") + + +class TestAcquisitionSum(_TestAcquisitionBinaryOperator, unittest.TestCase): + def setUp(self): + super(TestAcquisitionSum, self).setUp() + self.models = [self.create_parabola_model(), self.create_parabola_model()] + self.acquisition = GPflowOpt.acquisition.AcquisitionSum( + GPflowOpt.acquisition.ExpectedImprovement(self.models[0]), + GPflowOpt.acquisition.ExpectedImprovement(self.models[1])) + + def test_sum_validity(self): + design = GPflowOpt.design.FactorialDesign(4, self.domain) + m = self.create_parabola_model(design) + single_ei = GPflowOpt.acquisition.ExpectedImprovement(m) + p1 = self.acquisition.evaluate(design.generate()) + p2 = single_ei.evaluate(design.generate()) + np.testing.assert_allclose(p2, p1 
/ 2, rtol=1e-3, err_msg="The sum of 2 EI should be the double of only EI") + + def test_generating_operator(self): + design = GPflowOpt.design.FactorialDesign(4, self.domain) + joint = GPflowOpt.acquisition.ExpectedImprovement(self.create_parabola_model(design)) + \ + GPflowOpt.acquisition.ExpectedImprovement(self.create_parabola_model(design)) + self.assertTrue(isinstance(joint, GPflowOpt.acquisition.AcquisitionSum)) + + def test_indices(self): + np.testing.assert_allclose(self.acquisition.objective_indices(), np.arange(2, dtype=int), + err_msg="Sum of two EI should return all objectives") + np.testing.assert_allclose(self.acquisition.constraint_indices(), np.arange(0, dtype=int), + err_msg="Sum of two EI should return no constraints") + + +class TestAcquisitionProduct(_TestAcquisitionBinaryOperator, unittest.TestCase): + def setUp(self): + super(TestAcquisitionProduct, self).setUp() + self.models = [self.create_parabola_model(), self.create_parabola_model()] + self.acquisition = GPflowOpt.acquisition.AcquisitionProduct( + GPflowOpt.acquisition.ExpectedImprovement(self.models[0]), + GPflowOpt.acquisition.ExpectedImprovement(self.models[1])) + + def test_product_validity(self): + design = GPflowOpt.design.FactorialDesign(4, self.domain) + m = self.create_parabola_model(design) + single_ei = GPflowOpt.acquisition.ExpectedImprovement(m) + p1 = self.acquisition.evaluate(design.generate()) + p2 = single_ei.evaluate(design.generate()) + np.testing.assert_allclose(p2, np.sqrt(p1), rtol=1e-3, + err_msg="The product of 2 EI should be the square of one EI") + + def test_generating_operator(self): + joint = GPflowOpt.acquisition.ExpectedImprovement(self.create_parabola_model()) * \ + GPflowOpt.acquisition.ExpectedImprovement(self.create_parabola_model()) + self.assertTrue(isinstance(joint, GPflowOpt.acquisition.AcquisitionProduct)) + + def test_indices(self): + np.testing.assert_allclose(self.acquisition.objective_indices(), np.arange(2, dtype=int), + err_msg="Product of two EI should return all objectives") + np.testing.assert_allclose(self.acquisition.constraint_indices(), np.arange(0, dtype=int), + err_msg="Product of two EI should return no constraints") + + +class TestJointAcquisition(unittest.TestCase): + @property + def domain(self): + return np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)]) + + def test_constrained_EI(self): + design = GPflowOpt.design.FactorialDesign(4, self.domain) + X = design.generate() + Yo = parabola2d(X) + Yc = plane(X) + m1 = GPflow.gpr.GPR(X, Yo, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.std(axis=0))) + m2 = GPflow.gpr.GPR(X, Yc, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.std(axis=0) / 2)) + joint = GPflowOpt.acquisition.ExpectedImprovement(m1) * GPflowOpt.acquisition.ProbabilityOfFeasibility(m2) + + np.testing.assert_allclose(joint.objective_indices(), np.array([0], dtype=int)) + np.testing.assert_allclose(joint.constraint_indices(), np.array([1], dtype=int)) + + def test_hierarchy(self): + design = GPflowOpt.design.FactorialDesign(4, self.domain) + X = design.generate() + Yo = parabola2d(X) + Yc = plane(X) + m1 = GPflow.gpr.GPR(X, Yo, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.std(axis=0))) + m2 = GPflow.gpr.GPR(X, Yo, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.std(axis=0))) + m3 = GPflow.gpr.GPR(X, Yc, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.std(axis=0) / 2)) + joint = GPflowOpt.acquisition.ExpectedImprovement(m1) * \ + (GPflowOpt.acquisition.ProbabilityOfFeasibility(m3) + + 
GPflowOpt.acquisition.ExpectedImprovement(m2)) + + np.testing.assert_allclose(joint.objective_indices(), np.array([0, 2], dtype=int)) + np.testing.assert_allclose(joint.constraint_indices(), np.array([1], dtype=int)) diff --git a/testing/test_design.py b/testing/test_design.py new file mode 100644 index 0000000..3edb1ac --- /dev/null +++ b/testing/test_design.py @@ -0,0 +1,42 @@ +import GPflowOpt +import unittest +import numpy as np + + +class _TestDesign(object): + @property + def design(self): + raise NotImplementedError() + + def setUp(self): + self.domain = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -i, 2 * i) for i in range(1, 4)]) + + def test_design_compliance(self): + points = self.design.generate() + self.assertTupleEqual(points.shape, (self.design.size, self.design.domain.size), msg="Generated design does not" + "satisfy specifications") + self.assertIn(points, self.design.domain, "Not all generated points are generated within the domain") + + +class TestRandomDesign(_TestDesign, unittest.TestCase): + @_TestDesign.design.getter + def design(self): + return GPflowOpt.design.RandomDesign(200, self.domain) + + +class TestEmptyDesign(_TestDesign, unittest.TestCase): + @_TestDesign.design.getter + def design(self): + return GPflowOpt.design.EmptyDesign(self.domain) + + +class TestFactorialDesign(_TestDesign, unittest.TestCase): + @_TestDesign.design.getter + def design(self): + return GPflowOpt.design.FactorialDesign(4, self.domain) + + def test_validity(self): + A = self.design.generate() + for i in range(1, self.domain.size + 1): + self.assertTrue(np.all(np.any(A[i - 1, :] - np.linspace(-i, 2 * i, 4)[:, None] < 1e-4, axis=0)), + msg="Generated off-grid.") diff --git a/testing/test_domain.py b/testing/test_domain.py new file mode 100644 index 0000000..c0c0817 --- /dev/null +++ b/testing/test_domain.py @@ -0,0 +1,114 @@ +import GPflowOpt +import unittest +import numpy as np + + +class TestContinuousParameter(unittest.TestCase): + + def test_simple(self): + p = GPflowOpt.domain.ContinuousParameter("x1", 0, 1) + self.assertListEqual(p._range, [0,1], msg="Internal storage of object incorrect") + self.assertEqual(p.lower, 0, msg="Lower should equal 0") + self.assertEqual(p.upper, 1, msg="Upper should equal 1") + self.assertEqual(p.size, 1, msg="Size of parameter should equal 1") + + p.upper = 2 + self.assertEqual(p.upper, 2, msg="After assignment, upper should equal 2") + p.lower = 1 + self.assertEqual(p.lower, 1, msg="After assignment, lower should equal 2") + + def test_equality(self): + p = GPflowOpt.domain.ContinuousParameter("x1", 0, 1) + pne = GPflowOpt.domain.ContinuousParameter("x1", 0, 2) + self.assertNotEqual(p, pne, msg="Should not be equal (invalid upper)") + pne = GPflowOpt.domain.ContinuousParameter("x1", -1, 1) + self.assertNotEqual(p, pne, msg="Should not be equal (invalid lower)") + pne = GPflowOpt.domain.ContinuousParameter("x1", -1, 2) + self.assertNotEqual(p, pne, msg="Should not be equal (invalid lower/upper)") + p.lower = -1 + p.upper = 2 + self.assertEqual(p, pne, msg="Should be equal after adjusting bounds") + + def test_containment(self): + p = GPflowOpt.domain.ContinuousParameter("x1", 0, 1) + self.assertIn(0, p, msg="Point is within domain") + self.assertIn(0.5, p, msg="Point is within domain") + self.assertIn(1, p, msg="Point is within domain") + self.assertNotIn(1.1, p, msg="Point is not within domain") + self.assertNotIn(-0.5, p, msg="Point is not within domain") + + def test_value(self): + p = GPflowOpt.domain.ContinuousParameter("x1", 0, 
1) + self.assertTupleEqual(p.value.shape, (1,), msg="Default value has incorrect shape.") + self.assertTrue(np.allclose(p.value, 0.5), msg="Parameter has incorrect default value") + + p.value = 0.8 + self.assertTrue(np.allclose(p.value, 0.8), msg="Parameter has incorrect value after update") + + p.value = [0.6, 0.8] + self.assertTupleEqual(p.value.shape, (2,), msg="Default value has incorrect shape.") + np.testing.assert_allclose(p.value, np.array([0.6, 0.8]), err_msg="Parameter has incorrect value after update") + + p = GPflowOpt.domain.ContinuousParameter("x1", 0, 1, 0.2) + self.assertTupleEqual(p.value.shape, (1,), msg="Default value has incorrect shape.") + self.assertTrue(np.allclose(p.value, 0.2), msg="Parameter has incorrect initialized value") + + +class TestHypercubeDomain(unittest.TestCase): + + def setUp(self): + self.domain = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1,4)]) + + def test_object_integrity(self): + self.assertEqual(len(self.domain._parameters), 3) + + def test_simple(self): + self.assertEqual(self.domain.size, 3, msg="Size of domain should equal 3") + self.assertTrue(np.allclose(self.domain.lower, -1.0), msg="Lower of domain should equal -1 for all parameters") + self.assertTrue(np.allclose(self.domain.upper, 1.0), msg="Lower of domain should equal 1 for all parameters") + + def test_equality(self): + dne = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1,3)] + + [GPflowOpt.domain.ContinuousParameter("x3", -3, 1)]) + self.assertNotEqual(self.domain, dne, msg="One lower bound mismatch, should not be equal.") + dne = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] + + [GPflowOpt.domain.ContinuousParameter("x3", -1, 2)]) + self.assertNotEqual(self.domain, dne, msg="One upper bound mismatch, should not be equal.") + dne = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)]) + self.assertNotEqual(self.domain, dne, msg="Size mismatch") + de = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 4)]) + self.assertEqual(self.domain, de, msg="No mismatches, should be equal") + + def test_parenting(self): + for p in self.domain: + self.assertEqual(id(p._parent), id(self.domain), "Misspecified parent link detected") + + def test_access(self): + for i in range(self.domain.size): + self.assertEqual(self.domain[i].label, "x{0}".format(i+1), "Accessing parameters, encountering " + "incorrect labels") + + self.domain[2].lower = -2 + de = np.sum([GPflowOpt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] + + [GPflowOpt.domain.ContinuousParameter("x3", -2, 1)]) + + self.assertEqual(self.domain, de, msg="No mismatches, should be equal") + + def test_containment(self): + A = np.random.rand(50,3)*2-1 + self.assertTrue(A in self.domain, msg="Generated random points within domain") + + A = np.vstack((A, np.array([-2, -2, -2]))) + self.assertFalse(A in self.domain, msg="One of the points was not in the domain") + + A = np.random.rand(50,4)*2-1 + self.assertFalse(A in self.domain, msg="Generated random points have different dimensionality") + + def test_value(self): + self.assertTupleEqual(self.domain.value.shape, (1, 3), msg="Default value has incorrect shape.") + np.testing.assert_allclose(self.domain.value, np.array([[0, 0, 0]]), err_msg="Parameter has incorrect initial value") + + A = np.random.rand(10, 3) * 2 - 1 + self.domain.value = A + 
self.assertTupleEqual(self.domain.value.shape, (10, 3), msg="Assigned value has incorrect shape.") + np.testing.assert_allclose(self.domain.value, A, err_msg="Parameter has incorrect value after assignment") diff --git a/testing/test_optimizers.py b/testing/test_optimizers.py new file mode 100644 index 0000000..5f6cdf5 --- /dev/null +++ b/testing/test_optimizers.py @@ -0,0 +1,172 @@ +import GPflowOpt +import unittest +import numpy as np +import GPflow + + +def parabola2d(X): + return np.atleast_2d(np.sum(X ** 2, axis=1)).T, 2 * X + + +class KeyboardRaiser: + """ + This wraps a function and makes it raise a KeyboardInterrupt after some number of calls + """ + + def __init__(self, iters_to_raise, f): + self.iters_to_raise, self.f = iters_to_raise, f + self.count = 0 + + def __call__(self, *a, **kw): + self.count += 1 + if self.count >= self.iters_to_raise: + raise KeyboardInterrupt + return self.f(*a, **kw) + + +class _TestOptimizer(object): + def setUp(self): + self.optimizer = None + + @property + def domain(self): + return GPflowOpt.domain.ContinuousParameter("x1", -1.0, 1.0) + \ + GPflowOpt.domain.ContinuousParameter("x2", -1.0, 1.0) + + def test_default_initial(self): + self.assertTupleEqual(self.optimizer._initial.shape, (1, 2), msg="Invalid shape of initial points array") + self.assertTrue(np.allclose(self.optimizer._initial, 0), msg="Default initial point incorrect.") + + def test_set_initial(self): + self.optimizer.set_initial([1, 1]) + self.assertTupleEqual(self.optimizer._initial.shape, (1, 2), msg="Invalid shape of initial points array") + self.assertTrue(np.allclose(self.optimizer._initial, 1), msg="Specified initial point not loaded.") + + +class TestCandidateOptimizer(_TestOptimizer, unittest.TestCase): + def setUp(self): + super(TestCandidateOptimizer, self).setUp() + design = GPflowOpt.design.FactorialDesign(4, self.domain) + self.optimizer = GPflowOpt.optim.CandidateOptimizer(self.domain, design.generate()) + + def test_object_integrity(self): + self.assertTupleEqual(self.optimizer.candidates.shape, (16, 2), msg="Invalid shape of candidate property.") + self.assertTupleEqual(self.optimizer.get_initial().shape, (17, 2), msg="Invalid shape of initial points") + self.assertFalse(self.optimizer.gradient_enabled(), msg="CandidateOptimizer supports no gradients.") + + def test_optimize(self): + result = self.optimizer.optimize(parabola2d) + self.assertTrue(result.success, msg="Optimization should succeed.") + self.assertTrue(np.allclose(result.x, 0), msg="Optimum should be identified") + self.assertTrue(np.allclose(result.fun, 0), msg="Function value in optimum is 0") + self.assertEqual(result.nfev, 17, msg="Number of function evaluations equals candidates + initial points") + + def test_optimize_second(self): + self.optimizer.set_initial([0.67, 0.67]) + result = self.optimizer.optimize(parabola2d) + self.assertGreater(result.fun, 0, msg="Optimum is not amongst candidates and initial points") + self.assertLess(result.fun, 2, msg="Function value not reachable within domain") + + +class TestSciPyOptimizer(_TestOptimizer, unittest.TestCase): + def setUp(self): + super(TestSciPyOptimizer, self).setUp() + self.optimizer = GPflowOpt.optim.SciPyOptimizer(self.domain, maxiter=10) + + def test_object_integrity(self): + self.assertDictEqual(self.optimizer.config, {'tol': None, 'method': 'L-BFGS-B', + 'options': {'maxiter': 10, 'disp': False}}, + msg="Config dict contains invalid entries.") + self.assertTrue(self.optimizer.gradient_enabled(), msg="Gradient is supported.") + + def 
test_optimize(self):
+        self.optimizer.set_initial([-1, -1])
+        result = self.optimizer.optimize(parabola2d)
+        self.assertTrue(result.success)
+        self.assertLessEqual(result.nit, 10, "Only 10 iterations permitted")
+        self.assertLessEqual(result.nfev, 20, "Max 20 evaluations permitted")
+        self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum")
+        self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned")
+
+    def test_optimizer_interrupt(self):
+        self.optimizer.set_initial([-1, -1])
+        result = self.optimizer.optimize(KeyboardRaiser(2, parabola2d))
+        self.assertFalse(result.success, msg="After one evaluation, a keyboard interrupt is raised, "
+                                             "non-successful result expected.")
+        self.assertFalse(np.allclose(result.x, 0), msg="After one iteration, the optimum will not be found")
+
+
+class TestStagedOptimizer(_TestOptimizer, unittest.TestCase):
+    def setUp(self):
+        super(TestStagedOptimizer, self).setUp()
+        self.optimizer = GPflowOpt.optim.StagedOptimizer([GPflowOpt.optim.MCOptimizer(self.domain, 5),
+                                                          GPflowOpt.optim.SciPyOptimizer(self.domain, maxiter=10)])

+    def test_object_integrity(self):
+        self.assertEqual(len(self.optimizer.optimizers), 2, msg="Two optimizers expected in optimizer list")
+        self.assertFalse(self.optimizer.gradient_enabled(), msg="MCOptimizer supports no gradients => neither "
+                                                                "does the StagedOptimizer.")
+
+    def test_optimize(self):
+        result = self.optimizer.optimize(parabola2d)
+        self.assertTrue(result.success)
+        self.assertLessEqual(result.nfev, 20, "Max 20 evaluations permitted")
+        self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum")
+        self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned")
+
+    def test_optimizer_interrupt(self):
+        self.optimizer.set_initial([-1, -1])
+        result = self.optimizer.optimize(KeyboardRaiser(3, parabola2d))
+        self.assertFalse(result.success, msg="After two evaluations, a keyboard interrupt is raised, "
+                                             "non-successful result expected.")
+        self.assertFalse(np.allclose(result.x, 0.0), msg="After one iteration, the optimum will not be found")
+        self.assertEqual(result.nstages, 1, msg="Stage 1 should be in progress during interrupt")
+
+        result = self.optimizer.optimize(KeyboardRaiser(8, parabola2d))
+        self.assertFalse(result.success, msg="After 7 evaluations, a keyboard interrupt is raised, "
+                                             "non-successful result expected.")
+        self.assertFalse(np.allclose(result.x[0, :], 0.0), msg="The optimum should not be found yet")
+        self.assertEqual(result.nstages, 2, msg="Stage 2 should be in progress during interrupt")
+
+
+class TestBayesianOptimizer(_TestOptimizer, unittest.TestCase):
+    def setUp(self):
+        super(TestBayesianOptimizer, self).setUp()
+        design = GPflowOpt.design.FactorialDesign(4, self.domain)
+        X, Y = design.generate(), parabola2d(design.generate())[0]
+        model = GPflow.gpr.GPR(X, Y, GPflow.kernels.RBF(2, ARD=True, lengthscales=X.var(axis=0)))
+        model.kern.variance.prior = GPflow.priors.Gamma(3, 1.0 / 3.0)
+        acquisition = GPflowOpt.acquisition.ExpectedImprovement(model)
+        self.optimizer = GPflowOpt.BayesianOptimizer(self.domain, acquisition)
+
+    def test_default_initial(self):
+        self.assertTupleEqual(self.optimizer._initial.shape, (0, 2), msg="Invalid shape of initial points array")
+
+    def test_optimize(self):
+        result = self.optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=20)
+        self.assertTrue(result.success)
+        self.assertEqual(result.nfev, 20, "Only 20 evaluations permitted")
+        self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum")
+        self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned")
+
+    def test_optimizer_interrupt(self):
+        result = self.optimizer.optimize(KeyboardRaiser(3, lambda X: parabola2d(X)[0]), n_iter=20)
+        self.assertFalse(result.success, msg="After 2 evaluations, a keyboard interrupt is raised, "
+                                             "non-successful result expected.")
+        self.assertTrue(np.allclose(result.x, 0.0), msg="The optimum should nonetheless be identified")
+
+    def test_initial_design(self):
+        design = GPflowOpt.design.RandomDesign(5, self.domain)
+        optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.optimizer.acquisition, initial=design)
+
+        result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=0)
+        self.assertTrue(result.success)
+        self.assertEqual(result.nfev, 5, "Evaluated only the initial design")
+        self.assertTupleEqual(optimizer.acquisition.data[0].shape, (21, 2))
+        self.assertTupleEqual(optimizer.acquisition.data[1].shape, (21, 1))
+
+        result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=0)
+        self.assertTrue(result.success)
+        self.assertEqual(result.nfev, 0, "Initial design was not reset")
+        self.assertTupleEqual(optimizer.acquisition.data[0].shape, (21, 2))
+        self.assertTupleEqual(optimizer.acquisition.data[1].shape, (21, 1))