Commit 4532f87a authored by Jean-Baptiste Mouret
Browse files

Change the defaults for GP, make BO benchmarks compile

parent 5b0dabf7
......@@ -65,13 +65,17 @@ struct Params {
struct kernel : public defaults::kernel {
BO_PARAM(double, noise, 1e-10);
};
struct kernel_exp : public defaults::kernel_exp {
};
struct kernel_maternfivehalves {
BO_PARAM(double, sigma_sq, 1);
BO_PARAM(double, l, 1);
};
struct acqui_ucb {
struct acqui_ucb : public defaults::acqui_ucb {
BO_PARAM(double, alpha, 0.125);
};
struct acqui_ei : public defaults::acqui_ei {
};
struct init_randomsampling {
BO_PARAM(int, samples, 10);
};
......@@ -82,21 +86,25 @@ struct Params {
};
struct opt_rprop : public defaults::opt_rprop {
};
struct opt_parallelrepeater : public defaults::opt_parallelrepeater {
};
struct opt_nloptnograd : public defaults::opt_nloptnograd {
};
};
struct DirectParams {
struct opt_nloptnograd {
struct opt_nloptnograd : public defaults::opt_nloptnograd {
BO_DYN_PARAM(int, iterations);
};
};
struct BobyqaParams {
struct opt_nloptnograd {
struct opt_nloptnograd : public defaults::opt_nloptnograd {
BO_DYN_PARAM(int, iterations);
};
};
struct BobyqaParams_HP {
struct opt_nloptnograd {
struct opt_nloptnograd : public defaults::opt_nloptnograd {
BO_DYN_PARAM(int, iterations);
};
};
......@@ -139,6 +147,7 @@ int main()
{
srand(time(NULL));
#ifdef BAYESOPT_PARAMS
using Kernel_t = kernel::SquaredExpARD<Params>;
using AcquiOpt_t = opt::Chained<Params, opt::NLOptNoGrad<DirectParams, nlopt::GN_DIRECT_L>, opt::NLOptNoGrad<BobyqaParams, nlopt::LN_BOBYQA>>;
using Stop_t = boost::fusion::vector<stop::MaxIterations<Params>>;
......@@ -150,6 +159,32 @@ int main()
using Opt_t = bayes_opt::BOptimizer<Params, modelfun<GP_t>, initfun<Init_t>, acquifun<Acqui_t>, acquiopt<AcquiOpt_t>, statsfun<Stat_t>, stopcrit<Stop_t>>;
// benchmark different optimization algorithms
#elif defined(OPT_CMAES)
using AcquiOpt_t = opt::Chained<Params, opt::NLOptNoGrad<DirectParams, nlopt::GN_DIRECT_L>, opt::NLOptNoGrad<BobyqaParams, nlopt::LN_BOBYQA>>;
using Opt_t = bayes_opt::BOptimizer<Params, acquiopt<AcquiOpt_t>>;
#elif defined(OPT_DIRECT)
using AcquiOpt_t = opt::Chained<Params, opt::NLOptNoGrad<DirectParams, nlopt::GN_DIRECT_L>, opt::NLOptNoGrad<BobyqaParams, nlopt::LN_BOBYQA>>;
using Opt_t = bayes_opt::BOptimizer<Params, acquiopt<AcquiOpt_t>>;
//benchmark different acquisition functions
#elif defined(ACQ_UCB)
using GP_t = model::GP<Params>;
using Acqui_t = acqui::UCB<Params, GP_t>;
using Opt_t = bayes_opt::BOptimizer<Params, acquifun<Acqui_t>> ;
#elif defined(ACQ_EI)
using GP_t = model::GP<Params>;
using Acqui_t = acqui::EI<Params, GP_t>;
using Opt_t = bayes_opt::BOptimizer<Params, acquifun<Acqui_t>> ;
// limbo default parameters
#elif defined(LIMBO_PARAMS) // default limbo params
using Opt_t = bayes_opt::BOptimizer<Params>;
#else
#error "Unknown variant in benchmark"
#endif
benchmark<Opt_t, BraninNormalized>("branin");
benchmark<Opt_t, Hartmann6>("hartmann6");
benchmark<Opt_t, Hartmann3>("hartmann3");
......
......@@ -81,13 +81,13 @@ struct Params {
};
struct DirectParams {
struct opt_nloptnograd {
struct opt_nloptnograd : public defaults::opt_nloptnograd {
BO_DYN_PARAM(int, iterations);
};
};
struct BobyqaParams {
struct opt_nloptnograd {
struct opt_nloptnograd : public defaults::opt_nloptnograd {
BO_DYN_PARAM(int, iterations);
};
};
......@@ -133,7 +133,7 @@ int main()
using Mean_t = mean::Constant<Params>;
using Stat_t = boost::fusion::vector<>;
using Init_t = init::RandomSampling<Params>;
using GP_t = model::GP<Params, Kernel_t, Mean_t>;
using GP_t = model::GP<Params, Kernel_t, Mean_t, model::gp::NoLFOpt<Params>>;
using Acqui_t = acqui::UCB<Params, GP_t>;
using Opt_t = bayes_opt::BOptimizer<Params, modelfun<GP_t>, initfun<Init_t>, acquifun<Acqui_t>, acquiopt<AcquiOpt_t>, statsfun<Stat_t>, stopcrit<Stop_t>>;
......
......@@ -68,6 +68,17 @@ def configure(conf):
def build_bo_benchmarks(bld):
if bld.env.DEFINES_NLOPT == ['USE_NLOPT']:
limbo.create_variants(bld,
source = 'limbo/hp_opt.cpp',
uselib_local = 'limbo',
uselib = 'BOOST EIGEN TBB NLOPT CMAES',
variants = ['BAYESOPT_PARAMS',
'LIMBO_PARAMS',
'OPT_CMAES',
'OPT_DIRECT',
'ACQ_UCB',
'ACQ_EI'
])
bld.program(features='cxx',
source='limbo/simple.cpp',
includes='. .. ../../ ../../src',
......@@ -75,12 +86,12 @@ def build_bo_benchmarks(bld):
uselib='BOOST EIGEN TBB SFERES NLOPT',
use='limbo')
bld.program(features='cxx',
source='limbo/hp_opt.cpp',
includes='. .. ../../ ../../src',
target='benchmark_limbo_hp_opt',
uselib='BOOST EIGEN TBB SFERES NLOPT',
use='limbo')
# bld.program(features='cxx',
# source='limbo/hp_opt.cpp',
# includes='. .. ../../ ../../src',
# target='benchmark_limbo_hp_opt',
# uselib='BOOST EIGEN TBB SFERES NLOPT',
# use='limbo')
#Quick and dirty way to add Bayesopt
......
......@@ -182,10 +182,7 @@ namespace limbo {
// defaults
struct defaults {
using init_t = init::RandomSampling<Params>; // 1
using kf_t = kernel::Exp<Params>;
using mean_t = mean::Data<Params>;
using model_t = model::GP<Params, kf_t, mean_t>; // 2
using model_t = model::GP<Params>; // 2
// WARNING: you have to specify the acquisition function
// if you use a custom model
using acqui_t = acqui::UCB<Params, model_t>; // 3
......
......@@ -56,6 +56,10 @@
#include <Eigen/LU>
#include <limbo/model/gp/no_lf_opt.hpp>
#include <limbo/kernel/squared_exp_ard.hpp>
#include <limbo/model/gp/kernel_lf_opt.hpp>
#include <limbo/mean/data.hpp>
#include <limbo/tools.hpp>
namespace limbo {
......@@ -63,9 +67,10 @@ namespace limbo {
/// @ingroup model
/// A classic Gaussian process.
/// It is parametrized by:
/// - a kernel function
/// - a mean function
/// - [optional] an optimizer for the hyper-parameters
template <typename Params, typename KernelFunction, typename MeanFunction, class HyperParamsOptimizer = gp::NoLFOpt<Params>>
template <typename Params, typename KernelFunction = kernel::SquaredExpARD<Params>, typename MeanFunction = mean::Data<Params>, typename HyperParamsOptimizer = gp::KernelLFOpt<Params>>
class GP {
public:
/// useful because the model might be created before knowing anything about the process
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment