Commit 33979f83 authored by Konstantinos Chatzilygeroudis's avatar Konstantinos Chatzilygeroudis
Browse files

Option for unbounded BO

parent a65707e0
......@@ -69,6 +69,7 @@ namespace limbo {
// Default parameters for the Bayesian optimizer (BOptimizer).
struct bayes_opt_boptimizer {
// presumably the observation-noise level used by the model -- TODO confirm against model code
BO_PARAM(double, noise, 1e-6);
// presumably the period (in iterations) for hyper-parameter re-optimization; -1 appears to disable it -- verify against optimizer loop
BO_PARAM(int, hp_period, -1);
// if true, candidate points are drawn in the bounded [0,1]^n cube and the
// acquisition optimizer is run in bounded mode (see random_vector / acqui_optimizer call sites)
BO_PARAM(bool, bounded, true);
};
}
......@@ -153,8 +154,8 @@ namespace limbo {
auto acqui_optimization =
[&](const Eigen::VectorXd& x, bool g) { return acqui(x,afun,g); };
Eigen::VectorXd starting_point = tools::random_vector(StateFunction::dim_in);
Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, starting_point, true);
Eigen::VectorXd starting_point = tools::random_vector(StateFunction::dim_in, Params::bayes_opt_boptimizer::bounded());
Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, starting_point, Params::bayes_opt_boptimizer::bounded());
this->eval_and_add(sfun, new_sample);
this->_update_stats(*this, afun);
......
......@@ -104,7 +104,7 @@ namespace limbo {
///
/// - this function is thread safe because the random number generator we use is thread-safe
/// - we use a C++11 random number generator
Eigen::VectorXd random_vector(int size)
Eigen::VectorXd random_vector_bounded(int size)
{
static rgen_double_t rgen(0.0, 1.0);
Eigen::VectorXd res(size);
......@@ -112,6 +112,29 @@ namespace limbo {
res[i] = rgen.rand();
return res;
}
/// @ingroup tools
/// random vector in R
///
/// - this function is thread safe because the random number generator we use is thread-safe
/// - we use a C++11 random number generator
/// @ingroup tools
/// random vector in R^n drawn over an (effectively) unbounded range
///
/// - this function is thread safe because the random number generator we use is thread-safe
/// - we use a C++11 random number generator
///
/// NOTE: std::uniform_real_distribution requires (b - a) <= numeric_limits<double>::max().
/// The previous bounds [lowest(), max()] made the range 2 * max(), which overflows to
/// infinity and is undefined behavior (typical symptom: the distribution returns inf).
/// Halving both bounds keeps the range exactly equal to max(), which is valid, while the
/// interval still spans the representable doubles symmetrically.
Eigen::VectorXd random_vector_unbounded(int size)
{
    static rgen_double_t rgen(std::numeric_limits<double>::lowest() / 2.0,
        std::numeric_limits<double>::max() / 2.0);
    Eigen::VectorXd res(size);
    for (int i = 0; i < size; ++i)
        res[i] = rgen.rand();
    return res;
}
/// @ingroup tools
/// random vector dispatcher: draws in [0,1]^n when `bounded` is true (the default),
/// otherwise over the unbounded range (see random_vector_unbounded)
Eigen::VectorXd random_vector(int size, bool bounded = true)
{
    return bounded ? random_vector_bounded(size)
                   : random_vector_unbounded(size);
}
}
}
......
......@@ -91,7 +91,7 @@ struct Params {
};
struct acqui_ei {
BO_PARAM(double, jitter, 0.0);
BO_PARAM(double, jitter, 0.001);
};
struct init_randomsampling {
......@@ -136,6 +136,19 @@ struct eval1 {
}
};
#ifdef USE_LIBCMAES
// 1-D test objective with a single global maximum at x = 2.5 (value 0):
// f(x) = -(x - 2.5)^2. Used to check that the optimizer converges to 2.5.
template <typename Params, int obs_size = 1>
struct eval_bounded {
    BOOST_STATIC_CONSTEXPR int dim_in = 1;
    BOOST_STATIC_CONSTEXPR int dim_out = obs_size;

    Eigen::VectorXd operator()(const Eigen::VectorXd& x) const
    {
        // d * d instead of std::pow(d, 2.0): same value, avoids the
        // general-purpose (and much slower) transcendental pow call
        const double d = x(0) - 2.5;
        return tools::make_vector(-d * d);
    }
};
#endif
BOOST_AUTO_TEST_CASE(test_bo_inheritance)
{
using namespace limbo;
......@@ -168,6 +181,42 @@ BOOST_AUTO_TEST_CASE(test_bo_inheritance)
BOOST_CHECK(opt.total_iterations() == 1);
}
#ifdef USE_LIBCMAES
// End-to-end check of the new unbounded mode: run BO with
// bayes_opt_boptimizer::bounded() == false (CMA-ES as the acquisition
// optimizer) on a 1-D quadratic and verify the optimum is found.
BOOST_AUTO_TEST_CASE(test_bo_unbounded)
{
using namespace limbo;
struct Parameters {
struct bayes_opt_bobase {
BO_PARAM(bool, stats_enabled, false);
};
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.0);
BO_PARAM(int, hp_period, -1);
// the option under test: disable the [0,1]^n restriction
BO_PARAM(bool, bounded, false);
};
struct opt_cmaes : public defaults::opt_cmaes {
};
};
// NOTE(review): the kernel/stop/mean/stat/init/model/acqui typedefs use the
// file-global `Params`, while the optimizer itself uses the local `Parameters`
// -- presumably intentional (only the boptimizer/cmaes settings differ), but
// worth confirming; a stray `Params` here would silently ignore the local config.
typedef kernel::Exp<Params> Kernel_t;
typedef opt::Cmaes<Parameters> AcquiOpt_t;
typedef boost::fusion::vector<stop::MaxIterations<Params>> Stop_t;
typedef mean::Data<Params> Mean_t;
typedef boost::fusion::vector<stat::ConsoleSummary<Params>> Stat_t;
typedef init::RandomSampling<Params> Init_t;
typedef model::GP<Params, Kernel_t, Mean_t> GP_t;
typedef acqui::UCB<Params, GP_t> Acqui_t;
bayes_opt::BOptimizer<Parameters, modelfun<GP_t>, initfun<Init_t>, acquifun<Acqui_t>, acquiopt<AcquiOpt_t>, statsfun<Stat_t>, stopcrit<Stop_t>> opt;
// objective has its optimum at x = 2.5; BOOST_CHECK_CLOSE's third argument
// is a tolerance in percent, so this accepts best_sample within 10% of 2.5
opt.optimize(eval_bounded<Params>());
BOOST_CHECK_CLOSE(opt.best_sample()(0), 2.5, 10);
}
#endif
BOOST_AUTO_TEST_CASE(test_bo_gp)
{
using namespace limbo;
......
......@@ -139,12 +139,12 @@ BOOST_AUTO_TEST_CASE(test_gp_check_lf_grad)
// Random samples and test samples
int N = 40, M = 10;
for (size_t i = 0; i < N; i++) {
for (int i = 0; i < N; i++) {
samples.push_back(tools::random_vector(4));
observations.push_back(tools::random_vector(2));
}
for (size_t i = 0; i < M; i++) {
for (int i = 0; i < M; i++) {
test_samples.push_back(tools::random_vector(4));
test_samples_mean.push_back(tools::random_vector(6));
test_samples_kernel_mean.push_back(tools::random_vector(6 + 4));
......@@ -156,7 +156,7 @@ BOOST_AUTO_TEST_CASE(test_gp_check_lf_grad)
Eigen::VectorXd results(M);
for (size_t i = 0; i < M; i++) {
for (int i = 0; i < M; i++) {
auto res = check_grad(kernel_optimization, test_samples[i], 1e-4);
results(i) = std::get<0>(res);
// std::cout << std::get<1>(res).transpose() << " vs " << std::get<2>(res).transpose() << " --> " << results(i) << std::endl;
......@@ -166,7 +166,7 @@ BOOST_AUTO_TEST_CASE(test_gp_check_lf_grad)
model::gp::KernelMeanLFOpt<Params>::KernelMeanLFOptimization<GP_t> kernel_mean_optimization(gp);
for (size_t i = 0; i < M; i++) {
for (int i = 0; i < M; i++) {
auto res = check_grad(kernel_mean_optimization, test_samples_kernel_mean[i], 1e-4);
results(i) = std::get<0>(res);
// std::cout << std::get<1>(res).transpose() << " vs " << std::get<2>(res).transpose() << " --> " << results(i) << std::endl;
......@@ -176,7 +176,7 @@ BOOST_AUTO_TEST_CASE(test_gp_check_lf_grad)
model::gp::MeanLFOpt<Params>::MeanLFOptimization<GP_t> mean_optimization(gp);
for (size_t i = 0; i < M; i++) {
for (int i = 0; i < M; i++) {
auto res = check_grad(mean_optimization, test_samples_mean[i], 1e-4);
results(i) = std::get<0>(res);
// std::cout << std::get<1>(res).transpose() << " vs " << std::get<2>(res).transpose() << " --> " << results(i) << std::endl;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment