Commit f66d14e6 authored by Vaios Papaspyros

- EHVI experiment added and compiles

- Sferes variant compiles but fails on execution
- Create a separate stat header for the experiment (same code as before)
- Code refactoring
parent 0d6150de
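As a reading aid, here is a minimal sketch of how the new EHVI experiment is assembled, condensed from the multi.cpp diff below. Params and the benchmark functors (zdt1/zdt2/zdt3/mop2) are defined in the full file and elided here, and the include of limbo/limbo.hpp is assumed; func_t stands for whichever benchmark is selected at compile time:

#include <limbo/limbo.hpp>
#include <limbo/experimental/bayes_opt/ehvi.hpp>
#include <limbo/experimental/stat/pareto_benchmark.hpp>

using namespace limbo;

// func_t is one of zdt1 / zdt2 / zdt3 / mop2 (selected via -D flags in the diff)
typedef mop2 func_t;
// the new, separate stat header provides ParetoBenchmark
using stat_t = boost::fusion::vector<experimental::stat::ParetoBenchmark<func_t>>;

int main()
{
    tools::par::init();
    experimental::bayes_opt::Ehvi<Params, statsfun<stat_t>> opt;
    opt.optimize(func_t());
    return 0;
}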
@@ -2,31 +2,28 @@
#include <limbo/experimental/bayes_opt/parego.hpp>
#include <limbo/experimental/bayes_opt/nsbo.hpp>
#include <limbo/experimental/bayes_opt/ehvi.hpp>
#include <limbo/experimental/stat/pareto_benchmark.hpp>
using namespace limbo;
using namespace limbo::experimental;
struct Params {
struct boptimizer {
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.01);
BO_PARAM(int, dump_period, 1);
};
struct init {
BO_PARAM(int, nb_samples, 10);
// calandra: number of dimensions * 5
// knowles : 11 * dim - 1
struct init_randomsampling {
BO_PARAM(int, samples, 10);
};
struct bayes_opt_bobase {
BO_PARAM(bool, stats_enabled, false);
struct kernel_exp : public defaults::kernel_exp {
};
struct bayes_opt_parego : public defaults::bayes_opt_parego {
struct bayes_opt_bobase {
BO_PARAM(bool, stats_enabled, true);
};
struct maxiterations {
BO_PARAM(int, n_iterations, 30);
struct stop_maxiterations {
BO_PARAM(int, iterations, 30);
};
struct acqui_ucb : public defaults::acqui_ucb {
@@ -56,7 +53,7 @@ struct Params {
#endif
struct zdt1 {
static constexpr size_t dim = ZDT_DIM;
static constexpr size_t dim_in = ZDT_DIM;
Eigen::VectorXd operator()(const Eigen::VectorXd& x) const
{
Eigen::VectorXd res(2);
@@ -73,7 +70,7 @@ struct zdt1 {
};
struct zdt2 {
static constexpr size_t dim = ZDT_DIM;
static constexpr size_t dim_in = ZDT_DIM;
Eigen::VectorXd operator()(const Eigen::VectorXd& x) const
{
Eigen::VectorXd res(2);
@@ -90,7 +87,7 @@ struct zdt2 {
};
struct zdt3 {
static constexpr size_t dim = ZDT_DIM;
static constexpr size_t dim_in = ZDT_DIM;
Eigen::VectorXd operator()(const Eigen::VectorXd& x) const
{
Eigen::VectorXd res(2);
@@ -107,7 +104,9 @@ struct zdt3 {
};
struct mop2 {
static constexpr size_t dim = 2;
static constexpr size_t dim_in = 2;
static constexpr size_t dim_out = 2;
Eigen::VectorXd operator()(const Eigen::VectorXd& x) const
{
Eigen::VectorXd res(2);
@@ -116,70 +115,15 @@ struct mop2 {
// f1, f2
Eigen::VectorXd v1 = (xx.array() - 1.0 / sqrt(xx.size())).array().square();
Eigen::VectorXd v2 = (xx.array() + 1.0 / sqrt(xx.size())).array().square();
double f1 = 1.0 - ::exp(-v1.sum());
double f2 = 1.0 - ::exp(-v2.sum());
double f1 = 1.0 - exp(-v1.sum());
double f2 = 1.0 - exp(-v2.sum());
// we _maximize in [0:1]
res(0) = -f1 + 1;
res(1) = -f2 + 1;
res(0) = 1 - f1;
res(1) = 1 - f2;
return res;
}
};
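(For clarity: mop2 above appears to be the Fonseca-Fleming MOP2 benchmark, f1(x) = 1 - exp(-sum_i (xx_i - 1/sqrt(n))^2) and f2(x) = 1 - exp(-sum_i (xx_i + 1/sqrt(n))^2), where xx is a rescaled copy of the input defined in lines this hunk does not show and n is its dimension. Both objectives lie in [0, 1] and are normally minimized; returning 1 - f1 and 1 - f2, as the new lines do, turns them into the maximization problem the optimizer expects.)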
namespace limbo {
namespace stat {
template <typename F>
struct ParetoBenchmark {
template <typename BO>
void operator()(BO& opt)
{
opt.update_pareto_data();
#ifndef NSBO // this is already done in NSBO
opt.template update_pareto_model<F::dim>();
#endif
auto dir = opt.res_dir() + "/";
auto p_model = opt.pareto_model();
auto p_data = opt.pareto_data();
std::string it = std::to_string(opt.current_iteration());
std::string model = dir + "pareto_model_" + it + ".dat";
std::string model_real = dir + "pareto_model_real_" + it + ".dat";
std::string data = dir + "pareto_data_" + it + ".dat";
std::string obs_f = dir + "obs_" + it + ".dat";
std::ofstream pareto_model(model.c_str()), pareto_data(data.c_str()),
pareto_model_real(model_real.c_str()), obs(obs_f.c_str());
F f;
for (auto x : p_model)
pareto_model << std::get<1>(x).transpose() << " "
<< std::get<2>(x).transpose() << std::endl;
for (auto x : p_model)
pareto_model_real << f(std::get<0>(x)).transpose() << " " << std::endl;
for (auto x : p_data)
pareto_data << std::get<1>(x).transpose() << std::endl;
for (size_t i = 0; i < opt.observations().size(); ++i)
obs << opt.observations()[i].transpose() << " "
<< opt.samples()[i].transpose() << std::endl;
/*
std::string m1 = "model_" + it + ".dat";
std::ofstream m1f(m1.c_str());
for (float x = 0; x < 1; x += 0.01)
for (float y = 0; y < 1; y += 0.01) {
Eigen::VectorXd v(2);
v << x, y;
m1f << x << " " << y << " "
<< opt.models()[0].mu(v) << " "
<< opt.models()[0].sigma(v) << " "
<< opt.models()[1].mu(v) << " "
<< opt.models()[1].sigma(v) << std::endl;
}
*/
std::cout << "stats done" << std::endl;
}
};
}
}
int main()
{
tools::par::init();
@@ -190,21 +134,20 @@ int main()
typedef zdt2 func_t;
#elif defined ZDT3
typedef zdt3 func_t;
#elif defined MOP2
typedef mop2 func_t;
#else
typedef mop2 func_t;
#endif
typedef stat::ParetoBenchmark<func_t> stat_t;
using stat_t = boost::fusion::vector<experimental::stat::ParetoBenchmark<func_t>>;
#ifdef PAREGO
Parego<Params, statsfun<stat_t>> opt;
#elif defined(NSBO)
Nsbo<Params, statsfun<stat_t>> opt;
#else
exp::bayes_opt::Ehvi<Params, statsfun<stat_t>> opt;
experimental::bayes_opt::Ehvi<Params, statsfun<stat_t>> opt;
#endif
opt.optimize(func_t());
return 0;
}
@@ -15,3 +15,10 @@ def build(bld):
target = 'parego',
uselib = 'BOOST EIGEN TBB SFERES LIBCMAES NLOPT',
use = 'limbo')
obj = bld.program(features = 'cxx',
source = 'multi.cpp',
includes = '.. ../.. ../../../',
target = 'multi',
uselib = 'BOOST EIGEN TBB SFERES LIBCMAES NLOPT',
use = 'limbo')
#ifndef LIMBO_ACQUI_EHVI_HPP
#define LIMBO_ACQUI_EHVI_HPP
#ifndef LIMBO_EXPERIMENTAL_ACQUI_EHVI_HPP
#define LIMBO_EXPERIMENTAL_ACQUI_EHVI_HPP
#include <vector>
@@ -22,11 +22,12 @@ namespace limbo {
size_t dim() const { return _models[0].dim(); }
double operator()(const Eigen::VectorXd& v) const
template <typename AggregatorFunction = FirstElem>
double operator()(const Eigen::VectorXd& v, const AggregatorFunction& afun = AggregatorFunction()) const
{
assert(_models.size() == 2);
double r[3] = {_ref_point(0), _ref_point(1), _ref_point(2)};
double mu[3] = {_models[0].mu(v), _models[1].mu(v), 0};
double mu[3] = {afun(_models[0].mu(v)), afun(_models[1].mu(v)), 0};
double s[3] = {_models[0].sigma(v), _models[1].sigma(v), 0};
double ehvi = ehvi2d(_pop, r, mu, s);
return ehvi;
......
#ifndef LIMBO_BAYES_OPT_BO_MULTI_HPP
#define LIMBO_BAYES_OPT_BO_MULTI_HPP
#define VERSION "xxx"
#include <Eigen/Core>
@@ -15,7 +14,7 @@
#endif
#include <limbo/bayes_opt/bo_base.hpp>
#include <limbo/experimental/bayes_opt/pareto.hpp>
#include <limbo/experimental/tools/pareto.hpp>
namespace limbo {
namespace experimental {
@@ -47,14 +46,17 @@ namespace limbo {
};
};
SFERES_FITNESS(SferesFitBase, sferes::fit::Fitness){
template <typename Indiv>
void eval(const Indiv& indiv){}};
template <typename M>
class SferesFit {
class SferesFit : public SferesFitBase<> {
public:
SferesFit(const std::vector<M>& models) : _models(models) {}
SferesFit() {}
const std::vector<float>& objs() const { return _objs; }
float obj(size_t i) const { return _objs[i]; }
template <typename Indiv>
@@ -66,12 +68,11 @@ namespace limbo {
v[j] = indiv.data(j);
// we protect against overestimation because this has some spurious effect
for (size_t i = 0; i < _models.size(); ++i)
this->_objs[i] = std::min(_models[i].mu(v), _models[i].max_observation());
this->_objs[i] = std::min(_models[i].mu(v)(0), _models[i].max_observation()(0));
}
protected:
std::vector<M> _models;
std::vector<float> _objs;
};
#endif
}
@@ -93,7 +94,6 @@ namespace limbo {
boost::parameter::optional<tag::stopcrit>,
boost::parameter::optional<tag::modelfun>> bo_multi_signature;
// clang-format off
template <class Params,
class A1 = boost::parameter::void_,
@@ -103,22 +103,22 @@ namespace limbo {
class A5 = boost::parameter::void_,
class A6 = boost::parameter::void_>
// clang-format on
class BoMulti : public limbo::bayes_opt::BoBase<Params, A2, A3, A4, A5, A6> {
class BoMulti : public limbo::bayes_opt::BoBase<Params, A1, A2, A3, A4, A5, A6> {
public:
struct defaults {
struct defaults {
#ifdef USE_LIBCMAES
typedef opt::Cmaes<Params> acquiopt_t;
typedef opt::Cmaes<Params> acquiopt_t;
#elif defined(USE_NLOPT)
typedef opt::NLOptNoGrad<Params, nlopt::GN_DIRECT_L_RAND> acquiopt_t;
typedef opt::NLOptNoGrad<Params, nlopt::GN_DIRECT_L_RAND> acquiopt_t;
#else
#warning NO NLOpt, and NO Libcmaes: the acquisition function will be optimized by a grid search algorithm (which is usually bad). Please install at least NLOpt or libcmaes to use limbo!.
typedef opt::GridSearch<Params> acquiopt_t;
typedef opt::GridSearch<Params> acquiopt_t;
#endif
};
typedef typename bo_multi_signature::bind<A1, A2, A3, A4, A5, A6>::type args;
typedef typename boost::parameter::binding<args, tag::acquiopt, typename defaults::acquiopt_t>::type acqui_optimizer_t;
};
typedef typename bo_multi_signature::bind<A1, A2, A3, A4, A5, A6>::type args;
typedef typename boost::parameter::binding<args, tag::acquiopt, typename defaults::acquiopt_t>::type acqui_optimizer_t;
typedef limbo::bayes_opt::BoBase<Params, A2, A3, A4, A5, A6> base_t;
typedef limbo::bayes_opt::BoBase<Params, A1, A2, A3, A4, A5, A6> base_t;
typedef typename base_t::model_t model_t;
typedef typename base_t::acquisition_function_t acquisition_function_t;
// point, obj, sigma
@@ -203,14 +203,15 @@ namespace limbo {
void _update_models()
{
size_t dim = this->_samples[0].size();
std::vector<std::vector<double>> uni_obs(nb_objs());
std::vector<std::vector<Eigen::VectorXd>> uni_obs(nb_objs());
for (size_t i = 0; i < this->_observations.size(); ++i)
for (int j = 0; j < this->_observations[i].size(); ++j)
uni_obs[j].push_back(this->_observations[i][j]);
std::vector<model_t> models(nb_objs(), model_t(dim));
uni_obs[j].push_back(Eigen::VectorXd::Constant(1, this->_observations[i][j]));
std::vector<model_t> models(nb_objs(), model_t(dim, 1));
_models = models;
for (size_t i = 0; i < uni_obs.size(); ++i)
_models[i].compute(this->_samples, uni_obs[i], 1e-5);
for (size_t i = 0; i < uni_obs.size(); ++i) {
_models[i].compute(this->_samples, uni_obs[i], Eigen::VectorXd::Constant(this->_samples.size(), 1e-5));
}
}
};
}
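A condensed sketch of what the refactored _update_models now does, under the assumption (suggested by the new lines above) that model_t takes an input and an output dimension and that compute() expects vector-valued observations plus a per-sample noise vector:

// one GP per objective; each scalar objective value becomes a 1-D observation vector
std::vector<std::vector<Eigen::VectorXd>> uni_obs(nb_objs());
for (size_t i = 0; i < this->_observations.size(); ++i)
    for (int j = 0; j < this->_observations[i].size(); ++j)
        uni_obs[j].push_back(Eigen::VectorXd::Constant(1, this->_observations[i][j]));

std::vector<model_t> models(nb_objs(), model_t(dim, 1)); // dim inputs, 1 output each
_models = models;
for (size_t i = 0; i < uni_obs.size(); ++i)
    // fixed observation noise of 1e-5 for every sample
    _models[i].compute(this->_samples, uni_obs[i],
                       Eigen::VectorXd::Constant(this->_samples.size(), 1e-5));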
......
#ifndef LIMBO_BAYES_OPT_EHVI_HPP
#define LIMBO_BAYES_OPT_EHVI_HPP
#ifndef LIMBO_EXPERIMENTAL_BAYES_OPT_EHVI_HPP
#define LIMBO_EXPERIMENTAL_BAYES_OPT_EHVI_HPP
#include <algorithm>
@@ -31,7 +31,7 @@ namespace limbo {
class Ehvi : public BoMulti<Params, A2, A3, A4, A5, A6> {
public:
typedef std::tuple<Eigen::VectorXd, Eigen::VectorXd, Eigen::VectorXd> pareto_point_t;
typedef limbo::bayes_opt::BoBase<Params, A3, A4, A5, A6> base_t;
typedef limbo::experimental::bayes_opt::BoMulti<Params, A3, A4, A5, A6> base_t;
typedef typename base_t::model_t model_t;
typedef typename base_t::acqui_optimizer_t acqui_optimizer_t;
@@ -44,7 +44,7 @@ namespace limbo {
while (this->_samples.size() == 0 || !this->_stop(*this, FirstElem())) {
std::cout.flush();
this->template update_pareto_model<EvalFunction::dim>();
this->template update_pareto_model<EvalFunction::dim_in>();
this->update_pareto_data();
// copy in the ehvi structure to compute expected improvement
@@ -69,20 +69,26 @@ namespace limbo {
typedef std::pair<Eigen::VectorXd, double> pair_t;
pair_t init(Eigen::VectorXd::Zero(1), -std::numeric_limits<float>::max());
auto body = [&](int i) -> pair_t {
// clang-format off
auto x = this->pareto_data()[i];
// clang-format off
auto x = this->pareto_data()[i];
auto acqui_optimization = AcquiOptimization<acquisition_function_t, AggregatorFunction>(acqui, afun, starting_point);
Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, true);
auto acqui_optimization =
[&](const Eigen::VectorXd& x, bool g) { return opt::no_grad(acqui(x)); };
Eigen::VectorXd s = inner_opt(acqui, acqui.dim(), std::get<0>(x), FirstElem());
double hv = acqui(s);
return std::make_pair(s, hv);
// TODO recheck
// auto acqui_optimization = AcquiOptimization<acquisition_function_t, AggregatorFunction>(acqui, afun, starting_point);
// acqui_optimizer_t acqui_optimizer;
// Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, true);
Eigen::VectorXd s = inner_opt(acqui_optimization, std::get<0>(x), true);
double hv = acqui(s);
return std::make_pair(s, hv);
// clang-format on
};
auto comp = [](const pair_t& v1, const pair_t& v2) {
// clang-format off
return v1.second > v2.second;
// clang-format off
return v1.second > v2.second;
// clang-format on
};
auto m = tools::par::max(init, this->pareto_data().size(), body, comp);
......
#ifndef LIMBO_STAT_PARETO_FRONT_HPP
#define LIMBO_STAT_PARETO_FRONT_HPP
#include <limbo/limbo.hpp>
namespace limbo {
namespace experimental {
namespace stat {
template <typename F>
struct ParetoBenchmark {
template <typename BO, typename AggregatorFunction>
void operator()(BO& opt, const AggregatorFunction& afun, bool blacklisted)
{
opt.update_pareto_data();
#ifndef NSBO // this is already done in NSBO
opt.template update_pareto_model<F::dim_in>();
#endif
auto dir = opt.res_dir() + "/";
auto p_model = opt.pareto_model();
auto p_data = opt.pareto_data();
std::string it = std::to_string(opt.current_iteration());
std::string model = dir + "pareto_model_" + it + ".dat";
std::string model_real = dir + "pareto_model_real_" + it + ".dat";
std::string data = dir + "pareto_data_" + it + ".dat";
std::string obs_f = dir + "obs_" + it + ".dat";
std::ofstream pareto_model(model.c_str()), pareto_data(data.c_str()),
pareto_model_real(model_real.c_str()), obs(obs_f.c_str());
F f;
for (auto x : p_model)
pareto_model << std::get<1>(x).transpose() << " "
<< std::get<2>(x).transpose() << std::endl;
for (auto x : p_model)
pareto_model_real << f(std::get<0>(x)).transpose() << " " << std::endl;
for (auto x : p_data)
pareto_data << std::get<1>(x).transpose() << std::endl;
for (size_t i = 0; i < opt.observations().size(); ++i)
obs << opt.observations()[i].transpose() << " "
<< opt.samples()[i].transpose() << std::endl;
std::cout << "stats done" << std::endl;
}
};
}
}
}
#endif
@@ -5,54 +5,55 @@
#include <limbo/experimental/tools/pareto.hpp>
namespace limbo {
namespace experimental {
namespace stat {
template <typename Params>
struct ParetoFront : public limbo::stat::StatBase<Params> {
// point, obj, sigma
typedef std::tuple<Eigen::VectorXd, Eigen::VectorXd, Eigen::VectorXd> pareto_point_t;
typedef std::vector<pareto_point_t> pareto_t;
namespace experimental {
namespace stat {
template <typename Params>
struct ParetoFront : public limbo::stat::StatBase<Params> {
// point, obj, sigma
typedef std::tuple<Eigen::VectorXd, Eigen::VectorXd, Eigen::VectorXd> pareto_point_t;
typedef std::vector<pareto_point_t> pareto_t;
template <typename BO, typename AggregatorFunction>
void operator()(const BO& bo, const AggregatorFunction&, bool blacklisted)
{
if (!bo.stats_enabled() || bo.observations().empty())
return;
std::string fname = bo.res_dir() + "/" + "pareto_front_" + std::to_string(bo.current_iteration()) + ".dat";
std::ofstream ofs(fname.c_str());
auto pareto = _pareto_data(bo);
for (auto x : pareto) {
ofs << std::get<0>(x).transpose() << " "
<< std::get<1>(x).transpose() << std::endl;
template <typename BO, typename AggregatorFunction>
void operator()(const BO& bo, const AggregatorFunction&, bool blacklisted)
{
if (!bo.stats_enabled() || bo.observations().empty())
return;
std::string fname = bo.res_dir() + "/" + "pareto_front_" + std::to_string(bo.current_iteration()) + ".dat";
std::ofstream ofs(fname.c_str());
auto pareto = _pareto_data(bo);
for (auto x : pareto) {
ofs << std::get<0>(x).transpose() << " "
<< std::get<1>(x).transpose() << std::endl;
}
}
}
protected:
template<typename BO>
pareto_t _pareto_data(const BO& bo)
{
std::vector<Eigen::VectorXd> v(bo.samples().size());
size_t dim = bo.observations().size();
std::fill(v.begin(), v.end(), Eigen::VectorXd::Zero(dim));
return pareto::pareto_set<1>(
_pack_data(bo.samples(), bo.observations(), v));
}
pareto_t _pack_data(const std::vector<Eigen::VectorXd>& points,
const std::vector<Eigen::VectorXd>& objs,
const std::vector<Eigen::VectorXd>& sigma) const
{
assert(points.size() == objs.size());
assert(sigma.size() == objs.size());
pareto_t p(points.size());
tools::par::loop(0, p.size(), [&](size_t k) {
protected:
template <typename BO>
pareto_t _pareto_data(const BO& bo)
{
std::vector<Eigen::VectorXd> v(bo.samples().size());
size_t dim = bo.observations().size();
std::fill(v.begin(), v.end(), Eigen::VectorXd::Zero(dim));
return pareto::pareto_set<1>(
_pack_data(bo.samples(), bo.observations(), v));
}
pareto_t _pack_data(const std::vector<Eigen::VectorXd>& points,
const std::vector<Eigen::VectorXd>& objs,
const std::vector<Eigen::VectorXd>& sigma) const
{
assert(points.size() == objs.size());
assert(sigma.size() == objs.size());
pareto_t p(points.size());
tools::par::loop(0, p.size(), [&](size_t k) {
// clang-format off
p[k] = std::make_tuple(points[k], objs[k], sigma[k]);
// clang-format on
});
return p;
}
};
// clang-format on
});
return p;
}
};
}
}
}
}
#endif
@@ -206,7 +206,7 @@ namespace limbo {
std::cout << "WARNING max_observation with multi dimensional "
"observations doesn't make sense"
<< std::endl;
return _observations.maxCoeff();
return Eigen::VectorXd(_observations.maxCoeff());
}
/// return the mean observation (only call this if the output of the GP is of dimension 1)
......