Commit f308ceb1 authored by Federico Allocati's avatar Federico Allocati
Browse files

Added "bounded" parameter to optimizers, and using it when pertinent

parent f5846415
......@@ -69,7 +69,7 @@ namespace limbo {
Eigen::VectorXd starting_point = (Eigen::VectorXd::Random(StateFunction::dim_in).array() + 1) / 2;
auto acqui_optimization = AcquiOptimization<acquisition_function_t, AggregatorFunction>(acqui, afun, starting_point);
Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization);
Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, true);
bool blacklisted = false;
try {
this->add_new_sample(new_sample, sfun(new_sample));
......
......@@ -17,7 +17,7 @@ namespace limbo {
{
KernelLFOptimization<GP> optimization(gp);
opt::ParallelRepeater<Params, opt::Rprop<Params>> par_rprop;
auto params = par_rprop(optimization);
auto params = par_rprop(optimization, false);
gp.kernel_function().set_h_params(params);
gp.set_lik(optimization.utility(params));
gp.update();
......
......@@ -17,7 +17,7 @@ namespace limbo {
{
KernelMeanLFOptimization<GP> optimization(gp);
opt::ParallelRepeater<Params, opt::Rprop<Params>> par_rprop;
auto params = par_rprop(optimization);
auto params = par_rprop(optimization, false);
gp.kernel_function().set_h_params(params.head(gp.kernel_function().h_params_size()));
gp.mean_function().set_h_params(params.tail(gp.mean_function().h_params_size()));
gp.set_lik(optimization.utility(params));
......
......@@ -17,7 +17,7 @@ namespace limbo {
{
MeanLFOptimization<GP> optimization(gp);
opt::ParallelRepeater<Params, opt::Rprop<Params>> par_rprop;
auto params = par_rprop(optimization);
auto params = par_rprop(optimization, false);
gp.mean_function().set_h_params(params);
gp.set_lik(optimization.utility(params));
gp.update();
......
......@@ -28,8 +28,10 @@ namespace limbo {
struct Cmaes {
public:
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, double bounded) const
{
// Currently cmaes does not support unbounded search
assert(bounded);
int nrestarts = Params::cmaes::nrestarts();
size_t dim = f.param_size();
double incpopsize = 2;
......@@ -41,12 +43,12 @@ namespace limbo {
int irun, lambda = 0, countevals = 0;
char const* stop;
boundary_transformation_t boundaries;
double lowerBounds[] = {0.0}; // TODO put this into params?
double lowerBounds[] = {0.0};
double upperBounds[] = {1.006309}; // Allows solution to be pretty close to 1
int nb_bounds = 1; /* numbers used from lower and upperBounds */
boundary_transformation_init(&boundaries, lowerBounds, upperBounds,
nb_bounds);
boundary_transformation_init(&boundaries, lowerBounds, upperBounds, nb_bounds);
double* x_in_bounds = cmaes_NewDouble(dim);
double init_point[dim];
for (int i = 0; i < dim; ++i)
......
......@@ -11,8 +11,10 @@ namespace limbo {
struct GridSearch {
public:
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, bool bounded) const
{
// Grid search does not support unbounded search
assert(bounded);
return _inner_search(f, 0, f.init());
}
......
......@@ -16,7 +16,7 @@ namespace limbo {
struct NLOptGrad {
public:
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, bool bounded) const
{
nlopt::opt opt(Algorithm, f.param_size());
......@@ -27,9 +27,14 @@ namespace limbo {
opt.set_maxeval(Params::nlopt::iters());
double min;
if (bounded) {
opt.set_lower_bounds(std::vector<double>(f.param_size(), 0));
opt.set_upper_bounds(std::vector<double>(f.param_size(), 1));
}
double max;
opt.optimize(x, min);
opt.optimize(x, max);
return Eigen::VectorXd::Map(x.data(), x.size());
}
......
......@@ -16,7 +16,7 @@ namespace limbo {
struct NLOptNoGrad {
public:
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, bool bounded) const
{
// Assert that the algorithm is non-gradient
// TODO: Add support for MLSL (Multi-Level Single-Linkage)
......@@ -41,9 +41,14 @@ namespace limbo {
opt.set_maxeval(Params::nlopt::iters());
double min;
if (bounded) {
opt.set_lower_bounds(std::vector<double>(f.param_size(), 0));
opt.set_upper_bounds(std::vector<double>(f.param_size(), 1));
}
opt.optimize(x, min);
double max;
opt.optimize(x, max);
return Eigen::VectorXd::Map(x.data(), x.size());
}
......
......@@ -19,13 +19,13 @@ namespace limbo {
template <typename Params, typename Optimizer>
struct ParallelRepeater {
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, bool bounded) const
{
tools::par::init();
typedef std::pair<Eigen::VectorXd, double> pair_t;
auto body = [&](int i) {
// clang-format off
Eigen::VectorXd v = Optimizer()(f);
Eigen::VectorXd v = Optimizer()(f, bounded);
double lik = f.utility(v);
return std::make_pair(v, lik);
......
......@@ -8,8 +8,10 @@ namespace limbo {
template <typename Params>
struct RandomPoint {
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, bool bounded) const
{
// Random point does not support unbounded search
assert(bounded);
return (Eigen::VectorXd::Random(f.param_size()).array() + 1) / 2;
}
};
......
......@@ -25,7 +25,7 @@ namespace limbo {
template <typename Params>
struct Rprop {
template <typename F>
Eigen::VectorXd operator()(const F& f) const
Eigen::VectorXd operator()(const F& f, bool bounded) const
{
// params
size_t param_dim = f.param_size();
......@@ -39,6 +39,16 @@ namespace limbo {
Eigen::VectorXd delta = Eigen::VectorXd::Ones(param_dim) * delta0;
Eigen::VectorXd grad_old = Eigen::VectorXd::Zero(param_dim);
Eigen::VectorXd params = f.init();
if (bounded) {
for (int j = 0; j < params.size(); j++) {
if (params(j) < 0)
params(j) = 0;
if (params(j) > 1)
params(j) = 1;
}
}
Eigen::VectorXd best_params = params;
double best = log(0);
......@@ -61,6 +71,11 @@ namespace limbo {
grad(j) = 0;
}
params(j) += -boost::math::sign(grad(j)) * delta(j);
if (bounded && params(j) < 0)
params(j) = 0;
if (bounded && params(j) > 1)
params(j) = 1;
}
grad_old = grad;
......
......@@ -45,7 +45,7 @@ public:
BOOST_AUTO_TEST_CASE(test_nlopt_grad_simple)
{
TestOpt util;
Eigen::VectorXd g = limbo::opt::NLOptGrad<Params, nlopt::LD_MMA>()(util);
Eigen::VectorXd g = limbo::opt::NLOptGrad<Params, nlopt::LD_MMA>()(util, false);
BOOST_CHECK_SMALL(g(0), 0.00000001);
BOOST_CHECK_SMALL(g(1), 0.00000001);
......@@ -54,7 +54,7 @@ BOOST_AUTO_TEST_CASE(test_nlopt_grad_simple)
BOOST_AUTO_TEST_CASE(test_nlopt_no_grad_simple)
{
TestOpt util;
Eigen::VectorXd g = limbo::opt::NLOptNoGrad<Params, nlopt::LN_COBYLA>()(util);
Eigen::VectorXd g = limbo::opt::NLOptNoGrad<Params, nlopt::LN_COBYLA>()(util, false);
BOOST_CHECK_SMALL(g(0), 0.00000001);
BOOST_CHECK_SMALL(g(1), 0.00000001);
......
......@@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(test_random_mono_dim)
FakeAcquiMono f;
monodim_calls = 0;
for (int i = 0; i < 1000; i++) {
Eigen::VectorXd best_point = optimizer(make_functor_optimization(f));
Eigen::VectorXd best_point = optimizer(make_functor_optimization(f), true);
BOOST_CHECK_EQUAL(best_point.size(), 1);
BOOST_CHECK(best_point(0) > 0 || std::abs(best_point(0)) < 1e-7);
BOOST_CHECK(best_point(0) < 1 || std::abs(best_point(0) - 1) < 1e-7);
......@@ -107,7 +107,7 @@ BOOST_AUTO_TEST_CASE(test_random_bi_dim)
auto f_optimization = make_functor_optimization(f);
bidim_calls = 0;
for (int i = 0; i < 1000; i++) {
Eigen::VectorXd best_point = optimizer(f_optimization);
Eigen::VectorXd best_point = optimizer(f_optimization, true);
BOOST_CHECK_EQUAL(best_point.size(), 2);
BOOST_CHECK(best_point(0) > 0 || std::abs(best_point(0)) < 1e-7);
BOOST_CHECK(best_point(0) < 1 || std::abs(best_point(0) - 1) < 1e-7);
......@@ -124,7 +124,7 @@ BOOST_AUTO_TEST_CASE(test_grid_search_mono_dim)
FakeAcquiMono f;
monodim_calls = 0;
Eigen::VectorXd best_point = optimizer(make_functor_optimization(f));
Eigen::VectorXd best_point = optimizer(make_functor_optimization(f), true);
BOOST_CHECK_EQUAL(best_point.size(), 1);
BOOST_CHECK_CLOSE(best_point(0), 1, 0.0001);
......@@ -140,7 +140,7 @@ BOOST_AUTO_TEST_CASE(test_grid_search_bi_dim)
FakeAcquiBi f;
auto f_optimization = make_functor_optimization(f);
bidim_calls = 0;
Eigen::VectorXd best_point = optimizer(f_optimization);
Eigen::VectorXd best_point = optimizer(f_optimization, true);
BOOST_CHECK_EQUAL(best_point.size(), 2);
BOOST_CHECK_CLOSE(best_point(0), 1, 0.0001);
......@@ -156,7 +156,7 @@ BOOST_AUTO_TEST_CASE(test_cmaes_mono_dim)
opt::Cmaes<Params> optimizer;
FakeAcquiMono f;
Eigen::VectorXd best_point = optimizer(make_functor_optimization(f));
Eigen::VectorXd best_point = optimizer(make_functor_optimization(f), true);
BOOST_CHECK_EQUAL(best_point.size(), 1);
BOOST_CHECK_CLOSE(best_point(0), 1, 0.0001);
......@@ -170,7 +170,7 @@ BOOST_AUTO_TEST_CASE(test_cmaes_bi_dim)
FakeAcquiBi f;
auto f_optimization = make_functor_optimization(f);
Eigen::VectorXd best_point = optimizer(f_optimization);
Eigen::VectorXd best_point = optimizer(f_optimization, true);
BOOST_CHECK_EQUAL(best_point.size(), 2);
BOOST_CHECK_CLOSE(best_point(0), 1, 0.0001);
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment