Commit f319e2a4 authored by Konstantinos Chatzilygeroudis, committed by GitHub

Merge pull request #145 from resibots/bench_dev

Changes in benchmarks
parents c2be9d46 d2c8c928
@@ -258,11 +258,13 @@ struct SixHumpCamel {
     static constexpr size_t dim_out = 1;
     double operator()(const vectord& x) const
     {
-        double x1_2 = x(0) * x(0);
-        double x2_2 = x(1) * x(1);
+        double x1 = -3 + 6 * x(0);
+        double x2 = -2 + 4 * x(1);
+        double x1_2 = x1 * x1;
+        double x2_2 = x2 * x2;
         double tmp1 = (4 - 2.1 * x1_2 + (x1_2 * x1_2) / 3) * x1_2;
-        double tmp2 = x(0) * x(1);
+        double tmp2 = x1 * x2;
         double tmp3 = (-4 + 4 * x2_2) * x2_2;
         return tmp1 + tmp2 + tmp3;
     }
@@ -272,6 +274,10 @@ struct SixHumpCamel {
         matrixd sols(2, 2);
         sols <<= 0.0898, -0.7126,
             -0.0898, 0.7126;
+        sols(0, 0) = (sols(0, 0) + 3.0) / 6.0;
+        sols(1, 0) = (sols(1, 0) + 3.0) / 6.0;
+        sols(0, 1) = (sols(0, 1) + 2.0) / 4.0;
+        sols(1, 1) = (sols(1, 1) + 2.0) / 4.0;
         return sols;
     }
 };
@@ -230,11 +230,13 @@ struct SixHumpCamel {
     static constexpr size_t dim_out = 1;
     double operator()(const Eigen::VectorXd& x) const
     {
-        double x1_2 = x(0) * x(0);
-        double x2_2 = x(1) * x(1);
+        double x1 = -3 + 6 * x(0);
+        double x2 = -2 + 4 * x(1);
+        double x1_2 = x1 * x1;
+        double x2_2 = x2 * x2;
         double tmp1 = (4 - 2.1 * x1_2 + (x1_2 * x1_2) / 3) * x1_2;
-        double tmp2 = x(0) * x(1);
+        double tmp2 = x1 * x2;
         double tmp3 = (-4 + 4 * x2_2) * x2_2;
         return tmp1 + tmp2 + tmp3;
     }
@@ -244,6 +246,10 @@ struct SixHumpCamel {
         Eigen::MatrixXd sols(2, 2);
         sols << 0.0898, -0.7126,
             -0.0898, 0.7126;
+        sols(0, 0) = (sols(0, 0) + 3.0) / 6.0;
+        sols(1, 0) = (sols(1, 0) + 3.0) / 6.0;
+        sols(0, 1) = (sols(0, 1) + 2.0) / 4.0;
+        sols(1, 1) = (sols(1, 1) + 2.0) / 4.0;
         return sols;
     }
 };
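Both SixHumpCamel hunks above make the same fix: the optimizers sample x in the unit square [0, 1]^2, so operator() now first maps the input to the function's native domain [-3, 3] x [-2, 2] before evaluating, and the known optima in sols are mapped back into [0, 1]^2 with the inverse transform. A minimal standalone sketch (not part of the patch) checking that the two maps are consistent:

    // Round-trip check for the [0,1]^2 <-> [-3,3]x[-2,2] rescaling above.
    #include <cassert>
    #include <cmath>

    int main()
    {
        // Known optimum of the Six-Hump Camel in its native domain.
        double x1 = 0.0898, x2 = -0.7126;
        // Inverse map into the unit square, as applied to sols above.
        double u1 = (x1 + 3.0) / 6.0; // ~0.5150
        double u2 = (x2 + 2.0) / 4.0; // ~0.3219
        // Forward map used in operator() recovers the native point.
        assert(std::abs((-3 + 6 * u1) - x1) < 1e-12);
        assert(std::abs((-2 + 4 * u2) - x2) < 1e-12);
    }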
@@ -117,7 +117,7 @@ namespace limbo {
             }
             if (Params::bayes_opt_boptimizer::hp_period() > 0
-                && this->_current_iteration % Params::bayes_opt_boptimizer::hp_period() == 0)
+                && (this->_current_iteration + 1) % Params::bayes_opt_boptimizer::hp_period() == 0)
                 _model.optimize_hyperparams();
             this->_current_iteration++;
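This is an off-by-one fix: _current_iteration is incremented only after the check, so the old condition re-optimized the hyperparameters already at iteration 0, while the new one fires only after every hp_period() completed iterations. A small standalone sketch of the difference, assuming hp_period() == 3:

    // Which iterations trigger optimize_hyperparams() under each rule,
    // for hp_period = 3 (iterations 0-indexed, as in the loop above).
    #include <iostream>

    int main()
    {
        const int hp_period = 3;
        for (int it = 0; it < 7; ++it) {
            bool old_rule = (it % hp_period == 0);       // fires at 0, 3, 6
            bool new_rule = ((it + 1) % hp_period == 0); // fires at 2, 5
            std::cout << it << ": old=" << old_rule << " new=" << new_rule << "\n";
        }
    }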
@@ -33,7 +33,7 @@ namespace limbo {
         {
             //assert(Params::SquaredExpARD::k()<dim);
             Eigen::VectorXd p = Eigen::VectorXd::Zero(_ell.size() + _ell.size() * Params::kernel_squared_exp_ard::k());
-            p.head(_ell.size()) = Eigen::VectorXd::Ones(_ell.size()) * -1;
+            p.head(_ell.size()) = Eigen::VectorXd::Ones(_ell.size());
             this->set_h_params(p);
             _sf2 = Params::kernel_squared_exp_ard::sigma_sq();
         }
@@ -46,7 +46,7 @@ namespace limbo {
         {
             _h_params = p;
             for (size_t i = 0; i < _input_dim; ++i)
-                _ell(i) = std::exp(p(i));
+                _ell(i) = p(i);
             for (size_t j = 0; j < (unsigned int)Params::kernel_squared_exp_ard::k(); ++j)
                 for (size_t i = 0; i < _input_dim; ++i)
                     _A(i, j) = p((j + 1) * _input_dim + i); //can be negative
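The kernel hyperparameters are no longer stored in log space: _ell now receives the length scales directly, so the default initialization changes from -1 (meaning exp(-1) under the old convention) to 1 (a unit length scale). For reference, a standalone sketch of the value this corresponds to for k() == 0, assuming the standard SE-ARD form k(v1, v2) = sf2 * exp(-0.5 * sum_i ((v1_i - v2_i) / ell_i)^2):

    #include <Eigen/Core>
    #include <cmath>
    #include <iostream>

    // Standard SE-ARD (k() == 0 case); ell holds plain length scales.
    double se_ard(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2,
                  const Eigen::VectorXd& ell, double sf2)
    {
        double z = ((v1 - v2).array() / ell.array()).matrix().squaredNorm();
        return sf2 * std::exp(-0.5 * z);
    }

    int main()
    {
        Eigen::VectorXd a(2), b(2), ell(2);
        a << 1, 1; b << 0, 1;
        ell << 1, 1; // unit length scales, as in the new default
        std::cout << se_ard(a, b, ell, 1.0) << std::endl; // exp(-0.5)
    }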
@@ -18,11 +18,10 @@ namespace limbo {
         template <typename GP>
         void operator()(GP& gp) const
         {
-            int dim = gp.kernel_function().h_params_size();
             KernelLFOptimization<GP> optimization(gp);
             Optimizer optimizer;
-            auto params = optimizer(optimization, tools::random_vector(dim), false);
-            gp.kernel_function().set_h_params(params);
+            auto params = optimizer(optimization, gp.kernel_function().h_params(), true);
+            gp.kernel_function().set_h_params(-6.0 + params.array() * 7.0);
             gp.set_lik(opt::eval(optimization, params));
             gp.recompute(false);
         }
@@ -36,7 +35,7 @@ namespace limbo {
         opt::eval_t operator()(const Eigen::VectorXd& params, bool compute_grad) const
        {
             GP gp(this->_original_gp);
-            gp.kernel_function().set_h_params(params);
+            gp.kernel_function().set_h_params(-6.0 + params.array() * 7.0);
             gp.recompute(false);
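The likelihood optimization now starts from the kernel's current hyperparameters and runs bounded (the true flag), and each candidate p in the optimizer's [0, 1] space is mapped affinely to [-6, 1] via -6 + 7p before being set on the kernel, so p = 0 gives -6, p = 0.5 gives -2.5, and p = 1 gives 1. The same mapping appears in both the outer call and the evaluation functor, keeping the two consistent. A tiny standalone sketch of the map, as illustration only:

    // Affine map from the bounded optimizer space [0, 1]
    // to the hyperparameter range [-6, 1].
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        Eigen::VectorXd p(3);
        p << 0.0, 0.5, 1.0;
        Eigen::VectorXd h = (-6.0 + p.array() * 7.0).matrix(); // -6, -2.5, 1
        std::cout << h.transpose() << std::endl;
    }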
@@ -79,6 +79,11 @@ namespace limbo {
             }
             catch (nlopt::roundoff_limited& e) {
                 // In theory it's ok to ignore this error
+                std::cerr << "[NLOptGrad]: " << e.what() << std::endl;
+            }
+            catch (std::invalid_argument& e) {
+                // In theory it's ok to ignore this error
+                std::cerr << "[NLOptGrad]: " << e.what() << std::endl;
             }
             return Eigen::VectorXd::Map(x.data(), x.size());
@@ -96,6 +96,11 @@ namespace limbo {
             }
             catch (nlopt::roundoff_limited& e) {
                 // In theory it's ok to ignore this error
+                std::cerr << "[NLOptNoGrad]: " << e.what() << std::endl;
+            }
+            catch (std::invalid_argument& e) {
+                // In theory it's ok to ignore this error
+                std::cerr << "[NLOptNoGrad]: " << e.what() << std::endl;
             }
             return Eigen::VectorXd::Map(x.data(), x.size());
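Both NLOpt wrappers now log the exceptions they intentionally swallow instead of failing silently; in either case x still holds the best point found so far, which is what gets returned. A condensed standalone sketch of the pattern, assuming an already-configured nlopt::opt instance (the function name here is illustrative, not limbo's):

    #include <nlopt.hpp>
    #include <iostream>
    #include <vector>

    void safe_optimize(nlopt::opt& opt, std::vector<double>& x, double& fmax)
    {
        try {
            opt.optimize(x, fmax);
        }
        catch (nlopt::roundoff_limited& e) {
            // Roundoff limited progress; x still holds the best point.
            std::cerr << "[NLOpt]: " << e.what() << std::endl;
        }
        catch (std::invalid_argument& e) {
            // E.g. inconsistent bounds; log it and keep the current x.
            std::cerr << "[NLOpt]: " << e.what() << std::endl;
        }
    }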
@@ -29,8 +29,9 @@ BOOST_AUTO_TEST_CASE(test_kernel_SE_ARD)
     kernel::SquaredExpARD<Params> se(2);
     Eigen::VectorXd hp(se.h_params_size());
-    hp(0) = 0; //exp(0)=1
-    hp(1) = 0;
+    // kernel does not expect in log space anymore
+    hp(0) = 1;
+    hp(1) = 1;
     se.set_h_params(hp);
@@ -40,9 +41,9 @@ BOOST_AUTO_TEST_CASE(test_kernel_SE_ARD)
     Eigen::VectorXd v2 = make_v2(0, 1);
     double s1 = se(v1, v2);
-    BOOST_CHECK(std::abs(s1 - exp(-0.5 * (v1.transpose() * v2)[0])) < 1e-5);
+    BOOST_CHECK(std::abs(s1 - std::exp(-0.5 * (v1.transpose() * v2)[0])) < 1e-5);
-    hp(0) = 1;
+    hp(0) = std::exp(1);
     se.set_h_params(hp);
     double s2 = se(v1, v2);
     BOOST_CHECK(s1 < s2);
@@ -50,8 +51,8 @@ BOOST_AUTO_TEST_CASE(test_kernel_SE_ARD)
     Params::kernel_squared_exp_ard::set_k(1);
     se = kernel::SquaredExpARD<Params>(2);
     hp = Eigen::VectorXd(se.h_params_size());
-    hp(0) = 0;
-    hp(1) = 0;
+    hp(0) = 1;
+    hp(1) = 1;
     hp(2) = 0;
     hp(3) = 0;
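The test updates mirror the kernel change: hyperparameters are set on a linear scale, so the neutral value is 1 (unit length scale) instead of 0 (exp(0) = 1 under the old log-space convention), and the "wider kernel" case becomes hp(0) = std::exp(1) so that the expected ordering s1 < s2 still holds. A standalone check of that ordering, assuming the standard SE-ARD form:

    // Increasing a length scale from 1 to e brings k(v1, v2) closer to 1,
    // which is why the test expects s1 < s2.
    #include <cassert>
    #include <cmath>

    int main()
    {
        double d0 = 1.0, d1 = 0.0; // example per-dimension differences
        auto k = [&](double e0, double e1) {
            return std::exp(-0.5 * (d0 * d0 / (e0 * e0) + d1 * d1 / (e1 * e1)));
        };
        double s1 = k(1.0, 1.0);           // hp(0) = 1
        double s2 = k(std::exp(1.0), 1.0); // hp(0) = std::exp(1)
        assert(s1 < s2);                   // exp(-0.5) < exp(-0.5/e^2)
    }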