Commit 9a83714f authored by Konstantinos Chatzilygeroudis's avatar Konstantinos Chatzilygeroudis
Browse files

Fix for seg faults + cleaning

parent 1f45abc3
......@@ -17,10 +17,10 @@ namespace limbo {
public:
// Computes the GP-UCB exploration coefficient (Srinivas et al., 2010):
//   beta = sqrt(2 * log(t^3 * pi^2 / (3 * delta)))
// where t is the current iteration and delta comes from the params.
GP_UCB(const Model& model, int iteration) : _model(model)
{
    double t3 = std::pow(iteration, 3.0);
    // delta() must be a constexpr param accessor for this to compile.
    static constexpr double delta3 = Params::acqui_gpucb::delta() * 3;
    static constexpr double pi2 = M_PI * M_PI;
    // std::sqrt (not sqrtf) keeps the whole computation in double precision.
    _beta = std::sqrt(2.0 * std::log(t3 * pi2 / delta3));
}
// Dimensionality of the model's input space (forwarded from the model).
size_t dim_in() const { return _model.dim_in(); }
......@@ -33,7 +33,7 @@ namespace limbo {
Eigen::VectorXd mu;
double sigma;
std::tie(mu, sigma) = _model.query(v);
return (afun(mu) + _beta * sqrt(sigma));
return (afun(mu) + _beta * std::sqrt(sigma));
}
protected:
......
......@@ -111,12 +111,12 @@ namespace limbo {
#ifdef USE_LIBCMAES
typedef opt::Cmaes<Params> acquiopt_t; // 2
#elif defined(USE_NLOPT)
typedef opt::NLOptNoGrad<Params, nlopt::GN_DIRECT_L_RAND> acquiopt_t;
typedef opt::NLOptNoGrad<Params, nlopt::GN_DIRECT_L_RAND> acquiopt_t;
#else
#warning NO NLOpt, and NO Libcmaes: the acquisition function will be optimized by a grid search algorithm (which is usually bad). Please install at least NLOpt or libcmaes to use limbo!.
typedef opt::GridSearch<Params> acquiopt_t;
typedef opt::GridSearch<Params> acquiopt_t;
#endif
typedef kernel::SquaredExpARD<Params> kf_t;
typedef mean::Data<Params> mean_t;
typedef model::GP<Params, kf_t, mean_t, model::gp::KernelLFOpt<Params>> model_t; // 3
......
......@@ -47,7 +47,7 @@ namespace limbo {
// we do not have gradient in our current acquisition function
auto acqui_optimization =
[&](const Eigen::VectorXd& x, bool g) { return opt::no_grad(acqui(x, afun)); };
[&](const Eigen::VectorXd& x, bool g) { return opt::no_grad(acqui(x, afun)); };
Eigen::VectorXd starting_point = tools::rand_vec(StateFunction::dim_in);
Eigen::VectorXd new_sample = acqui_optimizer(acqui_optimization, starting_point, true);
bool blacklisted = false;
......
......@@ -18,7 +18,7 @@ namespace limbo {
// Squared-exponential kernel: k(v1, v2) = exp(-||v1 - v2||^2 / (2 * sigma^2)).
// @param v1, v2  input points of equal dimension
// @return kernel value in (0, 1], equal to 1 when v1 == v2
double operator()(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
{
    double sigma = Params::kernel_exp::sigma();
    // squaredNorm() gives ||v1 - v2||^2 directly — avoids the sqrt inside
    // norm() followed by pow(., 2); sigma * sigma avoids a std::pow call.
    return std::exp(-(v1 - v2).squaredNorm() / (2.0 * sigma * sigma));
}
};
}
......
......@@ -14,7 +14,7 @@
#include <limbo/opt/optimizer.hpp>
namespace limbo {
namespace defaults {
namespace defaults {
// Default parameters for the gradient-based NLOpt optimizer wrapper.
struct opt_nloptgrad {
    // Maximum number of optimizer iterations (BO_PARAM generates the accessor).
    BO_PARAM(int, iterations, 500);
};
......@@ -52,17 +52,17 @@ namespace limbo {
// NLOpt objective callback (gradient-capable variant).
// NLOpt hands back the user data as a void*; it was registered as a pointer
// to the functor F, so it must be cast back to F* — casting the void* itself
// to F& reinterprets the pointer value as the object and segfaults.
// @param x            current parameter vector (from NLOpt)
// @param grad         output gradient buffer; empty for derivative-free calls
// @param my_func_data opaque pointer to the functor F
// @return objective value at x
template <typename F>
static double nlopt_func(const std::vector<double>& x, std::vector<double>& grad, void* my_func_data)
{
    F* f = static_cast<F*>(my_func_data);
    Eigen::VectorXd params = Eigen::VectorXd::Map(x.data(), x.size());
    double v;
    if (!grad.empty()) {
        // Gradient requested: evaluate value and gradient in one call.
        auto r = eval_grad(*f, params);
        v = opt::fun(r);
        Eigen::VectorXd g = opt::grad(r);
        // Copy the Eigen gradient into NLOpt's std::vector buffer.
        Eigen::VectorXd::Map(&grad[0], g.size()) = g;
    }
    else {
        v = eval(*f, params);
    }
    return v;
}
......
......@@ -29,6 +29,7 @@ namespace limbo {
// Assert that the algorithm is non-gradient
// TO-DO: Add support for MLSL (Multi-Level Single-Linkage)
// TO-DO: Add better support for ISRES (Improved Stochastic Ranking Evolution Strategy)
// clang-format off
static_assert(Algorithm == nlopt::LN_COBYLA || Algorithm == nlopt::LN_BOBYQA ||
Algorithm == nlopt::LN_NEWUOA || Algorithm == nlopt::LN_NEWUOA_BOUND ||
Algorithm == nlopt::LN_PRAXIS || Algorithm == nlopt::LN_NELDERMEAD ||
......@@ -39,6 +40,7 @@ namespace limbo {
Algorithm == nlopt::GN_ORIG_DIRECT_L || Algorithm == nlopt::GN_CRS2_LM ||
Algorithm == nlopt::GD_STOGO || Algorithm == nlopt::GD_STOGO_RAND ||
Algorithm == nlopt::GN_ISRES || Algorithm == nlopt::GN_ESCH, "NLOptNoGrad accepts gradient free nlopt algorithms only");
// clang-format on
int dim = init.size();
nlopt::opt opt(Algorithm, dim);
......@@ -66,9 +68,9 @@ namespace limbo {
// NLOpt objective callback (gradient-free variant).
// The user data is registered as a pointer to the functor, so recover it with
// a pointer cast; the old reference cast of the void* itself caused segfaults.
// @param x            current parameter vector (from NLOpt)
// @param grad         unused — this wrapper serves gradient-free algorithms only
// @param my_func_data opaque pointer to the functor F
// @return objective value at x
template <typename F>
static double nlopt_func(const std::vector<double>& x, std::vector<double>& grad, void* my_func_data)
{
    F* f = static_cast<F*>(my_func_data);
    Eigen::VectorXd params = Eigen::VectorXd::Map(x.data(), x.size());
    double v = eval(*f, params);
    return v;
}
};
......
......@@ -96,6 +96,13 @@ def build_extensive_tests(ctx):
ctx.recurse('src/')
ctx.recurse('src/tests')
def run_extensive_tests(ctx):
    """Run every executable produced by the extensive-tests build.

    Scans build/src/tests/combinations/ for executable files and runs each
    one from its own directory. `ctx` is the waf context (unused here but
    required by the waf command signature).
    """
    for fullname in glob.glob('build/src/tests/combinations/*'):
        if os.path.isfile(fullname) and os.access(fullname, os.X_OK):
            fpath, fname = os.path.split(fullname)
            # print() call form works under both Python 2 and 3.
            print("Running: " + fname)
            # cwd= replaces the fragile "cd dir; ./bin" shell string.
            retcode = subprocess.call("./" + fname, shell=True, env=None, cwd=fpath)
def submit_extensive_tests(ctx):
for fullname in glob.glob('build/src/tests/combinations/*'):
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment