Commit 7beb47fa authored by Konstantinos Chatzilygeroudis

Make the code compile without cmaes

parent 51b8cda8
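
The change wraps the CMA-ES-dependent code in preprocessor guards so that a build without libcmaes still compiles. As background, a minimal sketch of that guard pattern (the include path and flag wiring here are illustrative assumptions; the actual change is in the diff below):

// Sketch of the optional-dependency guard pattern this commit applies.
// The build system is assumed to define USE_LIBCMAES only when libcmaes
// was found, so the cmaes-specific code is never seen otherwise.
#ifdef USE_LIBCMAES
#include <libcmaes/cmaes.h> // illustrative include, compiled only when available
#endif

int main()
{
#ifdef USE_LIBCMAES
    // CMA-ES-specific calls would go here
#endif
    return 0; // the program still builds and runs without libcmaes
}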
@@ -18,7 +18,7 @@ We first need to define a function to be optimized. Here we chose :math:`-(x_1-0.5)^2 - (x_2-0.5)^2`
 .. literalinclude:: ../../src/tutorials/opt.cpp
    :language: c++
    :linenos:
-   :lines: 27-34
+   :lines: 30-37

 .. warning::
@@ -37,14 +37,14 @@ As usual, each algorithm has some parameters (typically the number of iterations)
 .. literalinclude:: ../../src/tutorials/opt.cpp
    :language: c++
    :linenos:
-   :lines: 7-10
+   :lines: 7-26

 Now we can instantiate our optimizer and call it:

 .. literalinclude:: ../../src/tutorials/opt.cpp
    :language: c++
    :linenos:
-   :lines: 40-45
+   :lines: 43-48

 We can do the same with a gradient-free optimizer from NLOpt:
@@ -52,7 +52,7 @@ We can do the same with a gradient-free optimizer from NLOpt:
 .. literalinclude:: ../../src/tutorials/opt.cpp
    :language: c++
    :linenos:
-   :lines: 47-53
+   :lines: 50-56

 Or with CMA-ES:
@@ -60,7 +60,7 @@ Or with CMA-ES:
 .. literalinclude:: ../../src/tutorials/opt.cpp
    :language: c++
    :linenos:
-   :lines: 58-62
+   :lines: 61-65

 See the :ref:`API documentation <opt-api>` for more details.
@@ -68,5 +68,5 @@ See the :ref:`API documentation <opt-api>` for more details.
 Here is the full file.

 .. literalinclude:: ../../src/tutorials/opt.cpp
-   :language: python
+   :language: c++
    :linenos:
@@ -4,63 +4,66 @@
 // this short tutorial shows how to use the optimization api of limbo (opt::)
 using namespace limbo;
+#ifdef USE_NLOPT
 struct ParamsGrad {
     struct opt_nloptgrad {
         BO_PARAM(int, iterations, 80);
     };
 };

 struct ParamsNoGrad {
     struct opt_nloptnograd {
         BO_PARAM(int, iterations, 80);
     };
 };
+#endif
+#ifdef USE_LIBCMAES
 struct ParamsCMAES {
     struct opt_cmaes : public defaults::opt_cmaes {
     };
 };
+#endif
 // we maximize -(x_1-0.5)^2 - (x_2-0.5)^2
 // the maximum is [0.5, 0.5] (f([0.5, 0.5]) = 0)
 opt::eval_t my_function(const Eigen::VectorXd& params, bool eval_grad = false)
 {
     double v = -(params.array() - 0.5).square().sum();
     if (!eval_grad)
         return opt::no_grad(v);
     Eigen::VectorXd grad = (-2 * params).array() + 1.0;
     return {v, grad};
 }

 int main(int argc, char** argv)
 {
 #ifdef USE_NLOPT
     // the type of the optimizer (here NLOpt with the LD_LBFGS algorithm)
     opt::NLOptGrad<ParamsGrad, nlopt::LD_LBFGS> lbfgs;
     // we start from a random point (in 2D), and the search is not bounded
     Eigen::VectorXd res_lbfgs = lbfgs(my_function, tools::random_vector(2), false);
-    std::cout <<"Result with LBFGS:\t" << res_lbfgs.transpose()
+    std::cout << "Result with LBFGS:\t" << res_lbfgs.transpose()
               << " -> " << my_function(res_lbfgs).first << std::endl;

     // we can also use a gradient-free algorithm, like DIRECT
     opt::NLOptNoGrad<ParamsNoGrad, nlopt::GN_DIRECT> direct;
     // we start from a random point (in 2D), and the search is bounded in [0,1]
     // be careful that DIRECT does not support unbounded search
     Eigen::VectorXd res_direct = direct(my_function, tools::random_vector(2), true);
-    std::cout <<"Result with DIRECT:\t" << res_direct.transpose()
+    std::cout << "Result with DIRECT:\t" << res_direct.transpose()
               << " -> " << my_function(res_direct).first << std::endl;
 #endif
 #ifdef USE_LIBCMAES
     // or Cmaes
     opt::Cmaes<ParamsCMAES> cmaes;
     Eigen::VectorXd res_cmaes = cmaes(my_function, tools::random_vector(2), false);
-    std::cout <<"Result with CMA-ES:\t" << res_cmaes.transpose()
+    std::cout << "Result with CMA-ES:\t" << res_cmaes.transpose()
               << " -> " << my_function(res_cmaes).first << std::endl;
 #endif
     return 0;
 }
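
The optimizers above are templated on the callable, so the objective does not have to be a free function. A hedged usage sketch, not part of this commit: the umbrella header and the `Params` struct below are assumptions modeled on the tutorial, and any callable with the same `(params, eval_grad) -> opt::eval_t` convention as `my_function` should work.

#include <iostream>
#include <limbo/opt.hpp> // assumed umbrella header, matching the tutorial's includes

using namespace limbo;

#ifdef USE_NLOPT
struct Params {
    struct opt_nloptnograd {
        BO_PARAM(int, iterations, 80);
    };
};
#endif

int main()
{
#ifdef USE_NLOPT
    // maximize -||x||^2; the optimum is the origin
    auto sphere = [](const Eigen::VectorXd& x, bool eval_grad = false) {
        return opt::no_grad(-x.squaredNorm());
    };
    opt::NLOptNoGrad<Params, nlopt::GN_DIRECT> direct;
    // bounded search in [0,1]^2 (DIRECT needs bounds), random starting point
    Eigen::VectorXd best = direct(sphere, tools::random_vector(2), true);
    std::cout << "optimum: " << best.transpose() << std::endl;
#endif
    return 0;
}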