Commit 9789970f authored by Jean-Baptiste Mouret

first trials

parent 93adfc2b
@@ -34,10 +34,13 @@ extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinxcontrib.bibtex',
    'breathe'
]

breathe_projects = { "limbo": "/Users/jbm/Documents/git/resibots/limbo/docs/doxygen_doc/xml/" }
breathe_default_project = "limbo"

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
...
@@ -45,6 +45,7 @@ Contents:

   self
   tutorials/index
   guides/index
   api
   faq
...
@@ -7,7 +7,11 @@
namespace limbo {
    namespace defaults {
        /**
        \defgroup defaults
        */
        struct acqui_gpucb {
            /// \ingroup defaults
            BO_PARAM(double, delta, 0.001);
        };
    }
...
@@ -8,6 +8,7 @@
namespace limbo {
    namespace defaults {
        struct acqui_ucb {
            /// \ingroup defaults
            BO_PARAM(double, alpha, 0.5);
        };
    }
...
@@ -92,6 +92,51 @@ namespace limbo {
        class A4 = boost::parameter::void_,
        class A5 = boost::parameter::void_>
    // clang-format on
    /** Base class for Bayesian optimizers

    **Parameters**
    - ``bool Params::bayes_opt_bobase::stats_enabled``: activate / deactivate the statistics

    This class is templated by several types with default values (thanks to boost::parameters).

    \rst
    +----------------+---------+---------+---------------+
    |type            |typedef  | argument| default       |
    +================+=========+=========+===============+
    |init. func.     |init_t   | initfun | RandomSampling|
    +----------------+---------+---------+---------------+
    |model           |model_t  | modelfun| GP<...>       |
    +----------------+---------+---------+---------------+
    |acquisition fun.|acqui_t  | acquifun| GP_UCB        |
    +----------------+---------+---------+---------------+
    |statistics      | stat_t  | statfun | see below     |
    +----------------+---------+---------+---------------+
    |stopping crit.  | stop_t  | stopcrit| MaxIterations |
    +----------------+---------+---------+---------------+

    For the GP, the default value is ``model::GP<Params, kf_t, mean_t, opt_t>``,
    - with ``kf_t = kernel::SquaredExpARD<Params>``
    - with ``mean_t = mean::Data<Params>``
    - with ``opt_t = model::gp::KernelLFOpt<Params>``
    (meaning: a kernel with automatic relevance determination and a mean equal to the mean of the input data, that is, the data are centered automatically)

    For the statistics, the default value is ``boost::fusion::vector<stat::Samples<Params>, stat::AggregatedObservations<Params>, stat::ConsoleSummary<Params>>``
    \endrst

    Example of customization:
    - typedef kernel::MaternFiveHalfs<Params> Kernel_t;
    - typedef mean::Data<Params> Mean_t;
    - typedef model::GP<Params, Kernel_t, Mean_t> GP_t;
    - typedef acqui::UCB<Params, GP_t> Acqui_t;
    - bayes_opt::BOptimizer<Params, modelfun<GP_t>, acquifun<Acqui_t>> opt;

    @see limbo::bayes_opt::BOptimizer
    */
    class BoBase {
    public:
        typedef Params params_t;
@@ -120,28 +165,37 @@ namespace limbo {
        typedef typename boost::mpl::if_<boost::fusion::traits::is_sequence<StoppingCriteria>, StoppingCriteria, boost::fusion::vector<StoppingCriteria>>::type stopping_criteria_t;
        typedef typename boost::mpl::if_<boost::fusion::traits::is_sequence<Stat>, Stat, boost::fusion::vector<Stat>>::type stat_t;

        /// default constructor
        BoBase() : _total_iterations(0) { _make_res_dir(); }

        /// copy is disabled (dangerous and useless)
        BoBase(const BoBase& other) = delete;
        /// copy is disabled (dangerous and useless)
        BoBase& operator=(const BoBase& other) = delete;

        /// @return true if the statistics are enabled (they can be disabled to avoid dumping data, e.g. for unit tests)
        bool stats_enabled() const { return Params::bayes_opt_bobase::stats_enabled(); }

        /// @return the name of the directory in which results (statistics) are written
        const std::string& res_dir() const { return _res_dir; }

        /// @return the vector of observations (observations can be multi-dimensional, hence the VectorXd) -- f(x)
        const std::vector<Eigen::VectorXd>& observations() const { return _observations; }

        /// @return the list of points that have been evaluated so far (x)
        const std::vector<Eigen::VectorXd>& samples() const { return _samples; }

        /// @return the list of blacklisted points
        const std::vector<Eigen::VectorXd>& bl_samples() const { return _bl_samples; }

        /// @return the current iteration number
        int current_iteration() const { return _current_iteration; }

        int total_iterations() const { return _total_iterations; }

        /// Add a new sample / observation pair
        /// - does not update the model!
        /// - we do not add NaN or inf observations
        void add_new_sample(const Eigen::VectorXd& s, const Eigen::VectorXd& v)
        {
            if (tools::is_nan_or_inf(v))
@@ -150,8 +204,10 @@ namespace limbo {
            _observations.push_back(v);
        }

        /// Add a new blacklisted sample
        void add_new_bl_sample(const Eigen::VectorXd& s) { _bl_samples.push_back(s); }

        /// Evaluate a sample and add the result to the 'database' (sample / observation vectors)
        template <typename StateFunction>
        bool eval_and_add(const StateFunction& seval, const Eigen::VectorXd& sample)
        {
...
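For reference, here is the customization example from the doc comment above assembled into a compilable sketch. This is our illustration, not part of the commit: the `Params` sub-structs shown are placeholders, and the exact set required depends on the components used.

```cpp
#include <limbo/limbo.hpp>

using namespace limbo;

struct Params {
    // illustrative values; each limbo component reads its own sub-struct
    struct bayes_opt_bobase {
        BO_PARAM(bool, stats_enabled, true);
    };
    // ... sub-structs for the kernel, acquisition function, stopping
    // criterion, etc. would go here
};

int main()
{
    typedef kernel::MaternFiveHalfs<Params> Kernel_t;
    typedef mean::Data<Params> Mean_t;
    typedef model::GP<Params, Kernel_t, Mean_t> GP_t;
    typedef acqui::UCB<Params, GP_t> Acqui_t;

    // boost::parameter keywords (modelfun, acquifun) select the components
    bayes_opt::BOptimizer<Params, modelfun<GP_t>, acquifun<Acqui_t>> opt;
    // opt.optimize(my_evaluation_function); // hypothetical call
    return 0;
}
```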
@@ -8,6 +8,7 @@
namespace limbo {
    namespace defaults {
        struct kernel_exp {
            /// \ingroup defaults
            BO_PARAM(double, sigma, 1);
        };
    }
...
@@ -8,7 +8,9 @@
namespace limbo {
    namespace defaults {
        struct kernel_maternfivehalfs {
            /// \ingroup defaults
            BO_PARAM(double, sigma, 1);
            /// \ingroup defaults
            BO_PARAM(double, l, 1);
        };
    }
...
@@ -8,7 +8,9 @@
namespace limbo {
    namespace defaults {
        struct kernel_maternthreehalfs {
            /// \ingroup defaults
            BO_PARAM(double, sigma, 1);
            /// \ingroup defaults
            BO_PARAM(double, l, 1);
        };
    }
...
@@ -15,6 +15,20 @@
namespace limbo {
    namespace model {
        /**
        \rst
        A classic Gaussian process.

        It is parametrized by:
        - the :ref:`Params <params-guide>`
        - a kernel function
        - a mean function
        - [optional] an optimizer for the hyper-parameters

        .. doxygenstruct:: limbo::defaults::acqui_ucb
            :members:
            :undoc-members:
        \endrst
        */
        template <typename Params, typename KernelFunction, typename MeanFunction, class HyperParamsOptimizer = gp::NoLFOpt<Params>>
        class GP {
        public:
...
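As a concrete instance of this parametrization, here is a sketch of the default composition described in the BoBase table above (our illustration; it assumes a suitable `Params` struct):

```cpp
// the default GP: squared-exponential kernel with automatic relevance
// determination, a mean equal to the mean of the data, and likelihood-based
// optimization of the kernel hyper-parameters
typedef limbo::kernel::SquaredExpARD<Params> kf_t;
typedef limbo::mean::Data<Params> mean_t;
typedef limbo::model::GP<Params, kf_t, mean_t, limbo::model::gp::KernelLFOpt<Params>> gp_t;
```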
#ifndef LIMBO_OPT_HPP
#define LIMBO_OPT_HPP

///@defgroup opt_defaults
///@defgroup opt

#include <limbo/opt/optimizer.hpp>
#ifdef USE_LIBCMAES
#include <limbo/opt/cmaes.hpp>
...
@@ -18,11 +18,25 @@
namespace limbo {
    namespace defaults {
        struct opt_cmaes {
            /// @ingroup opt_defaults
            /// number of restarts of CMA-ES
            BO_PARAM(int, restarts, 1);
            /// @ingroup opt_defaults
            /// maximum number of calls to the function to be optimized
            BO_PARAM(double, max_fun_evals, -1);
        };
    }

    namespace opt {
        /// @ingroup opt
        /// Covariance Matrix Adaptation Evolution Strategy by Hansen et al.
        /// (see: https://www.lri.fr/~hansen/cmaesintro.html)
        /// - our implementation is based on libcmaes (https://github.com/beniz/libcmaes)
        /// - supports bounded and unbounded optimization
        /// - only available if libcmaes is installed (see the compilation instructions)
        ///
        /// Parameters:
        /// - double max_fun_evals
        /// - int restarts
        template <typename Params>
        struct Cmaes {
        public:
...
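A sketch of overriding these defaults, following the usual limbo pattern of shadowing a `defaults::` struct inside your own `Params` (the values are illustrative):

```cpp
struct Params {
    struct opt_cmaes : public limbo::defaults::opt_cmaes {
        BO_PARAM(int, restarts, 3);             // illustrative: restart CMA-ES 3 times
        BO_PARAM(double, max_fun_evals, 10000); // illustrative: cap the evaluations
    };
};
// limbo::opt::Cmaes<Params> then uses these values instead of the defaults
```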
@@ -11,10 +11,17 @@
namespace limbo {
    namespace defaults {
        struct opt_gridsearch {
            /// @ingroup opt_defaults
            /// number of bins for each dimension
            BO_PARAM(int, bins, 5);
        };
    }

    namespace opt {
        /// @ingroup opt
        /// Grid search
        ///
        /// Parameters:
        /// - int bins
        template <typename Params>
        struct GridSearch {
        public:
...
@@ -16,10 +16,39 @@
namespace limbo {
    namespace defaults {
        struct opt_nloptgrad {
            /// @ingroup opt_defaults
            /// number of calls to the function to be optimized
            BO_PARAM(int, iterations, 500);
        };
    }

    namespace opt {
        /// @ingroup opt
        /// Binding to gradient-based NLOpt algorithms.
        /// See: http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms
        ///
        /// Algorithms:
        /// - GD_STOGO
        /// - GD_STOGO_RAND
        /// - LD_LBFGS_NOCEDAL
        /// - LD_LBFGS [default]
        /// - LN_PRAXIS
        /// - LD_VAR1
        /// - LD_VAR2
        /// - LD_TNEWTON
        /// - LD_TNEWTON_RESTART
        /// - LD_TNEWTON_PRECOND
        /// - LD_TNEWTON_PRECOND_RESTART
        /// - GD_MLSL
        /// - GN_MLSL_LDS
        /// - GD_MLSL_LDS
        /// - LD_MMA
        /// - LD_AUGLAG
        /// - LD_AUGLAG_EQ
        /// - LD_SLSQP
        /// - LD_CCSAQ
        ///
        /// Parameters:
        /// - int iterations
        template <typename Params, nlopt::algorithm Algorithm = nlopt::LD_LBFGS>
        struct NLOptGrad {
        public:
...
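The algorithm is selected at compile time via the second template parameter; a short sketch (assuming NLopt is installed and `Params::opt_nloptgrad` is defined):

```cpp
// default gradient-based algorithm (LD_LBFGS)
limbo::opt::NLOptGrad<Params> lbfgs;
// explicitly select another algorithm from the list above
limbo::opt::NLOptGrad<Params, nlopt::LD_MMA> mma;
```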
@@ -16,10 +16,40 @@
namespace limbo {
    namespace defaults {
        struct opt_nloptnograd {
            /// @ingroup opt_defaults
            /// number of calls to the function to be optimized
            BO_PARAM(int, iterations, 500);
        };
    }

    namespace opt {
        /// @ingroup opt
        /// Binding to gradient-free NLOpt algorithms.
        /// See: http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms
        ///
        /// Algorithms:
        /// - GN_DIRECT
        /// - GN_DIRECT_L
        /// - GN_DIRECT_L_RAND [default]
        /// - GN_DIRECT_NOSCAL
        /// - GN_DIRECT_L_NOSCAL
        /// - GN_DIRECT_L_RAND_NOSCAL
        /// - GN_ORIG_DIRECT
        /// - GN_ORIG_DIRECT_L
        /// - GN_CRS2_LM
        /// - GN_MLSL
        /// - GN_MLSL_LDS
        /// - GN_ISRES
        /// - LN_COBYLA
        /// - LN_AUGLAG_EQ
        /// - LN_BOBYQA
        /// - LN_NEWUOA
        /// - LN_NEWUOA_BOUND
        /// - LN_NELDERMEAD
        /// - LN_SBPLX
        /// - LN_AUGLAG
        ///
        /// Parameters:
        /// - int iterations
        template <typename Params, nlopt::algorithm Algorithm = nlopt::GN_DIRECT_L_RAND>
        struct NLOptNoGrad {
        public:
...
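The same idea on the gradient-free side, combined with a parameter override (a sketch; the values are illustrative):

```cpp
struct Params {
    struct opt_nloptnograd : public limbo::defaults::opt_nloptnograd {
        BO_PARAM(int, iterations, 200); // illustrative: fewer function calls
    };
};
// Nelder-Mead simplex instead of the default GN_DIRECT_L_RAND
limbo::opt::NLOptNoGrad<Params, nlopt::LN_NELDERMEAD> simplex;
```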
@@ -9,33 +9,33 @@
namespace limbo {
    namespace opt {
        /// return type of the function to optimize
        typedef std::pair<double, boost::optional<Eigen::VectorXd>> eval_t;

        /// return with opt::no_grad(your_val) if no gradient is available (to be used in functions to be optimized)
        eval_t no_grad(double x) { return eval_t{x, boost::optional<Eigen::VectorXd>{}}; }

        /// get the gradient from a function evaluation (eval_t)
        const Eigen::VectorXd& grad(const eval_t& fg)
        {
            assert(std::get<1>(fg).is_initialized());
            return std::get<1>(fg).get();
        }

        /// get the value from a function evaluation (eval_t)
        double fun(const eval_t& fg)
        {
            return std::get<0>(fg);
        }

        /// Evaluate f without gradient (to be called from the optimization algorithms that do not use the gradient)
        template <typename F>
        double eval(const F& f, const Eigen::VectorXd& x)
        {
            return std::get<0>(f(x, false));
        }

        /// Evaluate f with gradient (to be called from the optimization algorithms that use the gradient)
        template <typename F>
        eval_t eval_grad(const F& f, const Eigen::VectorXd& x)
        {
...
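To illustrate the convention above, a hypothetical objective written against `eval_t` (the name and body are ours; the `(x, bool)` call shape follows `eval` / `eval_grad`):

```cpp
#include <Eigen/Core>

// maximize -||x||^2 (optimum at the origin); the flag tells us whether the
// caller wants the gradient
limbo::opt::eval_t my_objective(const Eigen::VectorXd& x, bool gradient)
{
    double value = -x.squaredNorm();
    if (!gradient)
        return limbo::opt::no_grad(value); // value only
    Eigen::VectorXd g = -2.0 * x;
    return {value, g}; // value + gradient
}
```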
@@ -13,10 +13,18 @@
namespace limbo {
    namespace defaults {
        struct opt_parallelrepeater {
            /// @ingroup opt_defaults
            /// number of replicates
            BO_PARAM(int, repeats, 10);
        };
    }

    namespace opt {
        /// @ingroup opt
        /// Meta-optimizer: runs the same algorithm in parallel many times from different initial points and returns the maximum found among all the replicates
        /// (useful for local algorithms)
        ///
        /// Parameters:
        /// - int repeats
        template <typename Params, typename Optimizer>
        struct ParallelRepeater {
            template <typename F>
...
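A composition sketch (our illustration): restarting a local, gradient-based method from several random initial points by wrapping it in ParallelRepeater:

```cpp
// run Rprop from opt_parallelrepeater::repeats different starting points in
// parallel and keep the best result
typedef limbo::opt::ParallelRepeater<Params, limbo::opt::Rprop<Params>> opt_t;
```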
@@ -6,6 +6,10 @@
namespace limbo {
    namespace opt {
        /// @ingroup opt
        /// - return a random point in [0, 1]
        /// - no parameters
        /// - useful for control experiments (do not use this otherwise!)
        template <typename Params>
        struct RandomPoint {
            template <typename F>
...
@@ -14,16 +14,22 @@
namespace limbo {
    namespace defaults {
        struct opt_rprop {
            /// @ingroup opt_defaults
            BO_PARAM(int, iterations, 300);
        };
    }

    namespace opt {
        /// @ingroup opt
        /// Gradient-based optimization (rprop)
        /// - partly inspired by libgp: https://github.com/mblum/libgp
        /// - reference:
        /// Blum, M., & Riedmiller, M. (2013). Optimization of Gaussian
        /// Process Hyperparameters using Rprop. In European Symposium
        /// on Artificial Neural Networks, Computational Intelligence
        /// and Machine Learning.
        ///
        /// Parameters:
        /// - int iterations
        template <typename Params>
        struct Rprop {
            template <typename F>
...