Commit e17dbec7 authored by Federico Allocati's avatar Federico Allocati
Browse files

Merge branch 'master' of https://github.com/fedeallocati/limbo into gp_multi

Conflicts:
	limbo.py
	src/cmaes/cmaes.c
	src/limbo/boptimizer.hpp
	src/limbo/gp.hpp
	src/limbo/gp_auto.hpp
	src/limbo/kernel_functions.hpp
	src/limbo/rprop.hpp
	wscript
parents 0760e54f 69fd39aa
language: cpp
compiler:
- gcc
- clang
sudo: required
install: sudo apt-get update -qq && sudo apt-get install -qq libboost-all-dev libboost-test-dev libeigen3-dev libtbb-dev
# Change this to your needs
script:
./waf configure && ./waf
\ No newline at end of file
......@@ -3,6 +3,12 @@ limbo
A lightweight framework for Bayesian and model-based optimisation of black-box functions (C++11).
Documentation
-------------
Minimal documentation is (or will be) available on the wiki: https://github.com/resibots/limbo/wiki
Many mechanisms are inspired by [sferes2](http://github.com/sferes2/sferes2): looking at the documentation of sferes2 might help.
Authors
------
- Antoine Cully (Pierre & Marie Curie University): http://www.isir.upmc.fr/?op=view_profil&lang=fr&id=278
......@@ -11,7 +17,7 @@ Authors
Main features
-------------
- Bayesian optimisation based on Gaussian processes
- Parego (Multi-objective optimization)
- Parego (Multi-objective optimization), experimental support for other multi-objective algorithms
- Generic framework (template-based), which allows easy customization for testing original ideas
- Can exploit multicore computers
......
......@@ -55,6 +55,7 @@ BOOST_TOOLSETS = {
'edg': 'edg',
'g++': detect_mingw,
'gcc': detect_mingw,
'icc': detect_intel,
'icpc': detect_intel,
'intel': detect_intel,
'kcc': 'kcc',
......
......@@ -20,7 +20,7 @@ def check_eigen(conf):
conf.env.INCLUDES_EIGEN = [conf.options.eigen]
conf.env.LIBPATH_EIGEN = [conf.options.eigen]
else:
conf.env.INCLUDES_EIGEN = ['/usr/include/eigen2',
conf.env.INCLUDES_EIGEN = ['/usr/include/eigen3',
'/usr/local/include/eigen3',
'/usr/include', '/usr/local/include']
res = conf.find_file('Eigen/Core', conf.env.INCLUDES_EIGEN)
......
import sys, os
import stat
import subprocess
import commands

# Optional dependency: simplejson is only needed by the qsub()/oar()
# submission helpers; everything else works without it.
# NOTE(review): the duplicated import and except-body lines left by the
# merge were collapsed into a single, properly indented guard.
json_ok = True
try:
    import simplejson
except:
    json_ok = False
    print("WARNING simplejson not found some function may not work")
import glob
def options(opt):
    """Register the wscript command-line options.

    :param opt: waf options context used to declare flags.
    """
    # --qsub points at the json experiment description consumed by
    # qsub()/oar().  The duplicated registration left by the merge was
    # dropped: optparse raises a conflict error when the same option
    # string is added twice.
    opt.add_option('--qsub', type='string', help='json file to submit to torque', dest='qsub')
def create_variants(bld, source, uselib_local,
                    uselib, variants, includes=". ../",
                    cxxflags='',
                    json='',
                    target=''):
    """Create one build target per preprocessor variant of `source`.

    First builds the plain (no extra #define) program, then, for each
    entry of `variants` (a space-separated list of macro names such as
    "PAREGO NSBO"), generates a wrapper .cpp that #defines those macros
    and #includes the original source, and declares a program for it.

    NOTE(review): the two merge-conflicted copies of this function were
    collapsed into the post-merge version (the one taking `target`),
    with the parameters of both signatures kept for compatibility.

    :param bld: waf build context.
    :param source: base .cpp file, relative to the current wscript dir.
    :param uselib_local: local libraries (waf `use`).
    :param uselib: external uselib flags (e.g. 'BOOST EIGEN').
    :param variants: list of space-separated macro-name strings.
    :param includes: include paths for every generated target.
    :param cxxflags: kept for backward compatibility (unused here).
    :param json: kept for backward compatibility (unused here).
    :param target: base target name; defaults to `source` minus '.cpp'.
    """
    # Base target name: explicit `target` if given, else source without .cpp.
    if not target:
        tmp = source.replace('.cpp', '')
    else:
        tmp = target
    # The basic, variant-free build.
    bld.program(features='cxx',
                source=source,
                target=tmp,
                includes=includes,
                uselib=uselib,
                use=uselib_local)
    c_src = bld.path.abspath() + '/'
    for v in variants:
        # "DEF1 DEF2" -> suffix "def1_def2" -> <tmp>_def1_def2.cpp
        suff = ''
        for d in v.split(' '):
            suff += d.lower() + '_'
        src_fname = tmp + '_' + suff[0:len(suff) - 1] + '.cpp'
        bin_fname = tmp + '_' + suff[0:len(suff) - 1]
        # Generate the variant wrapper file (closed explicitly so the
        # content is flushed before waf picks it up).
        f = open(c_src + src_fname, 'w')
        f.write("// THIS IS A GENERATED FILE - DO NOT EDIT\n")
        for d in v.split(' '):
            f.write("#define " + d + "\n")
        # #line keeps compiler diagnostics pointing at the original source.
        f.write("#line 1 \"" + c_src + source + "\"\n")
        code = open(c_src + source, 'r')
        for line in code:
            f.write(line)
        code.close()
        f.close()
        bin_name = src_fname.replace('.cpp', '')
        bin_name = os.path.basename(bin_name)
        # create build
        bld.program(features='cxx',
                    source=src_fname,
                    target=bin_fname,
                    includes=includes,
                    uselib=uselib,
                    use=uselib_local)
def _sub_script(tpl, conf_file):
if 'LD_LIBRARY_PATH' in os.environ:
ld_lib_path = os.environ['LD_LIBRARY_PATH']
else:
ld_lib_path = "''"
print 'LD_LIBRARY_PATH=' + ld_lib_path
# parse conf
conf = simplejson.load(open(conf_file))
exps = conf['exps']
nb_runs = conf['nb_runs']
res_dir = conf['res_dir']
bin_dir = conf['bin_dir']
wall_time = conf['wall_time']
use_mpi = "false"
try:
use_mpi = conf['use_mpi']
except:
use_mpi = "false"
try:
nb_cores = conf['nb_cores']
except:
nb_cores = 1
try:
args = conf['args']
except:
args = ''
email = conf['email']
if (use_mpi == "true"):
ppn = '1'
mpirun = 'mpirun'
else:
# nb_cores = 1;
ppn = "8"
mpirun = ''
fnames = []
for i in range(0, nb_runs):
for e in exps:
directory = res_dir + "/" + e + "/exp_" + str(i)
try:
os.makedirs(directory)
except:
print "WARNING, dir:" + directory + " not be created"
subprocess.call('cp ' + bin_dir + '/' + e + ' ' + directory, shell=True)
fname = directory + "/" + e + "_" + str(i) + ".job"
f = open(fname, "w")
f.write(tpl
.replace("@exp", e)
.replace("@email", email)
.replace("@ld_lib_path", ld_lib_path)
.replace("@wall_time", wall_time)
.replace("@dir", directory)
.replace("@nb_cores", str(nb_cores))
.replace("@ppn", ppn)
.replace("@exec", mpirun + ' ' + directory + '/' + e + ' ' + args))
f.close()
os.chmod(fname, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
fnames += [(fname, directory)]
return fnames
def qsub(conf_file):
    """Submit every job described in `conf_file` through torque's qsub.

    NOTE(review): this function was an unresolved merge of the old inline
    implementation and the new `_sub_script`-based one; it was rebuilt to
    match the structure of oar() below, which the merged code clearly
    intended. The template keeps both "#PBS -m" lines from the conflicted
    region (PBS honours the last one) — confirm the intended mail policy.

    :param conf_file: path to the json experiment description.
    """
    # PBS job template; placeholders are filled in by _sub_script().
    tpl = """#!/bin/sh
#? nom du job affiche
#PBS -N @exp
#PBS -o stdout
#PBS -b stderr
#PBS -M @email
# maximum execution time
#PBS -l walltime=@wall_time
# mail parameters
#PBS -m n
#PBS -m abe
# number of nodes
#PBS -l nodes=@nb_cores:ppn=@ppn
#PBS -l pmem=5200mb -l mem=5200mb
export LD_LIBRARY_PATH=@ld_lib_path
exec @exec
"""
    fnames = _sub_script(tpl, conf_file)
    for (fname, directory) in fnames:
        # submit from the run directory so relative outputs land there
        s = "qsub -d " + directory + " " + fname
        print("executing:" + s)
        retcode = subprocess.call(s, shell=True, env=None)
        print("qsub returned:" + str(retcode))
def oar(conf_file):
    """Submit every job described in `conf_file` through OAR's oarsub.

    :param conf_file: path to the json experiment description.
    """
    # OAR job template; placeholders are filled in by _sub_script().
    tpl = """#!/bin/bash
#OAR -l /core=@nb_cores/nodes=1,walltime=@wall_time
#OAR -n @exp
#OAR -O stdout.%jobid%.log
#OAR -E stderr.%jobid%.log
export LD_LIBRARY_PATH=@ld_lib_path
exec @exec
"""
    print('WARNING [oar]: MPI not supported yet')
    fnames = _sub_script(tpl, conf_file)
    for (fname, directory) in fnames:
        # -S: the script carries its own #OAR directives
        s = "oarsub -d " + directory + " -S " + fname
        print("executing:" + s)
        retcode = subprocess.call(s, shell=True, env=None)
        print("oarsub returned:" + str(retcode))
#! /usr/bin/env python
# encoding: utf-8
# F Allocati - 2015
"""
Quick n dirty intel mkl detection
"""
import os, glob, types
from waflib.Configure import conf
def options(opt):
    """Declare the --mkl flag pointing at an Intel MKL install prefix.

    :param opt: waf options context used to declare flags.
    """
    opt.add_option('--mkl', type='string', help='path to Intel Math Kernel Library', dest='mkl')
@conf
def check_mkl(conf):
    """waf configure check for Intel MKL headers and libraries.

    Looks for mkl.h and libmkl_core.so (under --mkl if given, else in the
    usual system/Intel locations) and, on success, fills in the LIB_,
    INCLUDES_, LIBPATH_, CXXFLAGS_ and LINKFLAGS_ env variables for the
    three use variants: MKL_SEQ (sequential), MKL_TBB and MKL_OMP.
    Returns silently when MKL cannot be found.
    """
    if conf.options.mkl:
        includes_mkl = [conf.options.mkl + '/include']
        libpath_mkl = [conf.options.mkl + '/lib/intel64']
    else:
        includes_mkl = ['/usr/local/include', '/usr/include', '/opt/intel/mkl/include']
        libpath_mkl = ['/usr/local/lib/', '/usr/lib', '/opt/intel/mkl/lib/intel64']
    conf.start_msg('Checking Intel MKL includes')
    try:
        res = conf.find_file('mkl.h', includes_mkl)
        conf.end_msg('ok')
        conf.start_msg('Checking Intel MKL libs')
        res = res and conf.find_file('libmkl_core.so', libpath_mkl)
        conf.end_msg('ok')
    except:
        # MKL is optional: report and leave the env untouched.
        print('Intel MKL not found')
        return
    # Threading back-end libraries differ between Intel and GNU toolchains.
    conf.env.LIB_MKL_SEQ = ["mkl_intel_lp64", "mkl_core", "mkl_sequential", "pthread", "m"]
    conf.env.LIB_MKL_TBB = ["mkl_intel_lp64", "mkl_core", "mkl_tbb_thread", "tbb", "stdc++", "pthread", "m"]
    if conf.env.CXX_NAME in ["icc", "icpc"]:
        conf.env.LIB_MKL_OMP = ["mkl_intel_lp64", "mkl_core", "mkl_intel_thread", "pthread", "m"]
    else:
        conf.env.LIB_MKL_OMP = ["mkl_intel_lp64", "mkl_core", "mkl_gnu_thread", "dl", "pthread", "m"]
    conf.env.INCLUDES_MKL_SEQ = includes_mkl
    conf.env.INCLUDES_MKL_TBB = includes_mkl
    conf.env.INCLUDES_MKL_OMP = includes_mkl
    conf.env.LIBPATH_MKL_SEQ = libpath_mkl
    conf.env.LIBPATH_MKL_TBB = libpath_mkl
    conf.env.LIBPATH_MKL_OMP = libpath_mkl
    conf.env.CXXFLAGS_MKL_SEQ = ["-m64", "-DEIGEN_USE_MKL_ALL", "-DMKL_BLAS=MKL_DOMAIN_BLAS"]
    conf.env.LINKFLAGS_MKL_SEQ = ["-Wl,--no-as-needed"]
    conf.env.CXXFLAGS_MKL_TBB = ["-m64", "-DEIGEN_USE_MKL_ALL", "-DMKL_BLAS=MKL_DOMAIN_BLAS"]
    conf.env.LINKFLAGS_MKL_TBB = ["-Wl,--no-as-needed"]
    # Intel compilers spell the OpenMP flag -qopenmp, GNU uses -fopenmp.
    if conf.env.CXX_NAME in ["icc", "icpc"]:
        conf.env.CXXFLAGS_MKL_OMP = ["-qopenmp", "-m64", "-DEIGEN_USE_MKL_ALL", "-DMKL_BLAS=MKL_DOMAIN_BLAS"]
    else:
        conf.env.CXXFLAGS_MKL_OMP = ["-fopenmp", "-m64", "-DEIGEN_USE_MKL_ALL", "-DMKL_BLAS=MKL_DOMAIN_BLAS"]
    # NOTE(review): a stray conf.end_msg('ok') with no matching start_msg
    # (merge leftover) was removed here — waf errors on unbalanced messages.
    conf.env.LINKFLAGS_MKL_OMP = ["-Wl,--no-as-needed"]
#!/usr/bin/env python
# encoding: utf-8
from waflib.Configure import conf
from waflib.Errors import ConfigurationError
# Minimal translation unit used to probe OpenMP support.
OPENMP_CODE = '''
#include <omp.h>
int main () { return omp_get_num_threads (); }
'''


@conf
def check_openmp(self, **kw):
    """Detect the compiler flag (if any) required to enable OpenMP.

    Tries to build OPENMP_CODE first without any flag, then with each
    candidate flag in turn; on success stores the flag in
    CCFLAGS_OMP/CXXFLAGS_OMP/LINKFLAGS_OMP. Fails the configure step if
    OpenMP is unsupported and kw['mandatory'] is not set to False.

    :param self: waf configuration context (bound via @conf).
    :param kw: waf check keywords; must include 'compiler' ('c' or 'cxx'),
        may include 'define_name' and 'mandatory'.
    """
    self.start_msg('Checking for compiler option to support OpenMP')
    kw.update({'fragment': OPENMP_CODE})
    try:
        # Some compilers enable OpenMP without any flag.
        self.validate_c(kw)
        self.run_c_code(**kw)
        if 'define_name' in kw:
            self.define(kw['define_name'], 1)
        self.end_msg('None')
    except ConfigurationError:
        for flag in ('-qopenmp', '-fopenmp', '-xopenmp', '-openmp', '-mp', '-omp', '-qsmp=omp'):
            try:
                self.validate_c(kw)  # refresh env
                if kw['compiler'] == 'c':
                    kw['ccflags'] = kw['cflags'] = flag
                elif kw['compiler'] == 'cxx':
                    kw['cxxflags'] = flag
                else:
                    self.fatal('Compiler has to be "c" or "cxx"')
                kw['linkflags'] = flag
                kw['success'] = self.run_c_code(**kw)
                self.post_check(**kw)
                self.env.CCFLAGS_OMP = [flag]
                self.env.CXXFLAGS_OMP = [flag]
                self.env.LINKFLAGS_OMP = [flag]
                self.end_msg(flag)
                return
            except ConfigurationError:
                # drop the stale env so the next candidate starts clean
                del kw['env']
                continue
        self.end_msg('Not supported')
        if 'define_name' in kw:
            self.undefine(kw['define_name'])
        if kw.get('mandatory', True):
            self.fatal('OpenMP is not supported')
\ No newline at end of file
......@@ -178,7 +178,7 @@ int main() {
#elif defined MOP2
typedef mop2 func_t;
#else
# error "unknown function to optimize"
typedef mop2 func_t;
#endif
typedef stat::ParetoBenchmark<func_t> stat_t;
......@@ -186,7 +186,7 @@ int main() {
Parego<Params, stat_fun<stat_t> > opt;
#elif defined(NSBO)
Nsbo<Params, stat_fun<stat_t> > opt;
#elif defined(EHVI)
#else
Ehvi<Params, stat_fun<stat_t> > opt;
#endif
......
......@@ -243,10 +243,10 @@ cmaes_init(cmaes_t *t, /* "this" */
double dtest, trace;
static const char * version = "3.11.02.beta";
/*if (t->version!=NULL && strcmp(version, t->version) == 0) {
/*if (t->version && strcmp(version, t->version) == 0) {
ERRORMESSAGE("cmaes_init called twice, which will lead to a memory leak, use cmaes_exit first",0,0,0);
printf("Warning: cmaes_init called twice, which will lead to a memory leak, use cmaes_exit first\n");
}*/
}*/
t->version = version;
/* assign_string(&t->signalsFilename, "cmaes_signals.par"); */
......
......@@ -28,7 +28,7 @@ struct Params {
BO_PARAM(int, dump_period, 1);
};
struct init {
BO_PARAM(int, nb_samples, 5);
BO_PARAM(int, nb_samples, 5);
};
struct maxiterations {
BO_PARAM(int, n_iterations, 20);
......
#ifndef BO_BASE_HPP_
#define BO_BASE_HPP_
#define BOOST_PARAMETER_MAX_ARITY 7
#include <vector>
#include <iostream>
#include <boost/parameter.hpp>
......@@ -179,7 +179,7 @@ namespace limbo {
}
template<typename BO>
bool _pursue(const BO& bo) const {
stopping_criterion::ChainCriteria<BO> chain(bo);
stopping_criterion::ChainCriteria<BO> chain(bo);
return boost::fusion::accumulate(_stopping_criteria, true, chain);
}
template<typename BO>
......
#ifndef BO_MULTI_HPP_
#define BO_MULTI_HPP_
#define VERSION "xxx"
#ifndef USE_SFERES
#warning No sferes
#else
#include <sferes/phen/parameters.hpp>
#include <sferes/gen/evo_float.hpp>
#include <sferes/eval/parallel.hpp>
#include <sferes/modif/dummy.hpp>
#include <sferes/ea/nsga2.hpp>
#endif
#include "bo_base.hpp"
#include "pareto.hpp"
namespace limbo {
namespace multi {
#ifdef USE_SFERES
struct SferesParams {
struct evo_float {
typedef sferes::gen::evo_float::mutation_t mutation_t;
......@@ -63,8 +70,9 @@ namespace limbo {
std::vector<M> _models;
std::vector<float> _objs;
};
}
#endif
}
template <
class Params
......@@ -118,6 +126,8 @@ namespace limbo {
std::cout.flush();
this->_update_models();
std::cout << "ok" << std::endl;
#ifdef USE_SFERES
typedef sferes::gen::EvoFloat<D, multi::SferesParams> gen_t;
typedef sferes::phen::Parameters<gen_t, multi::SferesFit<model_t>, multi::SferesParams> phen_t;
typedef sferes::eval::Parallel<multi::SferesParams> eval_t;
......@@ -125,6 +135,8 @@ namespace limbo {
typedef sferes::modif::Dummy<> modifier_t;
typedef sferes::ea::Nsga2<phen_t, eval_t, stat_t, modifier_t, multi::SferesParams> nsga2_t;
// commented to remove a dependency to a particular version of sferes
nsga2_t ea;
ea.set_fit_proto(multi::SferesFit<model_t>(_models));
ea.run();
......@@ -143,7 +155,7 @@ namespace limbo {
}
_pareto_model[p] = std::make_tuple(point, objs, sigma);
}
#endif
}
protected:
......
#ifndef BOPTIMIZER_HPP_
#define BOPTIMIZER_HPP_
#include <type_traits>
#include "bo_base.hpp"
namespace limbo {
bool compareVectorXd(Eigen::VectorXd i, Eigen::VectorXd j) { return i(0)<j(0); }
template <
......@@ -20,7 +17,7 @@ namespace limbo {
, class A7 = boost::parameter::void_
>
class BOptimizer : public BoBase<Params, A1, A2, A3, A4, A5, A6, A7> {
public:
public:
typedef BoBase<Params, A1, A2, A3, A4, A5, A6, A7> base_t;
typedef typename base_t::obs_t obs_t;
typedef typename base_t::model_t model_t;
......@@ -28,8 +25,7 @@ namespace limbo {
typedef typename base_t::acquisition_function_t acquisition_function_t;
template<typename EvalFunction>
void optimize(const EvalFunction& feval, bool reset = true) {
// static_assert(std::is_floating_point<obs_t>::value, "BOptimizer wants double/double for obs");
void optimize(const EvalFunction& feval, bool reset = true) {
this->_init(feval, reset);
_model = model_t(EvalFunction::dim_in, EvalFunction::dim_out);
......@@ -38,46 +34,37 @@ namespace limbo {
inner_optimization_t inner_optimization;
while (this->_samples.size() == 0 || this->_pursue(*this)) {
acquisition_function_t acqui(_model, this->_iteration);
Eigen::VectorXd new_sample = inner_optimization(acqui, acqui.dim_in());
this->add_new_sample(new_sample, feval(new_sample));
std::cout << this->_iteration << " new point: "
<< this->_samples[this->_samples.size() - 1].transpose()
<< " value: " << this->_observations[this->_observations.size() - 1].transpose()
// << " mu: "<< _model.mu(this->_samples[this->_samples.size() - 1]).transpose()
// << " mean: " << _model.mean_function()(new_sample,_model).transpose()
// << " sigma: "<< _model.sigma(this->_samples[this->_samples.size() - 1])
// << " acqui: "<< acqui(this->_samples[this->_samples.size() - 1])
<< " best:" << this->best_observation().transpose()
<< std::endl;
Eigen::VectorXd new_sample = inner_optimization(acqui, acqui.dim_in());
this->add_new_sample(new_sample, feval(new_sample));
_model.compute(this->_samples, this->_observations, Params::boptimizer::noise());
this->_update_stats(*this);
this->_iteration++;