Matthias Mayr / limbo / Commits / cf68fda5

Commit cf68fda5 authored Jul 29, 2014 by Jean-Baptiste Mouret

first version of ns_ego

Parent: 14dc004f
Changes: 10 files
src/examples/parego.cpp
...
@@ -8,12 +8,12 @@ struct Params {
    BO_PARAM(int, dump_period, -1);
  };
  struct init {
-    BO_PARAM(int, nb_samples, 10);
+    BO_PARAM(int, nb_samples, 21);
    // calandra: number of dimensions * 5
    // knowles : 11 * dim - 1
  };
  struct maxiterations {
-    BO_PARAM(int, n_iterations, 20);
+    BO_PARAM(int, n_iterations, 30);
  };
  struct ucb : public defaults::ucb {};
  struct gp_ucb : public defaults::gp_ucb {};
...
@@ -58,7 +58,28 @@ struct mop2 {
  }
};
// note: UCB does not work if the function is positive _or_ negative?
+struct Pareto {
+  template <typename BO>
+  void operator()(const BO& opt) {
+    if (opt.iteration() % 10 != 0)
+      return;
+    auto p_model = opt.model_pareto_front(0, 1.0, 0.005);
+    auto p_data = opt.data_pareto_front();
+    std::string it = std::to_string(opt.iteration());
+    std::string model = it + "_pareto_model.dat";
+    std::string data = it + "_pareto_data.dat";
+    std::ofstream pareto_model(model.c_str()), pareto_data(data.c_str());
+    for (auto x : p_model)
+      pareto_model << std::get<1>(x).transpose() << " "
+                   << (std::get<1>(x).array() + std::get<2>(x)).transpose() << " "
+                   << (std::get<1>(x).array() - std::get<2>(x)).transpose() << " "
+                   << std::endl;
+    for (auto x : p_data)
+      pareto_data << std::get<1>(x).transpose() << std::endl;
+  }
+};
int main() {
...
@@ -68,8 +89,7 @@ int main() {
  // typedef model::GP<Params, kernel_t, mean_t> gp_t;
  // typedef acquisition_functions::UCB<Params, gp_t> ucb_t;
  //Parego<Params, model_fun<gp_t>, acq_fun<ucb_t> > opt;
  {
-    Parego<Params> opt;
+    Parego<Params, stat_fun<Pareto> > opt;
    opt.optimize(mop2());
    auto p_model = opt.model_pareto_front(0, 1.0, 0.001);
...
@@ -84,7 +104,6 @@ int main() {
        << std::endl;
    for (auto x : p_data)
      pareto_data << std::get<1>(x).transpose() << std::endl;
  }
  return 0;
}
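Note: the body of mop2 is collapsed in this view. For reference, a minimal sketch of a bi-objective evaluation functor in this style is given below; it follows the standard MOP2 (Fonseca-Fleming) benchmark, and the member names (dim, the operator() signature) are assumptions for illustration, not the actual contents of parego.cpp.

// Hypothetical sketch of a bi-objective test functor in the style of the example above.
// Based on the classic MOP2 (Fonseca-Fleming) benchmark; the interface expected by
// limbo (dim, operator() returning the objective vector) is assumed here.
#include <Eigen/Core>
#include <cmath>

struct mop2_sketch {
  static constexpr size_t dim = 2;  // dimension of the search space (assumed)

  // maps a point of [0,1]^dim to the two objectives
  Eigen::VectorXd operator()(const Eigen::VectorXd& x) const {
    // rescale [0,1]^dim to [-2,2]^dim, a common domain for MOP2
    Eigen::ArrayXd xx = 4.0 * x.array() - 2.0;
    double f1 = 1.0 - std::exp(-(xx - 1.0 / std::sqrt((double)dim)).square().sum());
    double f2 = 1.0 - std::exp(-(xx + 1.0 / std::sqrt((double)dim)).square().sum());
    Eigen::VectorXd res(2);
    res << f1, f2;
    return res;
  }
};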
src/examples/wscript
...
@@ -15,3 +15,9 @@ def build(bld):
                      target = 'parego',
                      uselib = 'BOOST EIGEN TBB',
                      use = 'limbo')
+    obj = bld.program(features = 'cxx',
+                      source = 'ns_ego.cpp',
+                      includes = '. .. ../../',
+                      target = 'ns_ego',
+                      uselib = 'BOOST EIGEN TBB SFERES',
+                      use = 'limbo')
src/limbo/gp_auto.hpp
...
@@ -18,11 +18,13 @@ namespace limbo {
  namespace defaults {
    struct gp_auto {
      BO_PARAM(int, n_rprop, 100);
-      BO_PARAM(int, rprop_restart, 20);
+      BO_PARAM(int, rprop_restart, 100);
    };
  }
  namespace model {
    namespace impl {
+      // launch nprop_restart instances of rprop in parallel
+      // to optimize the log-likelihood
#ifdef USE_TBB
      template <typename GP>
      struct RpropOpt {
...
@@ -48,8 +50,12 @@ namespace limbo {
        void join(const RpropOpt& y) {
          _keep_best(y._best_score, y._best_v);
        }
-        const Eigen::VectorXd& best_v() const { return _best_v; }
-        double best_score() const { return _best_score; }
+        const Eigen::VectorXd& best_v() const { return _best_v; }
+        double best_score() const { return _best_score; }
      protected:
        void _keep_best(float y, const Eigen::VectorXd& v) {
          if (y > _best_score) {
...
@@ -115,7 +121,6 @@ namespace limbo {
      // Eigen::MatrixXd alpha = this->_inverted_kernel * this->_obs_mean;
      // Eigen::MatrixXd w = alpha * alpha.transpose() - this->_inverted_kernel;
      // alpha = K^{-1} * this->_obs_mean;
      Eigen::VectorXd alpha = this->_llt.solve(this->_obs_mean);
      this->_llt.matrixL().adjoint().solveInPlace(alpha);
...
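Note: the hunk above computes alpha = K^{-1} * obs_mean through the stored Cholesky factorization (_llt) instead of an explicit inverted kernel matrix. A standalone Eigen illustration of that solve (variable names here are illustrative, not limbo's):

// Computing alpha = K^{-1} * y via a Cholesky (LLT) factorization instead of an
// explicit matrix inverse, as done above with _llt.
#include <Eigen/Dense>
#include <iostream>

int main() {
  const int n = 4;
  // a toy symmetric positive-definite "kernel" matrix
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd K = A * A.transpose() + n * Eigen::MatrixXd::Identity(n, n);
  Eigen::VectorXd y = Eigen::VectorXd::Random(n);

  Eigen::LLT<Eigen::MatrixXd> llt(K);    // K = L * L^T
  Eigen::VectorXd alpha = llt.solve(y);  // alpha = K^{-1} * y

  // the residual should be close to zero
  std::cout << "residual: " << (K * alpha - y).norm() << std::endl;
  return 0;
}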
@@ -145,12 +150,12 @@ namespace limbo {
      }
    protected:
#ifdef USE_TBB
-      void _optimize_likelihood_tbb() {
-        par::init();
-        impl::RpropOpt<GPAuto> r(*this, Params::gp_auto::n_rprop());
-        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, Params::gp_auto::rprop_restart()), r);
-        this->_kernel_function.set_h_params(r.best_v());
-      }
+      void _optimize_likelihood_tbb() {
+        par::init();
+        impl::RpropOpt<GPAuto> r(*this, Params::gp_auto::n_rprop());
+        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, Params::gp_auto::rprop_restart()), r);
+        this->_kernel_function.set_h_params(r.best_v());
+      }
#endif
      void _optimize_likelihood() {
        double best_score = -std::numeric_limits<float>::max();
...
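Note: _optimize_likelihood_tbb distributes rprop_restart restarts of the hyper-parameter search over tbb::parallel_reduce, and the RpropOpt body keeps the best restart via join/_keep_best. A standalone sketch of that "parallel restarts, keep the best" pattern on a toy objective (all names here are illustrative, not limbo's):

// Sketch of the reduce-and-keep-best pattern used by RpropOpt above, with a toy
// 1-D objective. std::rand is not ideal in parallel code; it is fine for a sketch.
#include <tbb/parallel_reduce.h>
#include <tbb/blocked_range.h>
#include <cstdlib>
#include <iostream>
#include <limits>

struct BestOfRestarts {
  double best_score = -std::numeric_limits<double>::max();
  double best_x = 0.0;

  BestOfRestarts() = default;
  BestOfRestarts(BestOfRestarts&, tbb::split) {}  // splitting constructor: fresh state

  static double objective(double x) { return -(x - 0.3) * (x - 0.3); }  // maximum at 0.3

  void operator()(const tbb::blocked_range<size_t>& r) {
    for (size_t i = r.begin(); i != r.end(); ++i) {
      double x = static_cast<double>(std::rand()) / RAND_MAX;  // one random restart
      double s = objective(x);
      if (s > best_score) { best_score = s; best_x = x; }
    }
  }
  void join(const BestOfRestarts& other) {  // merge results of two sub-ranges
    if (other.best_score > best_score) { best_score = other.best_score; best_x = other.best_x; }
  }
};

int main() {
  BestOfRestarts r;
  tbb::parallel_reduce(tbb::blocked_range<size_t>(0, 100), r);
  std::cout << "best x: " << r.best_x << " score: " << r.best_score << std::endl;
  return 0;
}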
src/limbo/ns_ego.hpp
0 → 100644

#ifndef NSEGO_HPP_
#define NSEGO_HPP_

#include <algorithm>

#define VERSION "xxx"

#include <sferes/phen/parameters.hpp>
#include <sferes/gen/evo_float.hpp>
#include <sferes/eval/parallel.hpp>
#include <sferes/modif/dummy.hpp>
#include <sferes/ea/nsga2.hpp>

#include "parego.hpp"

namespace limbo {
  namespace ns_ego {
    struct SferesParams {
      struct evo_float {
        typedef sferes::gen::evo_float::mutation_t mutation_t;
        typedef sferes::gen::evo_float::cross_over_t cross_over_t;
        SFERES_CONST float cross_rate = 0.5f;
        SFERES_CONST float mutation_rate = 0.1f;
        SFERES_CONST float eta_m = 15.0f;
        SFERES_CONST float eta_c = 10.0f;
        SFERES_CONST mutation_t mutation_type = sferes::gen::evo_float::polynomial;
        SFERES_CONST cross_over_t cross_over_type = sferes::gen::evo_float::sbx;
      };
      struct pop {
        SFERES_CONST unsigned size = 100;
        SFERES_CONST unsigned nb_gen = 1000;
        SFERES_CONST int dump_period = -1;
        SFERES_CONST int initial_aleat = 1;
      };
      struct parameters {
        SFERES_CONST float min = 0.0f;
        SFERES_CONST float max = 1.0f;
      };
    };

    template <typename M>
    class SferesFit {
    public:
      SferesFit(const std::vector<M>& models) : _models(models) {}
      SferesFit() {}
      const std::vector<float>& objs() const { return _objs; }
      float obj(size_t i) const { return _objs[i]; }
      template <typename Indiv>
      void eval(const Indiv& indiv) {
        this->_objs.resize(_models.size());
        Eigen::VectorXd v(indiv.size());
        for (size_t j = 0; j < indiv.size(); ++j)
          v[j] = indiv.data(j);
        for (size_t i = 0; i < _models.size(); ++i)
          this->_objs[i] = _models[i].mu(v);
      }
    protected:
      std::vector<M> _models;
      std::vector<float> _objs;
    };
  }

  template <class Params,
            class A2 = boost::parameter::void_,
            class A3 = boost::parameter::void_,
            class A4 = boost::parameter::void_,
            class A5 = boost::parameter::void_,
            class A6 = boost::parameter::void_,
            class A7 = boost::parameter::void_>
  class NsEgo : public Parego<Params, A2, A3, A4, A5, A6, A7> {
  public:
    typedef BoBase<Params, obs_type<Eigen::VectorXd>, A2, A3, A4, A5, A6, A7> base_t;
    typedef typename base_t::obs_t obs_t;
    typedef typename base_t::model_t model_t;
    typedef typename base_t::inner_optimization_t inner_optimization_t;
    typedef typename base_t::acquisition_function_t acquisition_function_t;
    typedef std::tuple<Eigen::VectorXd, Eigen::VectorXd, double> pareto_point_t;
    typedef std::vector<pareto_point_t> pareto_t;

    template <typename EvalFunction>
    void optimize(const EvalFunction& feval, bool reset = true) {
      this->_init(feval, reset);
      size_t nb_objs = this->_observations[0].size();
      using namespace ns_ego;
      typedef sferes::gen::EvoFloat<EvalFunction::dim, SferesParams> gen_t;
      typedef sferes::phen::Parameters<gen_t, SferesFit<model_t>, SferesParams> phen_t;
      typedef sferes::eval::Parallel<SferesParams> eval_t;
      typedef boost::fusion::vector<> stat_t;
      typedef sferes::modif::Dummy<> modifier_t;
      typedef sferes::ea::Nsga2<phen_t, eval_t, stat_t, modifier_t, SferesParams> nsga2_t;

      while (this->_samples.size() == 0 || this->_pursue()) {
        // compute a model for each dimension
        std::vector<model_t> models(nb_objs, model_t(nb_objs));
        std::vector<std::vector<double> > uni_obs(nb_objs);
        for (size_t i = 0; i < this->_observations.size(); ++i)
          for (size_t j = 0; j < this->_observations[i].size(); ++j)
            uni_obs[j].push_back(this->_observations[i][j]);
        for (size_t i = 0; i < uni_obs.size(); ++i) {
          for (size_t j = 0; j < uni_obs[i].size(); ++j)
            std::cout << uni_obs[i][j] << " ";
          std::cout << std::endl;
          models[i].compute(this->_samples, uni_obs[i], 0.0);
        }

        nsga2_t ea;
        ea.set_fit_proto(SferesFit<model_t>(models));
        ea.run();
        auto pareto_front = ea.pareto_front();
        std::sort(pareto_front.begin(), pareto_front.end(), sferes::fit::compare_objs_lex());

        std::ofstream ofs("pareto.dat");
        Eigen::VectorXd best_v(EvalFunction::dim);
        Eigen::VectorXd x(best_v.size());
        double best_sigma = -1;
        for (size_t i = 0; i < pareto_front.size(); ++i) {
          double sigma = 0;
          for (size_t j = 0; j < x.size(); ++j)
            x[j] = pareto_front[i]->data(j);
          for (size_t j = 0; j < models.size(); ++j)
            sigma += models[j].sigma(x);
          auto s = Eigen::VectorXd(2);
          s << sqrt(2.0), sqrt(2.0);
          Eigen::VectorXd p(2);
          p << pareto_front[i]->fit().obj(0), pareto_front[i]->fit().obj(1);
          ofs << p.transpose() << " "
              << (p + s * sigma).transpose() << " "
              << (p - s * sigma).transpose() << std::endl;
          if (sigma > best_sigma) {
            best_sigma = sigma;
            best_v = x;
          }
        }
        this->add_new_sample(best_v, feval(best_v));
        this->_iteration++;
        std::cout << this->_iteration << " | " << best_v.transpose() << std::endl;
      }
    }

  protected:
    void _update_stats() {
      boost::fusion::for_each(this->_stat, RefreshStat_f<NsEgo>(*this));
    }
  };
}
#endif
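Note: the acquisition step at the end of NsEgo::optimize picks, among the points of the model Pareto front found by NSGA-II, the one with the largest summed predictive uncertainty (sigma) and evaluates it next. A minimal standalone sketch of that selection rule with plain Eigen containers (the candidate data and the sigma_of stand-in are invented for illustration):

// Sketch of NsEgo's acquisition step: among candidate Pareto-optimal points,
// pick the one whose summed model uncertainty is largest and evaluate it next.
// The candidates and the per-point uncertainty function are invented here.
#include <Eigen/Core>
#include <vector>
#include <iostream>

int main() {
  // pretend these came from the NSGA-II front on the GP means
  std::vector<Eigen::VectorXd> candidates;
  for (int i = 0; i < 5; ++i)
    candidates.push_back(Eigen::VectorXd::Random(2).cwiseAbs());

  // stand-in for summing models[j].sigma(x) over the objectives
  auto sigma_of = [](const Eigen::VectorXd& x) { return x.sum(); };

  double best_sigma = -1;
  Eigen::VectorXd best_v;
  for (const auto& x : candidates) {
    double sigma = sigma_of(x);
    if (sigma > best_sigma) {
      best_sigma = sigma;
      best_v = x;
    }
  }
  std::cout << "next point to evaluate: " << best_v.transpose()
            << " (uncertainty " << best_sigma << ")" << std::endl;
  return 0;
}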
src/limbo/parallel.hpp

-#ifndef PARALLEL_HPP_
-#define PARALLEL_HPP_
+#ifndef LIMBO_PARALLEL_HPP_
+#define LIMBO_PARALLEL_HPP_
 #include <vector>
 #ifdef USE_TBB
 #include <tbb/concurrent_vector.h>
...
src/limbo/parego.hpp
...
@@ -2,6 +2,7 @@
 #define PAREGO_HPP_
 #include <algorithm>
 #include "limbo.hpp"
 #include "bo_base.hpp"
 #include "pareto.hpp"
...
@@ -98,7 +99,11 @@ namespace limbo {
      return pareto::pareto_set(_pack_data(points, objs, sigma));
    }
  protected:
+    void _update_stats() {
+      boost::fusion::for_each(this->_stat, RefreshStat_f<Parego>(*this));
+    }
    pareto_t _pack_data(const std::vector<Eigen::VectorXd>& points,
                        const std::vector<Eigen::VectorXd>& objs,
                        const std::vector<double>& sigma) const {
...
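Note: both NsEgo::_update_stats and the new Parego::_update_stats use boost::fusion::for_each to visit every statistics functor stored in the _stat fusion sequence. A minimal standalone example of that visitation pattern (the two stat functors below are invented; limbo's real RefreshStat_f passes the optimizer by reference instead of an iteration count):

// Visiting a heterogeneous list of functors with boost::fusion::for_each,
// the mechanism behind _update_stats above.
#include <boost/fusion/include/vector.hpp>
#include <boost/fusion/include/for_each.hpp>
#include <iostream>

struct PrintIteration {
  void operator()(int iteration) const { std::cout << "iteration: " << iteration << std::endl; }
};
struct PrintBanner {
  void operator()(int iteration) const { std::cout << "--- stats at " << iteration << " ---" << std::endl; }
};

// calls stat(iteration) on every element of the fusion sequence
struct RefreshStats {
  int iteration;
  explicit RefreshStats(int it) : iteration(it) {}
  template <typename Stat>
  void operator()(const Stat& stat) const { stat(iteration); }
};

int main() {
  boost::fusion::vector<PrintIteration, PrintBanner> stats;
  boost::fusion::for_each(stats, RefreshStats(42));
  return 0;
}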
src/limbo/rand.hpp
...
@@ -37,8 +37,8 @@
-#ifndef RAND_HPP_
-#define RAND_HPP_
+#ifndef LIMBO_RAND_HPP_
+#define LIMBO_RAND_HPP_
 #include <cstdlib>
 #include <cmath>
...
src/limbo/stat.hpp

-#ifndef STAT_HPP_
-#define STAT_HPP_
+#ifndef LIMBO_STAT_HPP_
+#define LIMBO_STAT_HPP_
 #include <fstream>
 #include <string>
...
src/limbo/sys.hpp
...
@@ -35,8 +35,8 @@
-#ifndef SYS_HPP_
-#define SYS_HPP_
+#ifndef LIMBO_SYS_HPP_
+#define LIMBO_SYS_HPP_
 #include <ctime>
 #include <unistd.h>
...
wscript
...
@@ -15,6 +15,7 @@ def options(opt):
    opt.load('compiler_c')
    opt.load('eigen')
    opt.load('tbb')
+    opt.load('sferes')

def configure(conf):
    print("configuring b-optimize")
...
@@ -22,6 +23,7 @@ def configure(conf):
    conf.load('compiler_c')
    conf.load('eigen')
    conf.load('tbb')
+    conf.load('sferes')
    common_flags = "-Wall -std=c++11 -fcolor-diagnostics"
...
@@ -30,9 +32,13 @@ def configure(conf):
                   min_version='1.35')
    conf.check_eigen()
    conf.check_tbb()
+    conf.check_sferes()
    if conf.is_defined('USE_TBB'):
        common_flags += " -DUSE_TBB "
+    if conf.is_defined('USE_SFERES'):
+        common_flags += " -DUSE_SFERES "
    # release
    opt_flags = common_flags + ' -O3 -msse2 -ggdb3'
    conf.env['CXXFLAGS'] = cxxflags + opt_flags.split(' ')
...
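Note: with check_sferes() the configure step adds -DUSE_SFERES to the compiler flags, so sferes-dependent code can be guarded the same way the TBB-specific RpropOpt code in gp_auto.hpp is guarded by USE_TBB. A minimal illustration (the helper function is invented):

// Guarding sferes-dependent code behind the USE_SFERES define added by the
// wscript above, mirroring the USE_TBB guard in gp_auto.hpp.
// multi_objective_backend() is an invented example function.
#ifdef USE_SFERES
#include <sferes/ea/nsga2.hpp>
inline const char* multi_objective_backend() { return "sferes NSGA-II"; }
#else
inline const char* multi_objective_backend() { return "none (built without sferes)"; }
#endif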