Commit 0760e54f authored by Aneoshun's avatar Aneoshun
Browse files

add gp_auto_mean

parent 094c508e
......@@ -32,15 +32,15 @@ namespace limbo {
// static_assert(std::is_floating_point<obs_t>::value, "BOptimizer wants double/double for obs");
this->_init(feval, reset);
model_t model(EvalFunction::dim_in, EvalFunction::dim_out);
_model = model_t(EvalFunction::dim_in, EvalFunction::dim_out);
if(this->_observations.size())
model.compute(this->_samples, this->_observations, Params::boptimizer::noise());
_model.compute(this->_samples, this->_observations, Params::boptimizer::noise());
inner_optimization_t inner_optimization;
while (this->_samples.size() == 0 || this->_pursue(*this)) {
acquisition_function_t acqui(model, this->_iteration);
acquisition_function_t acqui(_model, this->_iteration);
Eigen::VectorXd new_sample = inner_optimization(acqui, acqui.dim_in());
......@@ -50,14 +50,15 @@ namespace limbo {
std::cout << this->_iteration << " new point: "
<< this->_samples[this->_samples.size() - 1].transpose()
<< " value: " << this->_observations[this->_observations.size() - 1].transpose()
// << " mu: "<< model.mu(this->_samples[this->_samples.size() - 1]).transpose()
// << " sigma: "<< model.sigma(this->_samples[this->_samples.size() - 1])
// << " acqui: "<< acqui(this->_samples[this->_samples.size() - 1])
// << " mu: "<< _model.mu(this->_samples[this->_samples.size() - 1]).transpose()
// << " mean: " << _model.mean_function()(new_sample,_model).transpose()
// << " sigma: "<< _model.sigma(this->_samples[this->_samples.size() - 1])
// << " acqui: "<< acqui(this->_samples[this->_samples.size() - 1])
<< " best:" << this->best_observation().transpose()
<< std::endl;
model.compute(this->_samples, this->_observations, Params::boptimizer::noise());
_model.compute(this->_samples, this->_observations, Params::boptimizer::noise());
this->_update_stats(*this);
......@@ -76,6 +77,14 @@ namespace limbo {
return this->_samples[std::distance(this->_observations.begin(), max_e)];
}
// Read-only access to the trained surrogate model (GP) used by the optimizer.
const model_t& model() const { return _model; }
protected:
model_t _model;
};
......
......@@ -98,6 +98,7 @@ namespace limbo {
protected:
int _dim_in;
int _dim_out;
KernelFunction _kernel_function;
MeanFunction _mean_function;
......
......@@ -25,6 +25,7 @@ namespace limbo {
template<typename Params, typename KernelFunction, typename MeanFunction, typename ObsType=Eigen::VectorXd>
class GPAuto : public GP<Params, KernelFunction, MeanFunction> {
public:
GPAuto():GP<Params, KernelFunction, MeanFunction>() {}
// TODO : init KernelFunction with dim in GP
GPAuto(int dim_in, int dim_out) : GP<Params, KernelFunction, MeanFunction>(dim_in, dim_out) {}
......@@ -35,8 +36,9 @@ namespace limbo {
GP<Params, KernelFunction, MeanFunction>::compute(samples, observations, noise);
_optimize_likelihood();
this->_compute_obs_mean(); //ORDER MATTERS
this->_compute_kernel();
this->_compute_kernel();
}
Eigen::VectorXd check_inverse(){
......@@ -47,8 +49,11 @@ namespace limbo {
virtual double log_likelihood(const Eigen::VectorXd& h_params,
bool update_kernel = true) {
this->_kernel_function.set_h_params(h_params);
if (update_kernel)
this->_compute_kernel();
if (update_kernel){
this->_compute_obs_mean(); // ORDER MATTERS
this->_compute_kernel();
}
size_t n = this->_obs_mean.rows();
// --- cholesky ---
......@@ -69,8 +74,10 @@ namespace limbo {
virtual Eigen::VectorXd log_likelihood_grad(const Eigen::VectorXd& h_params,
bool update_kernel = true) {
this->_kernel_function.set_h_params(h_params);
if (update_kernel)
if (update_kernel){
this->_compute_obs_mean();
this->_compute_kernel();
}
size_t n = this->_observations.rows();
/// what we should write, but it is less numerically stable than using the Cholesky decomposition
......@@ -103,7 +110,11 @@ namespace limbo {
}
return grad;
}
float get_lik()const{return _lik;}
protected:
float _lik;
virtual void _optimize_likelihood() {
par::init();
typedef std::pair<Eigen::VectorXd, double> pair_t;
......@@ -128,7 +139,7 @@ namespace limbo {
auto m = par::max(init, Params::gp_auto::rprop_restart(), body, comp);
std::cout << "likelihood:" << m.second << std::endl;
this->_kernel_function.set_h_params(m.first);
this->_lik=m.second;
}
};
}
......
......@@ -25,8 +25,9 @@ namespace limbo {
template<typename Params, typename KernelFunction, typename MeanFunction, typename ObsType=Eigen::VectorXd>
class GPAutoMean : public GP<Params, KernelFunction, MeanFunction> {
public:
GPAutoMean(): GP<Params, KernelFunction, MeanFunction>() {}
// TODO : init KernelFunction with dim in GP
// Construct with explicit input/output dimensionality, forwarded to the base GP.
// Fix: the diff residue left TWO constructors with the identical signature
// (one carrying a leftover debug print) — a redefinition that cannot compile.
// Keep the single, clean version (the one the commit added).
GPAutoMean(int dim_in, int dim_out) : GP<Params, KernelFunction, MeanFunction>(dim_in, dim_out) {}
void compute(const std::vector<Eigen::VectorXd>& samples,
const std::vector<ObsType>& observations,
......@@ -37,7 +38,9 @@ namespace limbo {
// std::cout<<"kernel params: "<<this->_kernel_function.h_params().transpose()<<std::endl;
// std::cout<<"mean params: "<<this->_mean_function.h_params().transpose()<<std::endl;
this->_compute_kernel();
this->_compute_obs_mean(); // ORDER MATTERS
this->_compute_kernel();
}
Eigen::VectorXd check_inverse(){
......@@ -50,8 +53,8 @@ namespace limbo {
this->_kernel_function.set_h_params(h_params.head(this->_kernel_function.h_params_size()));
this->_mean_function.set_h_params(h_params.tail(this->_mean_function.h_params_size()));
if (update_kernel){
this->_compute_kernel();
this->_compute_obs_mean();
this->_compute_kernel();
}
size_t n = this->_obs_mean.rows();
......@@ -64,8 +67,9 @@ namespace limbo {
// double a = this->_obs_mean.col(0).dot(this->_alpha.col(0));
double a = (this->_obs_mean.transpose() * this->_alpha).trace(); // generalization for multi dimensional observation
// std::cout<<" a: "<<a <<" det: "<< det<<std::endl;
//std::cout<<" a: "<<a <<" det: "<< det<<std::endl;
double lik= -0.5 * a - 0.5 * det - 0.5 * n * log(2 * M_PI);
//std::cout<<h_params.transpose()<<" "<<lik<<std::endl;
return lik;
}
......@@ -75,8 +79,10 @@ namespace limbo {
this->_kernel_function.set_h_params(h_params.head(this->_kernel_function.h_params_size()));
this->_mean_function.set_h_params(h_params.tail(this->_mean_function.h_params_size()));
if (update_kernel)
if (update_kernel){
this->_compute_obs_mean(); //ORDER MATTERS
this->_compute_kernel();
}
size_t n = this->_observations.rows();
/// what we should write, but it is less numerically stable than using the Cholesky decomposition
......@@ -115,7 +121,10 @@ namespace limbo {
}
return grad;
}
float get_lik()const{return _lik;}
protected:
float _lik;
virtual void _optimize_likelihood() {
par::init();
typedef std::pair<Eigen::VectorXd, double> pair_t;
......@@ -139,9 +148,10 @@ namespace limbo {
pair_t init(Eigen::VectorXd::Zero(1), -std::numeric_limits<float>::max());
auto m = par::max(init, Params::gp_auto_mean::rprop_restart(), body, comp);
std::cout << "likelihood:" << m.second << std::endl;
this->_kernel_function.set_h_params(m.first.head(this->_kernel_function.h_params_size()));
this->_mean_function.set_h_params(m.first.tail(this->_mean_function.h_params_size()));
this->_lik=m.second;
}
};
}
......
......@@ -28,7 +28,7 @@ namespace limbo {
void operator()(const F& feval, Opt& opt) const {
for (int i = 0; i < Params::init::nb_samples(); i++) {
Eigen::VectorXd new_sample(F::dim_in);
for (size_t i = 0; i < F::dim_in; i++)
for (int i = 0; i < F::dim_in; i++)
new_sample[i] = misc::rand<double>(0, 1);
std::cout << "random sample:" << new_sample.transpose() << std::endl;
opt.add_new_sample(new_sample, feval(new_sample));
......
......@@ -55,7 +55,7 @@ namespace limbo {
*/
template<typename Params>
struct SquaredExpARD {
SquaredExpARD(int dim) : _sf2(0), _ell(dim), _input_dim(dim){
SquaredExpARD(int dim=1) : _sf2(0), _ell(dim), _input_dim(dim){
this->set_h_params(Eigen::VectorXd::Ones(_ell.size()+1)*-1);
}
size_t h_params_size() const {
......
......@@ -42,10 +42,10 @@ namespace limbo {
// Constant mean function: regardless of the query point v, return the
// GP's mean observation as the prior mean.
ObsType operator()(const Eigen::VectorXd& v, const GP& gp) const {
    return gp.mean_observation().array();
}
};
template<typename Params,typename ObsType=Eigen::VectorXd>
struct MeanArchive {
MeanArchive(size_t dim_out = 1) {
......@@ -85,7 +85,7 @@ namespace limbo {
template<typename Params,typename MeanFunction,typename ObsType=Eigen::VectorXd>
struct MeanFunctionARD {
MeanFunctionARD(size_t dim_out):_mean_function(dim_out), _tr(dim_out,dim_out+1) {
MeanFunctionARD(size_t dim_out=1):_mean_function(dim_out), _tr(dim_out,dim_out+1) {
Eigen::VectorXd h=Eigen::VectorXd::Zero(dim_out*(dim_out+1));
for(size_t i=0;i<dim_out;i++)
h[i*(dim_out+2)]=1;
......@@ -129,7 +129,7 @@ namespace limbo {
};
}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment