Commit c50f29fa authored by Antoine Cully's avatar Antoine Cully
Browse files

Avoid recomputing the kernel when it is not needed in MeanLF

parent f564915a
......@@ -238,6 +238,14 @@ namespace limbo {
this->_compute_full_kernel();
}
/// Recomputes the internal variables related to the mean object without recomputing the kernel.
/// Useful after changing only the mean function's hyper-parameters: the kernel matrix
/// (and its Cholesky factor) is unaffected by them, so just the observation mean
/// and the alpha vector need to be refreshed. Cheaper than recompute(true).
void recompute_mean_internal()
{
// obs_mean/alpha are undefined without at least one sample
assert(!_samples.empty());
this->_compute_obs_mean();
this->_compute_alpha();
}
/// return the likelihood (do not compute it!)
double get_lik() const { return _lik; }
......
......@@ -65,21 +65,29 @@ namespace limbo {
auto params = optimizer(optimization, gp.mean_function().h_params(), false);
gp.mean_function().set_h_params(params);
gp.set_lik(opt::eval(optimization, params));
gp.recompute(true);
gp.recompute_mean_internal();
}
protected:
template <typename GP>
struct MeanLFOptimization {
public:
MeanLFOptimization(const GP& gp) : _original_gp(gp) {}
/// Keeps a reference to the original GP and precomputes K^{-1} once.
/// The kernel matrix K does not depend on the mean hyper-parameters being
/// optimized, so its inverse can be reused by every call to operator()
/// instead of being rebuilt per evaluation.
MeanLFOptimization(const GP& gp) : _original_gp(gp)
{
// n = number of observations (rows of the observation-mean matrix)
size_t n = gp.obs_mean().rows();
// K^{-1} using Cholesky decomposition
// With K = L L^T: first solve L X = I in place, then L^T Y = X,
// leaving _K = (L L^T)^{-1} = K^{-1}.
_K = Eigen::MatrixXd::Identity(n, n);
gp.matrixL().template triangularView<Eigen::Lower>().solveInPlace(_K);
gp.matrixL().template triangularView<Eigen::Lower>().transpose().solveInPlace(_K);
}
opt::eval_t operator()(const Eigen::VectorXd& params, bool compute_grad) const
{
GP gp(this->_original_gp);
gp.mean_function().set_h_params(params);
gp.recompute(true);
gp.recompute_mean_internal();
size_t n = gp.obs_mean().rows();
......@@ -97,15 +105,10 @@ namespace limbo {
if (!compute_grad)
return opt::no_grad(lik);
// K^{-1} using Cholesky decomposition
Eigen::MatrixXd K = Eigen::MatrixXd::Identity(n, n);
gp.matrixL().template triangularView<Eigen::Lower>().solveInPlace(K);
gp.matrixL().template triangularView<Eigen::Lower>().transpose().solveInPlace(K);
Eigen::VectorXd grad = Eigen::VectorXd::Zero(params.size());
for (int i_obs = 0; i_obs < gp.dim_out(); ++i_obs)
for (size_t n_obs = 0; n_obs < n; n_obs++) {
grad.tail(gp.mean_function().h_params_size()) += gp.obs_mean().col(i_obs).transpose() * K.col(n_obs) * gp.mean_function().grad(gp.samples()[n_obs], gp).row(i_obs);
grad.tail(gp.mean_function().h_params_size()) += gp.obs_mean().col(i_obs).transpose() * _K.col(n_obs) * gp.mean_function().grad(gp.samples()[n_obs], gp).row(i_obs);
}
return {lik, grad};
......@@ -113,6 +116,7 @@ namespace limbo {
protected:
const GP& _original_gp;
Eigen::MatrixXd _K;
};
};
}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment