Commit d83615b9 authored by Konstantinos Chatzilygeroudis, committed by GitHub

Merge pull request #175 from resibots/check_grad

Gradient check test via finite differences
parents ca437ae9 0bae270f
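
In short: the three hyperparameter optimizers (kernel, kernel+mean, mean) now operate directly on the raw hyperparameters instead of an affine rescaling of them, and a new test checks each optimizer's analytic gradient against a central finite-difference estimate. For a step size $\epsilon$ (the test uses $\epsilon = 10^{-4}$), each partial derivative is approximated as

$$[\nabla f(x)]_j \approx \frac{f(x + \epsilon e_j) - f(x - \epsilon e_j)}{2\epsilon},$$

where $e_j$ is the $j$-th canonical basis vector; the `check_grad` helper in the test diff below implements exactly this.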
@@ -62,8 +62,8 @@ namespace limbo {
     this->_called = true;
     KernelLFOptimization<GP> optimization(gp);
     Optimizer optimizer;
-    auto params = optimizer(optimization, (gp.kernel_function().h_params().array() + 6.0) / 7.0, true);
-    gp.kernel_function().set_h_params(-6.0 + params.array() * 7.0);
+    auto params = optimizer(optimization, gp.kernel_function().h_params(), false);
+    gp.kernel_function().set_h_params(params);
     gp.set_lik(opt::eval(optimization, params));
     gp.recompute(false);
 }
@@ -77,7 +77,7 @@ namespace limbo {
 opt::eval_t operator()(const Eigen::VectorXd& params, bool compute_grad) const
 {
     GP gp(this->_original_gp);
-    gp.kernel_function().set_h_params(-6.0 + params.array() * 7.0);
+    gp.kernel_function().set_h_params(params);
     gp.recompute(false);
...
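
The same pattern recurs in the kernel+mean and mean optimizers below: the old code assumed hyperparameters in $[-6, 1]$, mapped them to $[0, 1]$ via $p = (h + 6)/7$, optimized with the third argument set to `true` (presumably the "bounded" flag), and mapped the result back with $h = -6 + 7p$. The new code passes the raw hyperparameters and optimizes unbounded (`false`), so the `operator()` functor no longer applies any transform and its analytic gradient can be checked against finite differences of the raw parameters directly.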
@@ -64,11 +64,11 @@ namespace limbo {
     Optimizer optimizer;
     int dim = gp.kernel_function().h_params_size() + gp.mean_function().h_params_size();
     Eigen::VectorXd init(dim);
-    init.head(gp.kernel_function().h_params_size()) = (gp.kernel_function().h_params().array() + 6.0) / 7.0;
-    init.tail(gp.mean_function().h_params_size()) = (gp.mean_function().h_params().array() + 6.0) / 7.0;
-    auto params = optimizer(optimization, init, true);
-    gp.kernel_function().set_h_params(-6.0 + params.head(gp.kernel_function().h_params_size()).array() * 7.0);
-    gp.mean_function().set_h_params(-6.0 + params.tail(gp.mean_function().h_params_size()).array() * 7.0);
+    init.head(gp.kernel_function().h_params_size()) = gp.kernel_function().h_params();
+    init.tail(gp.mean_function().h_params_size()) = gp.mean_function().h_params();
+    auto params = optimizer(optimization, init, false);
+    gp.kernel_function().set_h_params(params.head(gp.kernel_function().h_params_size()));
+    gp.mean_function().set_h_params(params.tail(gp.mean_function().h_params_size()));
     gp.set_lik(opt::eval(optimization, params));
     gp.recompute(true);
 }
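
Note the packing convention visible above: the joint vector puts the kernel hyperparameters first (`params.head(...)`) and the mean hyperparameters last (`params.tail(...)`); the 10-dimensional `test_samples_kernel_mean` vectors in the new test (4 kernel + 6 mean entries) follow the same layout.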
@@ -82,8 +82,8 @@ namespace limbo {
 opt::eval_t operator()(const Eigen::VectorXd& params, bool compute_grad) const
 {
     GP gp(this->_original_gp);
-    gp.kernel_function().set_h_params(-6.0 + params.head(gp.kernel_function().h_params_size()).array() * 7.0);
-    gp.mean_function().set_h_params(-6.0 + params.tail(gp.mean_function().h_params_size()).array() * 7.0);
+    gp.kernel_function().set_h_params(params.head(gp.kernel_function().h_params_size()));
+    gp.mean_function().set_h_params(params.tail(gp.mean_function().h_params_size()));
     gp.recompute(true);
...
@@ -62,8 +62,8 @@ namespace limbo {
     this->_called = true;
     MeanLFOptimization<GP> optimization(gp);
     Optimizer optimizer;
-    auto params = optimizer(optimization, (gp.mean_function().h_params().array() + 6.0) / 7.0, true);
-    gp.mean_function().set_h_params(-6.0 + params.array() * 7.0);
+    auto params = optimizer(optimization, gp.mean_function().h_params(), false);
+    gp.mean_function().set_h_params(params);
     gp.set_lik(opt::eval(optimization, params));
     gp.recompute(true);
 }
@@ -77,7 +77,7 @@ namespace limbo {
 opt::eval_t operator()(const Eigen::VectorXd& params, bool compute_grad) const
 {
     GP gp(this->_original_gp);
-    gp.mean_function().set_h_params(-6.0 + params.array() * 7.0);
+    gp.mean_function().set_h_params(params);
     gp.recompute(true);
...
@@ -44,6 +44,7 @@
 //|
 #define BOOST_TEST_DYN_LINK
 #define BOOST_TEST_MODULE test_gp
+#define protected public // test-only hack so the test can reach the optimizers' protected internals
 #include <boost/test/unit_test.hpp>
@@ -53,13 +54,38 @@
 #include <limbo/kernel/exp.hpp>
 #include <limbo/kernel/squared_exp_ard.hpp>
 #include <limbo/mean/constant.hpp>
+#include <limbo/mean/function_ard.hpp>
 #include <limbo/model/gp.hpp>
 #include <limbo/model/gp/kernel_lf_opt.hpp>
+#include <limbo/model/gp/kernel_mean_lf_opt.hpp>
+#include <limbo/model/gp/mean_lf_opt.hpp>
 #include <limbo/opt/grid_search.hpp>
 #include <limbo/tools/macros.hpp>

 using namespace limbo;
+// Check an analytic gradient against a central finite-difference estimate;
+// returns the norm of the difference plus both gradient vectors
+template <typename F>
+std::tuple<double, Eigen::VectorXd, Eigen::VectorXd> check_grad(const F& f, const Eigen::VectorXd& x, double e = 1e-4)
+{
+    Eigen::VectorXd analytic_result, finite_diff_result;
+
+    opt::eval_t res = f(x, true);
+    analytic_result = opt::grad(res);
+
+    finite_diff_result = Eigen::VectorXd::Zero(x.size());
+    for (int j = 0; j < x.size(); j++) {
+        Eigen::VectorXd test1 = x, test2 = x;
+        test1[j] -= e;
+        test2[j] += e;
+        double res1 = opt::fun(f(test1, false));
+        double res2 = opt::fun(f(test2, false));
+        finite_diff_result[j] = (res2 - res1) / (2.0 * e); // central difference
+    }
+
+    return std::make_tuple((analytic_result - finite_diff_result).norm(), analytic_result, finite_diff_result);
+}
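
The first element of the returned tuple, the norm of the analytic-minus-numerical difference, is what the test below records per random point; summing M of them and requiring the sum to stay under M * e enforces an average error below e = 1e-4 per point.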
 Eigen::VectorXd make_v1(double x)
 {
     return tools::make_vector(x);
@@ -97,6 +123,68 @@ struct Params {
     };
 };
+BOOST_AUTO_TEST_CASE(test_gp_check_lf_grad)
+{
+    using namespace limbo;
+
+    typedef kernel::SquaredExpARD<Params> KF_t;
+    typedef mean::FunctionARD<Params, mean::Constant<Params>> Mean_t;
+    typedef model::GP<Params, KF_t, Mean_t> GP_t;
+
+    GP_t gp(4, 2);
+
+    std::vector<Eigen::VectorXd> observations, samples, test_samples, test_samples_mean, test_samples_kernel_mean;
+    double e = 1e-4;
+
+    // random training data and random hyperparameter vectors to check the gradients at
+    int N = 40, M = 10;
+    for (int i = 0; i < N; i++) {
+        samples.push_back(tools::random_vector(4));
+        observations.push_back(tools::random_vector(2));
+    }
+
+    for (int i = 0; i < M; i++) {
+        test_samples.push_back(tools::random_vector(4)); // 4 kernel hyperparameters
+        test_samples_mean.push_back(tools::random_vector(6)); // 6 mean hyperparameters
+        test_samples_kernel_mean.push_back(tools::random_vector(6 + 4)); // joint kernel + mean
+    }
+
+    gp.compute(samples, observations, Eigen::VectorXd::Ones(samples.size()) * 0.01);
+
+    model::gp::KernelLFOpt<Params>::KernelLFOptimization<GP_t> kernel_optimization(gp);
+
+    Eigen::VectorXd results(M);
+    for (int i = 0; i < M; i++) {
+        auto res = check_grad(kernel_optimization, test_samples[i], 1e-4);
+        results(i) = std::get<0>(res);
+        // std::cout << std::get<1>(res).transpose() << " vs " << std::get<2>(res).transpose() << " --> " << results(i) << std::endl;
+    }
+    // average discrepancy per test point must stay below e
+    BOOST_CHECK(results.array().sum() < M * e);
+
+    model::gp::KernelMeanLFOpt<Params>::KernelMeanLFOptimization<GP_t> kernel_mean_optimization(gp);
+
+    for (int i = 0; i < M; i++) {
+        auto res = check_grad(kernel_mean_optimization, test_samples_kernel_mean[i], 1e-4);
+        results(i) = std::get<0>(res);
+        // std::cout << std::get<1>(res).transpose() << " vs " << std::get<2>(res).transpose() << " --> " << results(i) << std::endl;
+    }
+    BOOST_CHECK(results.array().sum() < M * e);
+
+    model::gp::MeanLFOpt<Params>::MeanLFOptimization<GP_t> mean_optimization(gp);
+
+    for (int i = 0; i < M; i++) {
+        auto res = check_grad(mean_optimization, test_samples_mean[i], 1e-4);
+        results(i) = std::get<0>(res);
+        // std::cout << std::get<1>(res).transpose() << " vs " << std::get<2>(res).transpose() << " --> " << results(i) << std::endl;
+    }
+    BOOST_CHECK(results.array().sum() < M * e);
+}
 BOOST_AUTO_TEST_CASE(test_gp_dim)
 {
     using namespace limbo;
...