Commit 1b62a921 authored by Konstantinos Chatzilygeroudis

SparseGP --> SparsifiedGP

parent 93a79a7f
@@ -50,7 +50,7 @@
///@defgroup model_opt_defaults
#include <limbo/model/gp.hpp>
-#include <limbo/model/sparse_gp.hpp>
+#include <limbo/model/sparsified_gp.hpp>
#include <limbo/model/gp/kernel_lf_opt.hpp>
#include <limbo/model/gp/kernel_loo_opt.hpp>
......
@@ -69,15 +69,15 @@ namespace limbo {
/// A sparsification based on the density of points is performed
/// until a desired number of points is reached
template <typename Params, typename KernelFunction = kernel::MaternFiveHalves<Params>, typename MeanFunction = mean::Data<Params>, typename HyperParamsOptimizer = gp::NoLFOpt<Params>>
-class SparseGP : public GP<Params, KernelFunction, MeanFunction, HyperParamsOptimizer> {
+class SparsifiedGP : public GP<Params, KernelFunction, MeanFunction, HyperParamsOptimizer> {
public:
using base_gp_t = GP<Params, KernelFunction, MeanFunction, HyperParamsOptimizer>;
/// useful because the model might be created before knowing anything about the process
-SparseGP() : base_gp_t() {}
+SparsifiedGP() : base_gp_t() {}
/// useful because the model might be created before having samples
-SparseGP(int dim_in, int dim_out)
+SparsifiedGP(int dim_in, int dim_out)
: base_gp_t(dim_in, dim_out) {}
/// Compute the GP from samples and observations. This call needs to be explicit!
......
@@ -61,7 +61,7 @@
#include <limbo/model/gp/kernel_loo_opt.hpp>
#include <limbo/model/gp/kernel_mean_lf_opt.hpp>
#include <limbo/model/gp/mean_lf_opt.hpp>
-#include <limbo/model/sparse_gp.hpp>
+#include <limbo/model/sparsified_gp.hpp>
#include <limbo/opt/grid_search.hpp>
#include <limbo/tools/macros.hpp>
@@ -788,7 +788,7 @@ BOOST_AUTO_TEST_CASE(test_sparse_gp)
using KF_t = kernel::SquaredExpARD<SparseParams>;
using MF_t = mean::Constant<SparseParams>;
using GP_t = model::GP<SparseParams, KF_t, MF_t, model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;
-using SparseGP_t = model::SparseGP<SparseParams, KF_t, MF_t, model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;
+using SparsifiedGP_t = model::SparsifiedGP<SparseParams, KF_t, MF_t, model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;
for (size_t i = 0; i < N; i++) {
@@ -806,7 +806,7 @@ BOOST_AUTO_TEST_CASE(test_sparse_gp)
gp.optimize_hyperparams();
auto time_full = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - t1).count();
-SparseGP_t sgp;
+SparsifiedGP_t sgp;
auto t2 = std::chrono::steady_clock::now();
sgp.compute(samples, observations, false);
sgp.optimize_hyperparams();
@@ -843,7 +843,7 @@ BOOST_AUTO_TEST_CASE(test_sparse_gp_accuracy)
using KF_t = kernel::SquaredExpARD<SparseParams>;
using MF_t = mean::Constant<SparseParams>;
using GP_t = model::GP<SparseParams, KF_t, MF_t, model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;
-using SparseGP_t = model::SparseGP<SparseParams, KF_t, MF_t, model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;
+using SparsifiedGP_t = model::SparsifiedGP<SparseParams, KF_t, MF_t, model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;
for (size_t i = 0; i < N; i++) {
@@ -866,7 +866,7 @@ BOOST_AUTO_TEST_CASE(test_sparse_gp_accuracy)
gp.compute(samples, observations, false);
gp.optimize_hyperparams();
-SparseGP_t sgp;
+SparsifiedGP_t sgp;
sgp.compute(samples, observations, false);
sgp.optimize_hyperparams();
......
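For orientation, here is a minimal usage sketch of the renamed class, mirroring the pattern of the test code in this diff. `SparseParams` and the `samples`/`observations` vectors (both `std::vector<Eigen::VectorXd>` in limbo) are assumed to be defined as in the test file and are not part of this commit.

```cpp
// Usage sketch for the renamed class (SparseGP -> SparsifiedGP).
// SparseParams, samples and observations are assumed to be defined as in
// the test file above; header paths follow limbo's usual layout.
#include <limbo/kernel/squared_exp_ard.hpp>
#include <limbo/mean/constant.hpp>
#include <limbo/model/gp/kernel_lf_opt.hpp>
#include <limbo/model/sparsified_gp.hpp>
#include <limbo/opt/rprop.hpp>

using namespace limbo;

using KF_t = kernel::SquaredExpARD<SparseParams>;
using MF_t = mean::Constant<SparseParams>;
using SparsifiedGP_t = model::SparsifiedGP<SparseParams, KF_t, MF_t,
    model::gp::KernelLFOpt<SparseParams, opt::Rprop<SparseParams>>>;

SparsifiedGP_t sgp;                        // same constructors as the base GP
sgp.compute(samples, observations, false); // the compute call must be explicit
sgp.optimize_hyperparams();                // density-based sparsification keeps only the desired number of points
```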