Commit 5a9a7d28 authored by Konstantinos Chatzilygeroudis

First commit for new kernels. Experiments and benchmarks still do not work

parent 06141891
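At a glance, this commit moves the observation-noise parameter out of bayes_opt_boptimizer and into a new kernel parameter struct, and rebases every kernel on a CRTP BaseKernel that adds the noise on the diagonal of the Gram matrix. A minimal sketch of the new Params convention, condensed from the hunks below (values taken from the first example):

struct Params {
    // Noise now lives in the kernel parameters (defaults::kernel provides
    // noise = 0.01 and optimize_noise = false; see kernel.hpp below).
    struct kernel : public limbo::defaults::kernel {
        BO_PARAM(double, noise, 0.001);
    };
    struct kernel_maternfivehalves {
        BO_PARAM(double, sigma_sq, 1);
        BO_PARAM(double, l, 0.2);
    };
    // bayes_opt_boptimizer no longer declares a noise parameter.
    struct bayes_opt_boptimizer : public limbo::defaults::bayes_opt_boptimizer {
    };
};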
......@@ -72,6 +72,10 @@ BO_PARAMS(std::cout,
BO_PARAM(double, alpha, 0.1);
};
struct kernel : public defaults::kernel {
BO_PARAM(double, noise, 0.001);
};
struct kernel_maternfivehalves {
BO_PARAM(double, sigma_sq, 1);
BO_PARAM(double, l, 0.2);
......@@ -82,7 +86,6 @@ BO_PARAMS(std::cout,
};
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.001);
};
struct init_randomsampling {
......
......@@ -66,6 +66,10 @@ struct Params {
struct opt_gridsearch : public defaults::opt_gridsearch {
};
#endif
struct kernel : public defaults::kernel {
BO_PARAM(double, noise, 0.001);
};
struct kernel_maternfivehalves {
BO_PARAM(double, sigma_sq, 1);
BO_PARAM(double, l, 0.2);
......@@ -76,7 +80,6 @@ struct Params {
};
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.001);
};
struct init_randomsampling {
......
......@@ -66,6 +66,10 @@ struct Params {
struct opt_rprop : public defaults::opt_rprop {
};
struct kernel : public defaults::kernel {
BO_PARAM(double, noise, 0.001);
};
struct kernel_squared_exp_ard : public defaults::kernel_squared_exp_ard {
};
......@@ -79,8 +83,6 @@ struct Params {
};
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.001);
BO_PARAM(bool, stats_enabled, true);
BO_PARAM(int, hp_period, 50);
};
......
......@@ -68,7 +68,6 @@
namespace limbo {
namespace defaults {
struct bayes_opt_boptimizer {
BO_PARAM(double, noise, 1e-6);
BO_PARAM(int, hp_period, -1);
};
}
......@@ -143,7 +142,7 @@ namespace limbo {
this->_init(sfun, afun, reset);
if (!this->_observations.empty())
_model.compute(this->_samples, this->_observations, Eigen::VectorXd::Constant(this->_observations.size(), Params::bayes_opt_boptimizer::noise()));
_model.compute(this->_samples, this->_observations);
else
_model = model_t(StateFunction::dim_in, StateFunction::dim_out);
......@@ -160,7 +159,7 @@ namespace limbo {
this->_update_stats(*this, afun);
_model.add_sample(this->_samples.back(), this->_observations.back(), Params::bayes_opt_boptimizer::noise());
_model.add_sample(this->_samples.back(), this->_observations.back());
if (Params::bayes_opt_boptimizer::hp_period() > 0
&& (this->_current_iteration + 1) % Params::bayes_opt_boptimizer::hp_period() == 0)
......
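From the caller's side the migration is mechanical; a sketch condensed from the two hunks above:

// Before this commit, the optimizer threaded the noise through explicitly:
//   _model.compute(this->_samples, this->_observations,
//                  Eigen::VectorXd::Constant(n, Params::bayes_opt_boptimizer::noise()));
//   _model.add_sample(sample, observation, Params::bayes_opt_boptimizer::noise());
// After it, the noise is read inside the kernel from Params::kernel::noise():
//   _model.compute(this->_samples, this->_observations);
//   _model.add_sample(sample, observation);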
......@@ -49,6 +49,7 @@
///@defgroup kernel
///@defgroup kernel_defaults
#include <limbo/kernel/kernel.hpp>
#include <limbo/kernel/exp.hpp>
#include <limbo/kernel/matern_five_halves.hpp>
#include <limbo/kernel/matern_three_halves.hpp>
......
......@@ -46,9 +46,7 @@
#ifndef LIMBO_KERNEL_EXP_HPP
#define LIMBO_KERNEL_EXP_HPP
#include <Eigen/Core>
#include <limbo/tools/macros.hpp>
#include <limbo/kernel/kernel.hpp>
namespace limbo {
namespace defaults {
......@@ -73,9 +71,10 @@ namespace limbo {
\endrst
*/
template <typename Params>
struct Exp {
struct Exp : public BaseKernel<Params, Exp<Params>> {
Exp(size_t dim = 1) {}
double operator()(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
double kernel(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
{
double _l = Params::kernel_exp::l();
return Params::kernel_exp::sigma_sq() * (std::exp(-(1 / (2 * std::pow(_l, 2))) * std::pow((v1 - v2).norm(), 2)));
......
//| Copyright Inria May 2015
//| This project has received funding from the European Research Council (ERC) under
//| the European Union's Horizon 2020 research and innovation programme (grant
//| agreement No 637972) - see http://www.resibots.eu
//|
//| Contributor(s):
//| - Jean-Baptiste Mouret (jean-baptiste.mouret@inria.fr)
//| - Antoine Cully (antoinecully@gmail.com)
//| - Konstantinos Chatzilygeroudis (konstantinos.chatzilygeroudis@inria.fr)
//| - Federico Allocati (fede.allocati@gmail.com)
//| - Vaios Papaspyros (b.papaspyros@gmail.com)
//| - Roberto Rama (bertoski@gmail.com)
//|
//| This software is a computer library whose purpose is to optimize continuous,
//| black-box functions. It mainly implements Gaussian processes and Bayesian
//| optimization.
//| Main repository: http://github.com/resibots/limbo
//| Documentation: http://www.resibots.eu/limbo
//|
//| This software is governed by the CeCILL-C license under French law and
//| abiding by the rules of distribution of free software. You can use,
//| modify and/ or redistribute the software under the terms of the CeCILL-C
//| license as circulated by CEA, CNRS and INRIA at the following URL
//| "http://www.cecill.info".
//|
//| As a counterpart to the access to the source code and rights to copy,
//| modify and redistribute granted by the license, users are provided only
//| with a limited warranty and the software's author, the holder of the
//| economic rights, and the successive licensors have only limited
//| liability.
//|
//| In this respect, the user's attention is drawn to the risks associated
//| with loading, using, modifying and/or developing or reproducing the
//| software by the user in light of its specific status of free software,
//| that may mean that it is complicated to manipulate, and that also
//| therefore means that it is reserved for developers and experienced
//| professionals having in-depth computer knowledge. Users are therefore
//| encouraged to load and test the software's suitability as regards their
//| requirements in conditions enabling the security of their systems and/or
//| data to be ensured and, more generally, to use and operate it in the
//| same conditions as regards security.
//|
//| The fact that you are presently reading this means that you have had
//| knowledge of the CeCILL-C license and that you accept its terms.
//|
#ifndef LIMBO_KERNEL_KERNEL_HPP
#define LIMBO_KERNEL_KERNEL_HPP
#include <Eigen/Core>
#include <limbo/tools/macros.hpp>
namespace limbo {
namespace defaults {
struct kernel {
/// @ingroup kernel_defaults
BO_PARAM(double, noise, 0.01);
BO_PARAM(bool, optimize_noise, false);
};
}
namespace kernel {
/**
@ingroup kernel
\rst
Base struct for kernel definition
Handles the noise and its optimization
\endrst
Parameters:
- ``double noise`` (initial signal noise)
- ``bool optimize_noise`` (whether the noise is optimized along with the other hyperparameters)
*/
template <typename Params, typename Kernel>
struct BaseKernel {
public:
BaseKernel(size_t dim = 1) : _noise(Params::kernel::noise())
{
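// the noise is stored in log-space as the log of its standard deviation, so _noise == std::exp(2 * _noise_p)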
_noise_p = std::log(std::sqrt(_noise));
}
double operator()(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
{
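// add the noise (plus a small jitter) only when v1 == v2, i.e. on the diagonal of the Gram matrix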
return static_cast<const Kernel*>(this)->kernel(v1, v2) + (((v1 - v2).norm() < 1e-8) ? _noise + 1e-8 : 0.0);
}
Eigen::VectorXd grad(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
{
Eigen::VectorXd g = static_cast<const Kernel*>(this)->gradient(x1, x2);
if (Params::kernel::optimize_noise()) {
g.conservativeResize(g.size() + 1);
g(g.size() - 1) = (((x1 - x2).norm() < 1e-8) ? 2.0 * _noise + 1e-8 : 0.0); // noise gradient only on the diagonal (x1 == x2)
}
return g;
}
// Get the hyper parameters size
size_t h_params_size() const
{
return static_cast<const Kernel*>(this)->params_size() + (Params::kernel::optimize_noise() ? 1 : 0);
}
// Get the hyper parameters in log-space
Eigen::VectorXd h_params() const
{
Eigen::VectorXd params = static_cast<const Kernel*>(this)->params();
if (Params::kernel::optimize_noise()) {
params.conservativeResize(params.size() + 1);
params(params.size() - 1) = _noise_p;
}
return params;
}
// We expect the input parameters to be in log-space
void set_h_params(const Eigen::VectorXd& p)
{
static_cast<Kernel*>(this)->set_params(p.head(h_params_size() - (Params::kernel::optimize_noise() ? 1 : 0)));
if (Params::kernel::optimize_noise()) {
_noise_p = p(h_params_size() - 1);
_noise = std::exp(2 * _noise_p);
}
}
// Get signal noise
double noise() const { return _noise; }
protected:
double _noise;
double _noise_p;
};
}
}
#endif
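To make the new contract concrete, here is a hedged sketch of a user-defined kernel on top of BaseKernel; MyKernel and its my_kernel parameter struct are illustrative and not part of this commit. The derived class supplies kernel() and, if its hyperparameters are to be optimized, gradient(), params_size(), params() and set_params(), which BaseKernel reaches through the CRTP casts above:

#include <cmath>
#include <Eigen/Core>
#include <limbo/kernel/kernel.hpp>

// Illustrative only: a user-defined kernel on the new CRTP base.
// Assumes Params provides a `kernel` struct (e.g. inheriting
// limbo::defaults::kernel) and a hypothetical `my_kernel` struct
// exposing sigma_sq() and l().
template <typename Params>
struct MyKernel : public limbo::kernel::BaseKernel<Params, MyKernel<Params>> {
    MyKernel(size_t dim = 1) {}

    // BaseKernel::operator() calls this and adds the noise on the
    // diagonal, so the kernel itself stays noise-free.
    double kernel(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
    {
        double l = Params::my_kernel::l();
        return Params::my_kernel::sigma_sq()
            * std::exp(-0.5 * (v1 - v2).squaredNorm() / (l * l));
    }
};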
......@@ -46,9 +46,7 @@
#ifndef LIMBO_KERNEL_MATERN_FIVE_HALVES_HPP
#define LIMBO_KERNEL_MATERN_FIVE_HALVES_HPP
#include <Eigen/Core>
#include <limbo/tools/macros.hpp>
#include <limbo/kernel/kernel.hpp>
namespace limbo {
namespace defaults {
......@@ -83,10 +81,10 @@ namespace limbo {
\endrst
*/
template <typename Params>
struct MaternFiveHalves {
struct MaternFiveHalves : public BaseKernel<Params, MaternFiveHalves<Params>> {
MaternFiveHalves(size_t dim = 1) {}
double operator()(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
double kernel(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
{
double d = (v1 - v2).norm();
return Params::kernel_maternfivehalves::sigma_sq() * (1 + std::sqrt(5) * d / Params::kernel_maternfivehalves::l() + 5 * d * d / (3 * Params::kernel_maternfivehalves::l() * Params::kernel_maternfivehalves::l())) * std::exp(-std::sqrt(5) * d / Params::kernel_maternfivehalves::l());
......
......@@ -46,9 +46,7 @@
#ifndef LIMBO_KERNEL_MATERN_THREE_HALVES_HPP
#define LIMBO_KERNEL_MATERN_THREE_HALVES_HPP
#include <Eigen/Core>
#include <limbo/tools/macros.hpp>
#include <limbo/kernel/kernel.hpp>
namespace limbo {
namespace defaults {
......@@ -81,10 +79,10 @@ namespace limbo {
\endrst
*/
template <typename Params>
struct MaternThreeHalves {
struct MaternThreeHalves : public BaseKernel<Params, MaternThreeHalves<Params>> {
MaternThreeHalves(size_t dim = 1) {}
double operator()(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
double kernel(const Eigen::VectorXd& v1, const Eigen::VectorXd& v2) const
{
double d = (v1 - v2).norm();
return Params::kernel_maternthreehalves::sigma_sq() * (1 + std::sqrt(3) * d / Params::kernel_maternthreehalves::l()) * std::exp(-std::sqrt(3) * d / Params::kernel_maternthreehalves::l());
......
......@@ -46,7 +46,7 @@
#ifndef LIMBO_KERNEL_SQUARED_EXP_ARD_HPP
#define LIMBO_KERNEL_SQUARED_EXP_ARD_HPP
#include <Eigen/Core>
#include <limbo/kernel/kernel.hpp>
namespace limbo {
namespace defaults {
......@@ -69,31 +69,31 @@ namespace limbo {
.. math::
k_{SE}(x, y) = \sigma^2 \exp \Big(-\frac{1}{2}(x-y)^TM(x-y)\Big),
with :math:`M = \Lambda\Lambda^T + diag(l_1^{-2}, \dots, l_n^{-2})` encoding the characteristic length scales and :math:`\sigma^2` describing the variability of the latent function. The parameters :math:`l_1^2, \dots, l_n^2, \Lambda` are expected in this order in the parameter array. :math:`\Lambda` is a :math:`D\times k` matrix with :math:`k<D`.
Parameters:
- ``double sigma_sq`` (signal variance)
- ``double sigma_sq`` (initial signal variance)
- ``int k`` (number of columns of :math:`\Lambda` matrix)
Reference: :cite:`Rasmussen2006`, p. 106 & :cite:`brochu2010tutorial`, p. 10
\endrst
*/
template <typename Params>
struct SquaredExpARD {
SquaredExpARD(int dim = 1) : _sf2(0), _ell(dim), _A(dim, Params::kernel_squared_exp_ard::k()), _input_dim(dim)
struct SquaredExpARD : public BaseKernel<Params, SquaredExpARD<Params>> {
SquaredExpARD(int dim = 1) : _ell(dim), _A(dim, Params::kernel_squared_exp_ard::k()), _input_dim(dim)
{
Eigen::VectorXd p = Eigen::VectorXd::Zero(_ell.size() + _ell.size() * Params::kernel_squared_exp_ard::k());
this->set_h_params(p);
_sf2 = Params::kernel_squared_exp_ard::sigma_sq();
Eigen::VectorXd p = Eigen::VectorXd::Zero(_ell.size() + _ell.size() * Params::kernel_squared_exp_ard::k() + 1);
p(p.size() - 1) = std::log(std::sqrt(Params::kernel_squared_exp_ard::sigma_sq()));
this->set_params(p);
}
size_t h_params_size() const { return _ell.size() + _ell.size() * Params::kernel_squared_exp_ard::k(); }
size_t params_size() const { return _ell.size() + _ell.size() * Params::kernel_squared_exp_ard::k() + 1; }
// Return the hyper parameters in log-space
const Eigen::VectorXd& h_params() const { return _h_params; }
Eigen::VectorXd params() const { return _h_params; }
// We expect the input parameters to be in log-space
void set_h_params(const Eigen::VectorXd& p)
void set_params(const Eigen::VectorXd& p)
{
_h_params = p;
for (size_t i = 0; i < _input_dim; ++i)
......@@ -101,12 +101,13 @@ namespace limbo {
for (size_t j = 0; j < (unsigned int)Params::kernel_squared_exp_ard::k(); ++j)
for (size_t i = 0; i < _input_dim; ++i)
_A(i, j) = std::exp(p((j + 1) * _input_dim + i));
_sf2 = std::exp(2.0 * p(params_size() - 1));
}
Eigen::VectorXd grad(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
Eigen::VectorXd gradient(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
{
if (Params::kernel_squared_exp_ard::k() > 0) {
Eigen::VectorXd grad = Eigen::VectorXd::Zero(this->h_params_size());
Eigen::VectorXd grad = Eigen::VectorXd::Zero(this->params_size());
Eigen::MatrixXd K = (_A * _A.transpose());
K.diagonal() += (Eigen::MatrixXd)(_ell.array().inverse().square());
double z = ((x1 - x2).transpose() * K * (x1 - x2)).norm();
......@@ -117,18 +118,22 @@ namespace limbo {
for (size_t j = 0; j < Params::kernel_squared_exp_ard::k(); ++j)
grad.segment((1 + j) * _input_dim, _input_dim) = G.col(j);
grad(grad.size() - 1) = 2 * k;
return grad;
}
else {
Eigen::VectorXd grad(_input_dim);
Eigen::VectorXd grad(this->params_size());
Eigen::VectorXd z = (x1 - x2).cwiseQuotient(_ell).array().square();
double k = _sf2 * std::exp(-0.5 * z.sum());
grad.head(_input_dim) = z * k;
grad(grad.size() - 1) = 2 * k;
return grad;
}
}
double operator()(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
double kernel(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
{
assert(x1.size() == _ell.size());
double z;
......
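The net effect of this hunk: sigma_sq is no longer a fixed parameter read once from Params but the last entry of the log-space parameter vector. A sketch of the new layout, for dim = n inputs and k = kernel_squared_exp_ard::k():

// New SquaredExpARD parameter layout (all entries in log-space):
//   p = [ log(l_1), ..., log(l_n),     // n characteristic length scales
//         log(A_11), ..., log(A_nk),   // n*k entries of the Lambda matrix
//         log(sigma) ]                 // signal std-dev, appended by this commit
// hence params_size() == n + n*k + 1, and set_params() recovers
// _sf2 = sigma^2 via std::exp(2.0 * p(params_size() - 1)).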
......@@ -75,10 +75,9 @@ namespace limbo {
GP(int dim_in, int dim_out)
: _dim_in(dim_in), _dim_out(dim_out), _kernel_function(dim_in), _mean_function(dim_out) {}
/// Compute the GP from samples, observation, noise. This call needs to be explicit!
/// Compute the GP from samples and observations. This call needs to be explicit!
void compute(const std::vector<Eigen::VectorXd>& samples,
const std::vector<Eigen::VectorXd>& observations,
const Eigen::VectorXd& noises, bool compute_kernel = true)
const std::vector<Eigen::VectorXd>& observations, bool compute_kernel = true)
{
assert(samples.size() != 0);
assert(observations.size() != 0);
......@@ -102,8 +101,6 @@ namespace limbo {
_mean_observation = _observations.colwise().mean();
_noises = noises;
this->_compute_obs_mean();
if (compute_kernel)
this->_compute_full_kernel();
......@@ -117,7 +114,7 @@ namespace limbo {
/// add sample and update the GP. This code uses an incremental implementation of the Cholesky
/// decomposition. It is therefore much faster than a call to compute()
void add_sample(const Eigen::VectorXd& sample, const Eigen::VectorXd& observation, double noise)
void add_sample(const Eigen::VectorXd& sample, const Eigen::VectorXd& observation)
{
if (_samples.empty()) {
if (_dim_in != sample.size()) {
......@@ -141,10 +138,6 @@ namespace limbo {
_mean_observation = _observations.colwise().mean();
_noises.conservativeResize(_noises.size() + 1);
_noises[_noises.size() - 1] = noise;
//_noise = noise;
this->_compute_obs_mean();
this->_compute_incremental_kernel();
}
......@@ -276,9 +269,6 @@ namespace limbo {
Eigen::MatrixXd _mean_vector;
Eigen::MatrixXd _obs_mean;
Eigen::VectorXd _noises;
Eigen::VectorXd _noises_bl;
Eigen::MatrixXd _alpha;
Eigen::VectorXd _mean_observation;
......@@ -306,7 +296,7 @@ namespace limbo {
// O(n^2) [should be negligible]
for (size_t i = 0; i < n; i++)
for (size_t j = 0; j <= i; ++j)
_kernel(i, j) = _kernel_function(_samples[i], _samples[j]) + ((i == j) ? _noises[i] : 0); // noise only on the diagonal
_kernel(i, j) = _kernel_function(_samples[i], _samples[j]);
for (size_t i = 0; i < n; i++)
for (size_t j = 0; j < i; ++j)
......@@ -329,7 +319,7 @@ namespace limbo {
_kernel.conservativeResize(n, n);
for (size_t i = 0; i < n; ++i) {
_kernel(i, n - 1) = _kernel_function(_samples[i], _samples[n - 1]) + ((i == n - 1) ? _noises[i] : 0); // noise only on the diagonal
_kernel(i, n - 1) = _kernel_function(_samples[i], _samples[n - 1]);
_kernel(n - 1, i) = _kernel(i, n - 1);
}
......
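Note where the diagonal noise went: _compute_full_kernel and _compute_incremental_kernel no longer add _noises[i] themselves; the term now comes from BaseKernel::operator() in kernel.hpp above, which in effect evaluates:

// on the diagonal, where (v1 - v2).norm() < 1e-8:
//   _kernel(i, i) = kernel(x_i, x_i) + _noise + 1e-8;
// off the diagonal:
//   _kernel(i, j) = kernel(x_i, x_j);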
......@@ -63,7 +63,7 @@ namespace limbo {
this->_called = true;
KernelLFOptimization<GP> optimization(gp);
Optimizer optimizer;
auto params = optimizer(optimization, gp.kernel_function().h_params(), false);
Eigen::VectorXd params = optimizer(optimization, gp.kernel_function().h_params(), false);
gp.kernel_function().set_h_params(params);
gp.set_lik(opt::eval(optimization, params));
gp.recompute(false);
......
......@@ -67,7 +67,7 @@ namespace limbo {
Eigen::VectorXd init(dim);
init.head(gp.kernel_function().h_params_size()) = gp.kernel_function().h_params();
init.tail(gp.mean_function().h_params_size()) = gp.mean_function().h_params();
auto params = optimizer(optimization, init, false);
Eigen::VectorXd params = optimizer(optimization, init, false);
gp.kernel_function().set_h_params(params.head(gp.kernel_function().h_params_size()));
gp.mean_function().set_h_params(params.tail(gp.mean_function().h_params_size()));
gp.set_lik(opt::eval(optimization, params));
......
......@@ -63,7 +63,7 @@ namespace limbo {
this->_called = true;
MeanLFOptimization<GP> optimization(gp);
Optimizer optimizer;
auto params = optimizer(optimization, gp.mean_function().h_params(), false);
Eigen::VectorXd params = optimizer(optimization, gp.mean_function().h_params(), false);
gp.mean_function().set_h_params(params);
gp.set_lik(opt::eval(optimization, params));
gp.recompute(true, false);
......
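The auto -> Eigen::VectorXd change repeated in the three hunks above is more than style: with Eigen, auto can deduce a lazy expression-template type that references temporaries and dangles once the statement ends. Whether that could actually bite here depends on the optimizer's return type, but the explicit type is the safe idiom; an illustrative sketch:

// Classic Eigen pitfall that the explicit type rules out (illustrative):
//   auto v = A * x;            // may be a lazy product expression, not a vector
//   double s = v(0);           // evaluated now, possibly after A or x changed
//   Eigen::VectorXd w = A * x; // forces immediate evaluation into a real vector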
......@@ -230,7 +230,6 @@ struct Params {
};
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.0);
};
struct init_randomsampling {
......@@ -241,6 +240,10 @@ struct Params {
BO_PARAM(int, iterations, 40);
};
struct kernel : public defaults::kernel {
BO_PARAM(double, noise, 1e-6);
};
struct kernel_exp : public defaults::kernel_exp {
};
......
......@@ -70,7 +70,6 @@ struct Params {
};
struct bayes_opt_boptimizer : public defaults::bayes_opt_boptimizer {
BO_PARAM(double, noise, 0.0);
BO_DYN_PARAM(int, hp_period);
};
......@@ -78,6 +77,10 @@ struct Params {
BO_PARAM(int, iterations, 190);
};
struct kernel : public defaults::kernel {
BO_PARAM(double, noise, 0.0);
};
struct kernel_exp : public defaults::kernel_exp {
BO_PARAM(double, l, 0.1);
BO_PARAM(double, sigma_sq, 0.25);
......
......@@ -100,6 +100,9 @@ Eigen::VectorXd make_v2(double x1, double x2)
}
struct Params {
struct kernel : public defaults::kernel {
};
struct kernel_squared_exp_ard : public defaults::kernel_squared_exp_ard {
};
......@@ -146,12 +149,12 @@ BOOST_AUTO_TEST_CASE(test_gp_check_lf_grad)
}
for (int i = 0; i < M; i++) {
test_samples.push_back(tools::random_vector(4));
test_samples.push_back(tools::random_vector(4 + 1));
test_samples_mean.push_back(tools::random_vector(6));
test_samples_kernel_mean.push_back(tools::random_vector(6 + 4));
test_samples_kernel_mean.push_back(tools::random_vector(6 + 4 + 1));
}
gp.compute(samples, observations, Eigen::VectorXd::Ones(samples.size()) * 0.01);
gp.compute(samples, observations);
model::gp::KernelLFOpt<Params>::KernelLFOptimization<GP_t> kernel_optimization(gp);
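The + 1 bumps in this test track the extra log(sigma) hyperparameter that SquaredExpARD now appends: assuming the default k() == 0 here, the kernel exposes 4 + 1 parameters instead of 4, and the joint kernel-plus-mean vector grows from 6 + 4 to 6 + 4 + 1.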
......@@ -200,7 +203,7 @@ BOOST_AUTO_TEST_CASE(test_gp_dim)
make_v2(5, 5)};
std::vector<Eigen::VectorXd> samples = {make_v2(1, 1), make_v2(2, 2), make_v2(3, 3)};
gp.compute(samples, observations, Eigen::VectorXd::Zero(samples.size()));
gp.compute(samples, observations);
Eigen::VectorXd mu;
double sigma;
......@@ -224,7 +227,7 @@ BOOST_AUTO_TEST_CASE(test_gp)
make_v1(5)};
std::vector<Eigen::VectorXd> samples = {make_v1(1), make_v1(2), make_v1(3)};
gp.compute(samples, observations, Eigen::VectorXd::Zero(samples.size()));
gp.compute(samples, observations);
Eigen::VectorXd mu;
double sigma;
......@@ -273,7 +276,7 @@ BOOST_AUTO_TEST_CASE(test_gp_bw_inversion)
GP_t gp;
auto t1 = std::chrono::steady_clock::now();
gp.compute(samples, observations, Eigen::VectorXd::Zero(samples.size()));
gp.compute(samples, observations);
auto time_init = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - t1).count();
std::cout.precision(17);
std::cout << "Time running first batch: " << time_init << "us" << std::endl
......@@ -283,7 +286,7 @@ BOOST_AUTO_TEST_CASE(test_gp_bw_inversion)
samples.push_back(make_v1(rgen.rand()));
t1 = std::chrono::steady_clock::now();
gp.add_sample(samples.back(), observations.back(), 0.0);
gp.add_sample(samples.back(), observations.back());
auto time_increment = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - t1).count();
std::cout << "Time running increment: " << time_increment << "us" << std::endl
<< std::endl;
......@@ -296,7 +299,7 @@ BOOST_AUTO_TEST_CASE(test_gp_bw_inversion)