diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml index a733e45b2..4cfb39977 100644 --- a/.github/workflows/clang-format-check.yml +++ b/.github/workflows/clang-format-check.yml @@ -4,10 +4,15 @@ jobs: formatting-check: name: Formatting Check runs-on: ubuntu-latest + strategy: + matrix: + path: + - 'src' + - 'contributions/MPI_IS_gaussian_process' steps: - uses: actions/checkout@v4 - name: Run clang-format style check for C/C++ source files uses: jidicula/clang-format-action@v4.13.0 with: clang-format-version: '18' - check-path: 'src' + check-path: ${{ matrix.path }} diff --git a/contributions/MPI_IS_gaussian_process/src/covariance_functions.cpp b/contributions/MPI_IS_gaussian_process/src/covariance_functions.cpp index 6b5284d32..2674af0ad 100644 --- a/contributions/MPI_IS_gaussian_process/src/covariance_functions.cpp +++ b/contributions/MPI_IS_gaussian_process/src/covariance_functions.cpp @@ -47,160 +47,167 @@ namespace covariance_functions { - /* PeriodicSquareExponential */ - PeriodicSquareExponential::PeriodicSquareExponential() : - hyperParameters(Eigen::VectorXd::Zero(4)), extraParameters(Eigen::VectorXd::Ones(1)*std::numeric_limits<double>::max()) { } - - PeriodicSquareExponential::PeriodicSquareExponential(const Eigen::VectorXd& hyperParameters_) : - hyperParameters(hyperParameters_), extraParameters(Eigen::VectorXd::Ones(1)*std::numeric_limits<double>::max()) { } +/* PeriodicSquareExponential */ +PeriodicSquareExponential::PeriodicSquareExponential() + : hyperParameters(Eigen::VectorXd::Zero(4)), extraParameters(Eigen::VectorXd::Ones(1) * std::numeric_limits<double>::max()) +{ +} + +PeriodicSquareExponential::PeriodicSquareExponential(const Eigen::VectorXd& hyperParameters_) + : hyperParameters(hyperParameters_), extraParameters(Eigen::VectorXd::Ones(1) * std::numeric_limits<double>::max()) +{ +} + +Eigen::MatrixXd PeriodicSquareExponential::evaluate(const Eigen::VectorXd& x, const Eigen::VectorXd& y) +{ + + double lsSE0 = exp(hyperParameters(0)); + double svSE0 = exp(2 * hyperParameters(1)); + double lsP = exp(hyperParameters(2)); + double svP = exp(2 * hyperParameters(3)); + + double plP = exp(extraParameters(0)); + + // Work with arrays internally, convert to matrix for return value. + // This is because all the operations act elementwise, and Eigen::Arrays + // do so, too. 
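For reference, evaluate() reads its hyperparameters in log space: exp() recovers the length scales and exp(2·) the signal variances, and each entry of the returned matrix is the combined SE-plus-periodic kernel documented in covariance_functions.h. A minimal standalone sketch of the per-entry computation (hypothetical helper name, assuming only <cmath> and the same M_PI the file itself uses):

#include <cmath>

// k(t, t') = theta_se * exp(-(t - t')^2 / (2 * l_se^2))
//          + theta_p * exp(-2 * sin^2(pi * |t - t'| / lambda) / l_p^2)
double combinedKernel(double t, double tp, const double logHyper[4], double logPeriodLength)
{
    double lsSE = std::exp(logHyper[0]);     // length scale, SE component
    double svSE = std::exp(2 * logHyper[1]); // signal variance, SE component
    double lsP = std::exp(logHyper[2]);      // length scale, periodic component
    double svP = std::exp(2 * logHyper[3]);  // signal variance, periodic component
    double plP = std::exp(logPeriodLength);  // period length (the extra parameter)

    double d = t - tp;
    double se = svSE * std::exp(-0.5 * d * d / (lsSE * lsSE));
    double p = svP * std::exp(-2 * std::pow(std::sin(M_PI * std::abs(d) / plP) / lsP, 2));
    return se + p;
}

Entry (i, j) of the matrix returned by evaluate() then corresponds to combinedKernel(x(i), y(j), ...).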
+ + // Compute Distances + Eigen::ArrayXXd squareDistanceXY = math_tools::squareDistance(x.transpose(), y.transpose()); + + // fast version + return svSE0 * ((-0.5 / std::pow(lsSE0, 2)) * squareDistanceXY).exp() + + svP * (-2 * (((M_PI / plP) * squareDistanceXY.sqrt()).sin() / lsP).square()).exp(); + + /* // verbose version + // Square Exponential Kernel + Eigen::ArrayXXd K0 = squareDistanceXY / std::pow(lsSE0, 2); + K0 = svSE0 * (-0.5 * K0).exp(); + + // Periodic Kernel + Eigen::ArrayXXd K1 = (M_PI * squareDistanceXY.sqrt() / plP); + K1 = K1.sin() / lsP; + K1 = K1.square(); + K1 = svP * (-2 * K1).exp(); + + // Combined Kernel + return K0 + K1; + */ +} + +void PeriodicSquareExponential::setParameters(const Eigen::VectorXd& params) +{ + this->hyperParameters = params; +} + +void PeriodicSquareExponential::setExtraParameters(const Eigen::VectorXd& params) +{ + this->extraParameters = params; +} + +const Eigen::VectorXd& PeriodicSquareExponential::getParameters() const +{ + return this->hyperParameters; +} + +const Eigen::VectorXd& PeriodicSquareExponential::getExtraParameters() const +{ + return this->extraParameters; +} + +int PeriodicSquareExponential::getParameterCount() const +{ + return 4; +} + +int PeriodicSquareExponential::getExtraParameterCount() const +{ + return 1; +} + +/* PeriodicSquareExponential2 */ +PeriodicSquareExponential2::PeriodicSquareExponential2() + : hyperParameters(Eigen::VectorXd::Zero(6)), extraParameters(Eigen::VectorXd::Ones(1) * std::numeric_limits<double>::max()) +{ +} + +PeriodicSquareExponential2::PeriodicSquareExponential2(const Eigen::VectorXd& hyperParameters_) + : hyperParameters(hyperParameters_), extraParameters(Eigen::VectorXd::Ones(1) * std::numeric_limits<double>::max()) +{ +} + +Eigen::MatrixXd PeriodicSquareExponential2::evaluate(const Eigen::VectorXd& x, const Eigen::VectorXd& y) +{ + + double lsSE0 = exp(hyperParameters(0)); + double svSE0 = exp(2 * hyperParameters(1)); + double lsP = exp(hyperParameters(2)); + double svP = exp(2 * hyperParameters(3)); + double lsSE1 = exp(hyperParameters(4)); + double svSE1 = exp(2 * hyperParameters(5)); + + double plP = exp(extraParameters(0)); + + // Work with arrays internally, convert to matrix for return value. + // This is because all the operations act elementwise, and Eigen::Arrays + // do so, too. 
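The first statement of the method body (below) delegates to math_tools::squareDistance. The behaviour assumed of it here — an assumption for illustration, not the actual math_tools source — is that, for two inputs whose columns are points, it returns all pairwise squared Euclidean distances:

#include <Eigen/Dense>

// Assumed semantics: for d x n and d x m inputs, entry (i, j) of the result
// is ||a.col(i) - b.col(j)||^2. Returned as an Array so that subsequent
// exp()/sin()/square() calls act elementwise.
Eigen::ArrayXXd pairwiseSquareDistance(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b)
{
    Eigen::ArrayXXd d2(a.cols(), b.cols());
    for (int i = 0; i < a.cols(); ++i)
        for (int j = 0; j < b.cols(); ++j)
            d2(i, j) = (a.col(i) - b.col(j)).squaredNorm();
    return d2;
}

Since the call sites pass x.transpose() and y.transpose() (row vectors), the result is simply the n x m table of squared scalar differences.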
+ + // Compute Distances + Eigen::ArrayXXd squareDistanceXY = math_tools::squareDistance(x.transpose(), y.transpose()); + + // fast version + return svSE0 * ((-0.5 / std::pow(lsSE0, 2)) * squareDistanceXY).exp() + + svP * (-2 * (((M_PI / plP) * squareDistanceXY.sqrt()).sin() / lsP).square()).exp() + + svSE1 * ((-0.5 / std::pow(lsSE1, 2)) * squareDistanceXY).exp(); + + /* // verbose version + // Square Exponential Kernel + Eigen::ArrayXXd K0 = squareDistanceXY / pow(lsSE0, 2); + K0 = svSE0 * (-0.5 * K0).exp(); + + // Periodic Kernel + Eigen::ArrayXXd K1 = (M_PI * squareDistanceXY.sqrt() / plP); + K1 = K1.sin() / lsP; + K1 = K1.square(); + K1 = svP * (-2 * K1).exp(); + + // Square Exponential Kernel + Eigen::ArrayXXd K2 = squareDistanceXY / pow(lsSE1, 2); + K2 = svSE1 * (-0.5 * K2).exp(); + + // Combined Kernel + return K0 + K1 + K2; + */ +} + +void PeriodicSquareExponential2::setParameters(const Eigen::VectorXd& params) +{ + this->hyperParameters = params; +} + +void PeriodicSquareExponential2::setExtraParameters(const Eigen::VectorXd& params) +{ + this->extraParameters = params; +} + +const Eigen::VectorXd& PeriodicSquareExponential2::getParameters() const +{ + return this->hyperParameters; +} + +const Eigen::VectorXd& PeriodicSquareExponential2::getExtraParameters() const +{ + return this->extraParameters; +} + +int PeriodicSquareExponential2::getParameterCount() const +{ + return 6; +} + +int PeriodicSquareExponential2::getExtraParameterCount() const +{ + return 1; +} - Eigen::MatrixXd PeriodicSquareExponential::evaluate(const Eigen::VectorXd& x, const Eigen::VectorXd& y) - { - - double lsSE0 = exp(hyperParameters(0)); - double svSE0 = exp(2 * hyperParameters(1)); - double lsP = exp(hyperParameters(2)); - double svP = exp(2 * hyperParameters(3)); - - double plP = exp(extraParameters(0)); - - // Work with arrays internally, convert to matrix for return value. - // This is because all the operations act elementwise, and Eigen::Arrays - // do so, too. 
- - // Compute Distances - Eigen::ArrayXXd squareDistanceXY = math_tools::squareDistance( x.transpose(), y.transpose()); - - // fast version - return svSE0 * ((-0.5 / std::pow(lsSE0, 2)) * squareDistanceXY).exp() - + svP * (-2 * (((M_PI / plP) * squareDistanceXY.sqrt()).sin() / lsP).square()).exp(); - - /* // verbose version - // Square Exponential Kernel - Eigen::ArrayXXd K0 = squareDistanceXY / std::pow(lsSE0, 2); - K0 = svSE0 * (-0.5 * K0).exp(); - - // Periodic Kernel - Eigen::ArrayXXd K1 = (M_PI * squareDistanceXY.sqrt() / plP); - K1 = K1.sin() / lsP; - K1 = K1.square(); - K1 = svP * (-2 * K1).exp(); - - // Combined Kernel - return K0 + K1; - */ - } - - void PeriodicSquareExponential::setParameters(const Eigen::VectorXd& params) - { - this->hyperParameters = params; - } - - void PeriodicSquareExponential::setExtraParameters(const Eigen::VectorXd& params) - { - this->extraParameters = params; - } - - const Eigen::VectorXd& PeriodicSquareExponential::getParameters() const - { - return this->hyperParameters; - } - - const Eigen::VectorXd& PeriodicSquareExponential::getExtraParameters() const - { - return this->extraParameters; - } - - int PeriodicSquareExponential::getParameterCount() const - { - return 4; - } - - int PeriodicSquareExponential::getExtraParameterCount() const - { - return 1; - } - - - /* PeriodicSquareExponential2 */ - PeriodicSquareExponential2::PeriodicSquareExponential2() : - hyperParameters(Eigen::VectorXd::Zero(6)), extraParameters(Eigen::VectorXd::Ones(1)*std::numeric_limits<double>::max()) { } - - PeriodicSquareExponential2::PeriodicSquareExponential2(const Eigen::VectorXd& hyperParameters_) : - hyperParameters(hyperParameters_), extraParameters(Eigen::VectorXd::Ones(1)*std::numeric_limits<double>::max()) { } - - Eigen::MatrixXd PeriodicSquareExponential2::evaluate(const Eigen::VectorXd& x, const Eigen::VectorXd& y) - { - - double lsSE0 = exp(hyperParameters(0)); - double svSE0 = exp(2 * hyperParameters(1)); - double lsP = exp(hyperParameters(2)); - double svP = exp(2 * hyperParameters(3)); - double lsSE1 = exp(hyperParameters(4)); - double svSE1 = exp(2 * hyperParameters(5)); - - double plP = exp(extraParameters(0)); - - // Work with arrays internally, convert to matrix for return value. - // This is because all the operations act elementwise, and Eigen::Arrays - // do so, too. 
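As that comment says, the arithmetic is done on Eigen::Array objects, whose operators are all elementwise; the same operators on Eigen::Matrix would mean linear algebra instead. A small self-contained illustration of the distinction (hypothetical values, Eigen 3):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::ArrayXXd d2(2, 2); // stand-in for a matrix of squared distances
    d2 << 0.0, 1.0, 4.0, 9.0;

    // Array operations apply entry by entry, exactly as in evaluate():
    Eigen::ArrayXXd k = 2.0 * (-0.5 * d2).exp();

    // .matrix() converts back for callers that expect linear algebra:
    Eigen::MatrixXd K = k.matrix();
    std::cout << K << std::endl;
    return 0;
}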
- - // Compute Distances - Eigen::ArrayXXd squareDistanceXY = math_tools::squareDistance( x.transpose(), y.transpose()); - - // fast version - return svSE0 * ((-0.5 / std::pow(lsSE0, 2)) * squareDistanceXY).exp() - + svP * (-2 * (((M_PI / plP) * squareDistanceXY.sqrt()).sin() / lsP).square()).exp() - + svSE1 * ((-0.5 / std::pow(lsSE1, 2)) * squareDistanceXY).exp(); - - /* // verbose version - // Square Exponential Kernel - Eigen::ArrayXXd K0 = squareDistanceXY / pow(lsSE0, 2); - K0 = svSE0 * (-0.5 * K0).exp(); - - // Periodic Kernel - Eigen::ArrayXXd K1 = (M_PI * squareDistanceXY.sqrt() / plP); - K1 = K1.sin() / lsP; - K1 = K1.square(); - K1 = svP * (-2 * K1).exp(); - - // Square Exponential Kernel - Eigen::ArrayXXd K2 = squareDistanceXY / pow(lsSE1, 2); - K2 = svSE1 * (-0.5 * K2).exp(); - - // Combined Kernel - return K0 + K1 + K2; - */ - } - - void PeriodicSquareExponential2::setParameters(const Eigen::VectorXd& params) - { - this->hyperParameters = params; - } - - void PeriodicSquareExponential2::setExtraParameters(const Eigen::VectorXd& params) - { - this->extraParameters = params; - } - - const Eigen::VectorXd& PeriodicSquareExponential2::getParameters() const - { - return this->hyperParameters; - } - - const Eigen::VectorXd& PeriodicSquareExponential2::getExtraParameters() const - { - return this->extraParameters; - } - - int PeriodicSquareExponential2::getParameterCount() const - { - return 6; - } - - int PeriodicSquareExponential2::getExtraParameterCount() const - { - return 1; - } - -} // namespace covariance_functions +} // namespace covariance_functions diff --git a/contributions/MPI_IS_gaussian_process/src/covariance_functions.h b/contributions/MPI_IS_gaussian_process/src/covariance_functions.h index bf11848e0..62633828b 100644 --- a/contributions/MPI_IS_gaussian_process/src/covariance_functions.h +++ b/contributions/MPI_IS_gaussian_process/src/covariance_functions.h @@ -52,167 +52,160 @@ namespace covariance_functions { - /*!@brief Base class definition for covariance functions +/*!@brief Base class definition for covariance functions + */ +class CovFunc +{ +public: + CovFunc() { } + virtual ~CovFunc() { } + + /*! + * Evaluates the covariance function, caches the quantities that are needed + * to calculate gradient and Hessian. */ - class CovFunc - { - public: - CovFunc() {} - virtual ~CovFunc() {} - - /*! - * Evaluates the covariance function, caches the quantities that are needed - * to calculate gradient and Hessian. - */ - virtual Eigen::MatrixXd evaluate(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) = 0; - - //! Method to set the hyper-parameters. - virtual void setParameters(const Eigen::VectorXd& params) = 0; - virtual void setExtraParameters(const Eigen::VectorXd& params) = 0; - - //! Returns the hyper-parameters. - virtual const Eigen::VectorXd& getParameters() const = 0; - virtual const Eigen::VectorXd& getExtraParameters() const = 0; - - //! Returns the number of hyper-parameters. - virtual int getParameterCount() const = 0; - virtual int getExtraParameterCount() const = 0; - - //! Produces a clone to be able to copy the object. - virtual CovFunc* clone() const = 0; - }; + virtual Eigen::MatrixXd evaluate(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) = 0; + + //! Method to set the hyper-parameters. + virtual void setParameters(const Eigen::VectorXd& params) = 0; + virtual void setExtraParameters(const Eigen::VectorXd& params) = 0; + + //! Returns the hyper-parameters. 
+ virtual const Eigen::VectorXd& getParameters() const = 0; + virtual const Eigen::VectorXd& getExtraParameters() const = 0; + + //! Returns the number of hyper-parameters. + virtual int getParameterCount() const = 0; + virtual int getExtraParameterCount() const = 0; + + //! Produces a clone to be able to copy the object. + virtual CovFunc *clone() const = 0; +}; + +/*! + * The function computes a combined covariance function. It is a periodic + * covariance function with an additional square exponential. This + * combination makes it possible to learn a signal that consists of both + * periodic and aperiodic parts. + * + * Square Exponential Component: + * @f[ + * k _{\textsc{se}}(t,t';\theta_\textsc{se},\ell_\textsc{se}) = + * \theta_\textsc{se} \cdot + * \exp\left(-\frac{(t-t')^2}{2\ell_\textsc{se}^{2}}\right) + * @f] + * + * Periodic Component: + * @f[ + * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) = + * \theta_\textsc{p} \cdot + * \exp\left(-\frac{2\sin^2\left(\frac{\pi}{\lambda} + * (t-t')\right)}{\ell_\textsc{p}^2}\right) + * @f] + * + * Kernel Combination: + * @f[ + * k _\textsc{c}(t,t';\theta_\textsc{se},\ell_\textsc{se},\theta_\textsc{p}, + * \ell_\textsc{p},\lambda) = + * k_{\textsc{se}}(t,t';\theta_\textsc{se},\ell_\textsc{se}) + * + + * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) + * @f] + */ +class PeriodicSquareExponential : public CovFunc +{ +private: + Eigen::VectorXd hyperParameters; + Eigen::VectorXd extraParameters; + +public: + PeriodicSquareExponential(); + explicit PeriodicSquareExponential(const Eigen::VectorXd& hyperParameters); /*! - * The function computes a combined covariance function. It is a periodic - * covariance function with an additional square exponential. This - * combination makes it possible to learn a signal that consists of both - * periodic and aperiodic parts. - * - * Square Exponential Component: - * @f[ - * k _{\textsc{se}}(t,t';\theta_\textsc{se},\ell_\textsc{se}) = - * \theta_\textsc{se} \cdot - * \exp\left(-\frac{(t-t')^2}{2\ell_\textsc{se}^{2}}\right) - * @f] - * - * Periodic Component: - * @f[ - * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) = - * \theta_\textsc{p} \cdot - * \exp\left(-\frac{2\sin^2\left(\frac{\pi}{\lambda} - * (t-t')\right)}{\ell_\textsc{p}^2}\right) - * @f] - * - * Kernel Combination: - * @f[ - * k _\textsc{c}(t,t';\theta_\textsc{se},\ell_\textsc{se},\theta_\textsc{p}, - * \ell_\textsc{p},\lambda) = - * k_{\textsc{se}}(t,t';\theta_\textsc{se},\ell_\textsc{se}) - * + - * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) - * @f] + * Evaluates the covariance function, caches the quantities that are needed + * to calculate gradient and Hessian. + */ + Eigen::MatrixXd evaluate(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2); + + //! Method to set the hyper-parameters. + void setParameters(const Eigen::VectorXd& params); + void setExtraParameters(const Eigen::VectorXd& params); + + //! Returns the hyper-parameters. + const Eigen::VectorXd& getParameters() const; + const Eigen::VectorXd& getExtraParameters() const; + + //! Returns the number of hyper-parameters. + int getParameterCount() const; + int getExtraParameterCount() const; + + /** + * Produces a clone to be able to copy the object. + */ + virtual CovFunc *clone() const { return new PeriodicSquareExponential(*this); } +}; + +/*! + * The function computes a combined covariance function. It is a periodic + * covariance function with two additional square exponential components. 
+ * This combination makes it possible to learn a signal that consists of + * periodic parts, long-range aperiodic parts and small-range deformations. + * + * Square Exponential Component: + * @f[ + * k _{\textsc{se}}(t,t';\theta_\textsc{se},\ell_\textsc{se}) = + * \theta_\textsc{se} \cdot + * \exp\left(-\frac{(t-t')^2}{2\ell_\textsc{se}^{2}}\right) + * @f] + * + * Periodic Component: + * @f[ + * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) = + * \theta_\textsc{p} \cdot + * \exp\left(-\frac{2\sin^2\left(\frac{\pi}{\lambda} + * (t-t')\right)}{\ell_\textsc{p}^2}\right) + * @f] + * + * Kernel Combination: + * @f[ + * k _\textsc{c}(t,t';\theta_\textsc{se},\ell_\textsc{se},\theta_\textsc{p}, + * \ell_\textsc{p},\lambda) = + * k_{\textsc{se},1}(t,t';\theta_{\textsc{se},1},\ell_{\textsc{se},1}) + * + + * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) + * + + * k_{\textsc{se},2}(t,t';\theta_{\textsc{se},2},\ell_{\textsc{se},2}) + * @f] + */ +class PeriodicSquareExponential2 : public CovFunc +{ +private: + Eigen::VectorXd hyperParameters; + Eigen::VectorXd extraParameters; + +public: + PeriodicSquareExponential2(); + explicit PeriodicSquareExponential2(const Eigen::VectorXd& hyperParameters); + + Eigen::MatrixXd evaluate(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2); + + //! Method to set the hyper-parameters. + void setParameters(const Eigen::VectorXd& params); + void setExtraParameters(const Eigen::VectorXd& params); + + //! Returns the hyper-parameters. + const Eigen::VectorXd& getParameters() const; + const Eigen::VectorXd& getExtraParameters() const; + + //! Returns the number of hyper-parameters. + int getParameterCount() const; + int getExtraParameterCount() const; + + /** + * Produces a clone to be able to copy the object. */ - class PeriodicSquareExponential : public CovFunc - { - private: - Eigen::VectorXd hyperParameters; - Eigen::VectorXd extraParameters; - - public: - PeriodicSquareExponential(); - explicit PeriodicSquareExponential(const Eigen::VectorXd& hyperParameters); - - /*! - * Evaluates the covariance function, caches the quantities that are needed - * to calculate gradient and Hessian. - */ - Eigen::MatrixXd evaluate(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2); - - //! Method to set the hyper-parameters. - void setParameters(const Eigen::VectorXd& params); - void setExtraParameters(const Eigen::VectorXd& params); - - //! Returns the hyper-parameters. - const Eigen::VectorXd& getParameters() const; - const Eigen::VectorXd& getExtraParameters() const; - - //! Returns the number of hyper-parameters. - int getParameterCount() const; - int getExtraParameterCount() const; - - /** - * Produces a clone to be able to copy the object. - */ - virtual CovFunc* clone() const - { - return new PeriodicSquareExponential(*this); - } - }; - - - /*! - * The function computes a combined covariance function. It is a periodic - * covariance function with two additional square exponential components. - * This combination makes it possible to learn a signal that consists of - * periodic parts, long-range aperiodic parts and small-range deformations. 
* - * Square Exponential Component: - * @f[ - * k _{\textsc{se}}(t,t';\theta_\textsc{se},\ell_\textsc{se}) = - * \theta_\textsc{se} \cdot - * \exp\left(-\frac{(t-t')^2}{2\ell_\textsc{se}^{2}}\right) - * @f] - * - * Periodic Component: - * @f[ - * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) = - * \theta_\textsc{p} \cdot - * \exp\left(-\frac{2\sin^2\left(\frac{\pi}{\lambda} - * (t-t')\right)}{\ell_\textsc{p}^2}\right) - * @f] - * - * Kernel Combination: - * @f[ - * k _\textsc{c}(t,t';\theta_\textsc{se},\ell_\textsc{se},\theta_\textsc{p}, - * \ell_\textsc{p},\lambda) = - * k_{\textsc{se},1}(t,t';\theta_{\textsc{se},1},\ell_{\textsc{se},1}) - * + - * k_\textsc{p}(t,t';\theta_\textsc{p},\ell_\textsc{p},\lambda) - * + - * k_{\textsc{se},2}(t,t';\theta_{\textsc{se},2},\ell_{\textsc{se},2}) - * @f] - */ - class PeriodicSquareExponential2 : public CovFunc - { - private: - Eigen::VectorXd hyperParameters; - Eigen::VectorXd extraParameters; - - public: - PeriodicSquareExponential2(); - explicit PeriodicSquareExponential2(const Eigen::VectorXd& hyperParameters); - - Eigen::MatrixXd evaluate(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2); - - //! Method to set the hyper-parameters. - void setParameters(const Eigen::VectorXd& params); - void setExtraParameters(const Eigen::VectorXd& params); - - //! Returns the hyper-parameters. - const Eigen::VectorXd& getParameters() const; - const Eigen::VectorXd& getExtraParameters() const; - - //! Returns the number of hyper-parameters. - int getParameterCount() const; - int getExtraParameterCount() const; - - /** - * Produces a clone to be able to copy the object. - */ - virtual CovFunc* clone() const - { - return new PeriodicSquareExponential2(*this); - } - }; -} // namespace covariance_functions -#endif // ifndef COVARIANCE_FUNCTIONS_H + virtual CovFunc *clone() const { return new PeriodicSquareExponential2(*this); } +}; +} // namespace covariance_functions +#endif // ifndef COVARIANCE_FUNCTIONS_H diff --git a/contributions/MPI_IS_gaussian_process/src/gaussian_process.cpp b/contributions/MPI_IS_gaussian_process/src/gaussian_process.cpp index ff16f331b..b6c140ede 100644 --- a/contributions/MPI_IS_gaussian_process/src/gaussian_process.cpp +++ b/contributions/MPI_IS_gaussian_process/src/gaussian_process.cpp @@ -50,64 +50,38 @@ // A functor for special orderings struct covariance_ordering { - covariance_ordering(Eigen::VectorXd const& cov) : covariance_(cov){} - bool operator()(int a, int b) const - { - return (covariance_[a] > covariance_[b]); - } + covariance_ordering(Eigen::VectorXd const& cov) : covariance_(cov) { } + bool operator()(int a, int b) const { return covariance_[a] > covariance_[b]; } Eigen::VectorXd const& covariance_; }; -GP::GP() : covFunc_(nullptr), // initialize pointer to null - covFuncProj_(nullptr), // initialize pointer to null - data_loc_(Eigen::VectorXd()), - data_out_(Eigen::VectorXd()), - data_var_(Eigen::VectorXd()), - gram_matrix_(Eigen::MatrixXd()), - alpha_(Eigen::VectorXd()), - chol_gram_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), - log_noise_sd_(-1E20), - use_explicit_trend_(false), - feature_vectors_(Eigen::MatrixXd()), - feature_matrix_(Eigen::MatrixXd()), - chol_feature_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), - beta_(Eigen::VectorXd()) -{ } - -GP::GP(const covariance_functions::CovFunc& covFunc) : - covFunc_(covFunc.clone()), - covFuncProj_(nullptr), - data_loc_(Eigen::VectorXd()), - data_out_(Eigen::VectorXd()), - data_var_(Eigen::VectorXd()), - gram_matrix_(Eigen::MatrixXd()), - alpha_(Eigen::VectorXd()), - 
chol_gram_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), - log_noise_sd_(-1E20), - use_explicit_trend_(false), - feature_vectors_(Eigen::MatrixXd()), - feature_matrix_(Eigen::MatrixXd()), - chol_feature_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), - beta_(Eigen::VectorXd()) -{ } - -GP::GP(const double noise_variance, - const covariance_functions::CovFunc& covFunc) : - covFunc_(covFunc.clone()), - covFuncProj_(nullptr), - data_loc_(Eigen::VectorXd()), - data_out_(Eigen::VectorXd()), - data_var_(Eigen::VectorXd()), - gram_matrix_(Eigen::MatrixXd()), - alpha_(Eigen::VectorXd()), - chol_gram_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), - log_noise_sd_(std::log(noise_variance)), - use_explicit_trend_(false), - feature_vectors_(Eigen::MatrixXd()), - feature_matrix_(Eigen::MatrixXd()), - chol_feature_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), - beta_(Eigen::VectorXd()) -{ } +GP::GP() + : covFunc_(nullptr), // initialize pointer to null + covFuncProj_(nullptr), // initialize pointer to null + data_loc_(Eigen::VectorXd()), data_out_(Eigen::VectorXd()), data_var_(Eigen::VectorXd()), gram_matrix_(Eigen::MatrixXd()), + alpha_(Eigen::VectorXd()), chol_gram_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), log_noise_sd_(-1E20), + use_explicit_trend_(false), feature_vectors_(Eigen::MatrixXd()), feature_matrix_(Eigen::MatrixXd()), + chol_feature_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), beta_(Eigen::VectorXd()) +{ +} + +GP::GP(const covariance_functions::CovFunc& covFunc) + : covFunc_(covFunc.clone()), covFuncProj_(nullptr), data_loc_(Eigen::VectorXd()), data_out_(Eigen::VectorXd()), + data_var_(Eigen::VectorXd()), gram_matrix_(Eigen::MatrixXd()), alpha_(Eigen::VectorXd()), + chol_gram_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), log_noise_sd_(-1E20), use_explicit_trend_(false), + feature_vectors_(Eigen::MatrixXd()), feature_matrix_(Eigen::MatrixXd()), + chol_feature_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), beta_(Eigen::VectorXd()) +{ +} + +GP::GP(const double noise_variance, const covariance_functions::CovFunc& covFunc) + : covFunc_(covFunc.clone()), covFuncProj_(nullptr), data_loc_(Eigen::VectorXd()), data_out_(Eigen::VectorXd()), + data_var_(Eigen::VectorXd()), gram_matrix_(Eigen::MatrixXd()), alpha_(Eigen::VectorXd()), + chol_gram_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), log_noise_sd_(std::log(noise_variance)), use_explicit_trend_(false), + feature_vectors_(Eigen::MatrixXd()), feature_matrix_(Eigen::MatrixXd()), + chol_feature_matrix_(Eigen::LDLT<Eigen::MatrixXd>()), beta_(Eigen::VectorXd()) +{ +} GP::~GP() { @@ -115,21 +89,13 @@ GP::~GP() delete this->covFuncProj_; // tidy up since we are responsible for the covFuncProj. 
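The destructor here and the copy operations that follow implement manual deep-copy ownership of the two covariance-function pointers: copies go through clone(), and assignment clones before it deletes, so a throwing clone() cannot leave the object holding a freed pointer. The pattern, condensed into a hypothetical Widget/Owner pair (a sketch, not the GP code itself):

struct Widget
{
    virtual ~Widget() { }
    virtual Widget *clone() const = 0;
};

class Owner
{
    Widget *w_; // owned raw pointer, deep-copied on copy and assignment

public:
    Owner(const Widget& proto) : w_(proto.clone()) { }
    Owner(const Owner& that) : w_(that.w_->clone()) { }
    Owner& operator=(const Owner& that)
    {
        if (this != &that)
        {
            Widget *temp = w_;     // store old pointer...
            w_ = that.w_->clone(); // ...first clone...
            delete temp;           // ...and then delete
        }
        return *this;
    }
    ~Owner() { delete w_; }
};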
} -GP::GP(const GP& that) : - covFunc_(nullptr), // initialize to nullptr, clone later - covFuncProj_(nullptr), // initialize to nullptr, clone later - data_loc_(that.data_loc_), - data_out_(that.data_out_), - data_var_(that.data_var_), - gram_matrix_(that.gram_matrix_), - alpha_(that.alpha_), - chol_gram_matrix_(that.chol_gram_matrix_), - log_noise_sd_(that.log_noise_sd_), - use_explicit_trend_(that.use_explicit_trend_), - feature_vectors_(that.feature_vectors_), - feature_matrix_(that.feature_matrix_), - chol_feature_matrix_(that.chol_feature_matrix_), - beta_(that.beta_) +GP::GP(const GP& that) + : covFunc_(nullptr), // initialize to nullptr, clone later + covFuncProj_(nullptr), // initialize to nullptr, clone later + data_loc_(that.data_loc_), data_out_(that.data_out_), data_var_(that.data_var_), gram_matrix_(that.gram_matrix_), + alpha_(that.alpha_), chol_gram_matrix_(that.chol_gram_matrix_), log_noise_sd_(that.log_noise_sd_), + use_explicit_trend_(that.use_explicit_trend_), feature_vectors_(that.feature_vectors_), + feature_matrix_(that.feature_matrix_), chol_feature_matrix_(that.chol_feature_matrix_), beta_(that.beta_) { covFunc_ = that.covFunc_->clone(); covFuncProj_ = that.covFuncProj_->clone(); @@ -148,7 +114,7 @@ bool GP::setCovarianceFunction(const covariance_functions::CovFunc& covFunc) void GP::enableOutputProjection(const covariance_functions::CovFunc& covFuncProj) { - delete covFuncProj_;// initialized to zero, so delete is safe + delete covFuncProj_; // initialized to zero, so delete is safe covFuncProj_ = covFuncProj.clone(); } @@ -162,9 +128,9 @@ GP& GP::operator=(const GP& that) { if (this != &that) { - covariance_functions::CovFunc* temp = covFunc_; // store old pointer... - covFunc_ = that.covFunc_->clone(); // ... first clone ... - delete temp; // ... and then delete. + covariance_functions::CovFunc *temp = covFunc_; // store old pointer... + covFunc_ = that.covFunc_->clone(); // ... first clone ... + delete temp; // ... and then delete. // copy the rest data_loc_ = that.data_loc_; @@ -180,12 +146,10 @@ GP& GP::operator=(const GP& that) Eigen::VectorXd GP::drawSample(const Eigen::VectorXd& locations) const { - return drawSample(locations, - math_tools::generate_normal_random_matrix(locations.rows(), locations.cols())); + return drawSample(locations, math_tools::generate_normal_random_matrix(locations.rows(), locations.cols())); } -Eigen::VectorXd GP::drawSample(const Eigen::VectorXd& locations, - const Eigen::VectorXd& random_vector) const +Eigen::VectorXd GP::drawSample(const Eigen::VectorXd& locations, const Eigen::VectorXd& random_vector) const { Eigen::MatrixXd prior_covariance; Eigen::MatrixXd kernel_matrix; @@ -196,20 +160,18 @@ Eigen::VectorXd GP::drawSample(const Eigen::VectorXd& locations, prior_covariance = covFunc_->evaluate(locations, locations); kernel_matrix = prior_covariance; - if (gram_matrix_.cols() == 0) // no data, i.e. only a prior + if (gram_matrix_.cols() == 0) // no data, i.e. 
only a prior { - kernel_matrix = prior_covariance + JITTER * Eigen::MatrixXd::Identity( - prior_covariance.rows(), prior_covariance.cols()); + kernel_matrix = prior_covariance + JITTER * Eigen::MatrixXd::Identity(prior_covariance.rows(), prior_covariance.cols()); } else // we have some data { Eigen::MatrixXd mixed_covariance; mixed_covariance = covFunc_->evaluate(locations, data_loc_); Eigen::MatrixXd posterior_covariance; - posterior_covariance = prior_covariance - mixed_covariance * - (chol_gram_matrix_.solve(mixed_covariance.transpose())); - kernel_matrix = posterior_covariance + JITTER * Eigen::MatrixXd::Identity( - posterior_covariance.rows(), posterior_covariance.cols()); + posterior_covariance = prior_covariance - mixed_covariance * (chol_gram_matrix_.solve(mixed_covariance.transpose())); + kernel_matrix = + posterior_covariance + JITTER * Eigen::MatrixXd::Identity(posterior_covariance.rows(), posterior_covariance.cols()); } chol_kernel_matrix = kernel_matrix.llt(); @@ -217,8 +179,7 @@ Eigen::VectorXd GP::drawSample(const Eigen::VectorXd& locations, samples = chol_kernel_matrix.matrixL() * random_vector; // add the measurement noise on return - return samples + std::exp(log_noise_sd_) * - math_tools::generate_normal_random_matrix(samples.rows(), samples.cols()); + return samples + std::exp(log_noise_sd_) * math_tools::generate_normal_random_matrix(samples.rows(), samples.cols()); } void GP::infer() @@ -232,8 +193,8 @@ void GP::infer() gram_matrix_.swap(data_cov); // store the new data_cov as gram matrix if (data_var_.rows() == 0) // homoscedastic { - gram_matrix_ += (std::exp(2 * log_noise_sd_) + JITTER) * - Eigen::MatrixXd::Identity(gram_matrix_.rows(), gram_matrix_.cols()); + gram_matrix_ += + (std::exp(2 * log_noise_sd_) + JITTER) * Eigen::MatrixXd::Identity(gram_matrix_.rows(), gram_matrix_.cols()); } else // heteroscedastic { @@ -250,7 +211,7 @@ { feature_vectors_ = Eigen::MatrixXd(2, data_loc_.rows()); // precompute necessary matrices for the explicit trend function - feature_vectors_.row(0) = Eigen::MatrixXd::Ones(1,data_loc_.rows()); // instead of pow(0) + feature_vectors_.row(0) = Eigen::MatrixXd::Ones(1, data_loc_.rows()); // instead of pow(0) feature_vectors_.row(1) = data_loc_.array(); // instead of pow(1) feature_matrix_ = feature_vectors_ * chol_gram_matrix_.solve(feature_vectors_.transpose()); @@ -260,8 +221,7 @@ } } -void GP::infer(const Eigen::VectorXd& data_loc, - const Eigen::VectorXd& data_out, +void GP::infer(const Eigen::VectorXd& data_loc, const Eigen::VectorXd& data_out, const Eigen::VectorXd& data_var /* = EigenVectorXd() */) { data_loc_ = data_loc; @@ -273,15 +233,14 @@ void GP::infer(const Eigen::VectorXd& data_loc, infer(); // updates the Gram matrix and its Cholesky decomposition } -void GP::inferSD(const Eigen::VectorXd& data_loc, - const Eigen::VectorXd& data_out, - const int n, const Eigen::VectorXd& data_var /* = EigenVectorXd() */, - const double prediction_point /*= std::numeric_limits<double>::quiet_NaN()*/) +void GP::inferSD(const Eigen::VectorXd& data_loc, const Eigen::VectorXd& data_out, const int n, + const Eigen::VectorXd& data_var /* = EigenVectorXd() */, + const double prediction_point /*= std::numeric_limits<double>::quiet_NaN()*/) { Eigen::VectorXd covariance; Eigen::VectorXd prediction_loc(1); - if ( math_tools::isNaN(prediction_point) ) + if (math_tools::isNaN(prediction_point)) { // if none given, use the last datapoint as prediction reference prediction_loc = data_loc.tail(1); @@ -296,18 +255,18 @@ void
GP::inferSD(const Eigen::VectorXd& data_loc, // generate index vector std::vector<int> index(covariance.size(), 0); - for (size_t i = 0 ; i != index.size() ; i++) { + for (size_t i = 0; i != index.size(); i++) + { index[i] = i; } // sort indices with respect to covariance value - std::sort(index.begin(), index.end(), - covariance_ordering(covariance) - ); + std::sort(index.begin(), index.end(), covariance_ordering(covariance)); bool use_var = data_var.rows() > 0; // true means heteroscedastic noise - if (n < data_loc.rows()) { + if (n < data_loc.rows()) + { std::vector<double> loc_arr(n); std::vector<double> out_arr(n); std::vector<double> var_arr(n); @@ -322,11 +281,11 @@ void GP::inferSD(const Eigen::VectorXd& data_loc, } } - data_loc_ = Eigen::Map<Eigen::VectorXd>(loc_arr.data(),n,1); - data_out_ = Eigen::Map<Eigen::VectorXd>(out_arr.data(),n,1); + data_loc_ = Eigen::Map<Eigen::VectorXd>(loc_arr.data(), n, 1); + data_out_ = Eigen::Map<Eigen::VectorXd>(out_arr.data(), n, 1); if (use_var) { - data_var_ = Eigen::Map<Eigen::VectorXd>(var_arr.data(),n,1); + data_var_ = Eigen::Map<Eigen::VectorXd>(var_arr.data(), n, 1); } } else // we can use all points and don't need to select @@ -349,12 +308,12 @@ void GP::clearData() data_out_ = Eigen::VectorXd(); } -Eigen::VectorXd GP::predict(const Eigen::VectorXd& locations, Eigen::VectorXd* variances /*=nullptr*/) const +Eigen::VectorXd GP::predict(const Eigen::VectorXd& locations, Eigen::VectorXd *variances /*=nullptr*/) const { // The prior covariance matrix (evaluated on test points) Eigen::MatrixXd prior_cov = covFunc_->evaluate(locations, locations); - if (data_loc_.rows() == 0) // check if the data is empty + if (data_loc_.rows() == 0) // check if the data is empty { if (variances != nullptr) { @@ -371,7 +330,7 @@ Eigen::VectorXd GP::predict(const Eigen::VectorXd& locations, Eigen::VectorXd* v Eigen::MatrixXd phi(2, locations.rows()); if (use_explicit_trend_) { - phi.row(0) = Eigen::MatrixXd::Ones(1,locations.rows()); // instead of pow(0) + phi.row(0) = Eigen::MatrixXd::Ones(1, locations.rows()); // instead of pow(0) phi.row(1) = locations.array(); // instead of pow(1) return predict(prior_cov, mixed_cov, phi, variances); @@ -380,11 +339,11 @@ Eigen::VectorXd GP::predict(const Eigen::VectorXd& locations, Eigen::VectorXd* v } } -Eigen::VectorXd GP::predictProjected(const Eigen::VectorXd& locations, Eigen::VectorXd* variances /*=nullptr*/) const +Eigen::VectorXd GP::predictProjected(const Eigen::VectorXd& locations, Eigen::VectorXd *variances /*=nullptr*/) const { // use the suitable covariance function, depending on whether an // output projection is used or not. 
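For orientation, the linear algebra split between infer() above and the predict() overloads is standard GP regression: factor the noisy Gram matrix once, cache alpha = (K + sigma^2 I)^-1 y, and reuse it for every prediction. A minimal self-contained sketch (toy SE kernel and hypothetical names; the real code additionally handles heteroscedastic noise, the output projection, and the explicit linear trend):

#include <Eigen/Dense>
#include <cmath>

// One-dimensional GP regression: predictive mean at the locations Xs.
Eigen::VectorXd gpPredict(const Eigen::VectorXd& X, const Eigen::VectorXd& y, const Eigen::VectorXd& Xs, double noiseVar)
{
    auto kernel = [](const Eigen::VectorXd& a, const Eigen::VectorXd& b) {
        Eigen::MatrixXd K(a.size(), b.size());
        for (int i = 0; i < a.size(); ++i)
            for (int j = 0; j < b.size(); ++j)
                K(i, j) = std::exp(-0.5 * std::pow(a(i) - b(j), 2));
        return K;
    };

    // "infer": build the Gram matrix with noise and factorize it once
    Eigen::MatrixXd K = kernel(X, X) + noiseVar * Eigen::MatrixXd::Identity(X.size(), X.size());
    Eigen::LDLT<Eigen::MatrixXd> chol(K);
    Eigen::VectorXd alpha = chol.solve(y); // cached as alpha_ in the GP class

    // "predict": mean = K(Xs, X) * alpha
    return kernel(Xs, X) * alpha;
}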
- covariance_functions::CovFunc* covFunc = nullptr; + covariance_functions::CovFunc *covFunc = nullptr; if (covFuncProj_ == nullptr) { covFunc = covFunc_; @@ -399,7 +358,7 @@ Eigen::VectorXd GP::predictProjected(const Eigen::VectorXd& locations, Eigen::Ve // The prior covariance matrix (evaluated on test points) Eigen::MatrixXd prior_cov = covFunc->evaluate(locations, locations); - if (data_loc_.rows() == 0) // check if the data is empty + if (data_loc_.rows() == 0) // check if the data is empty { if (variances != nullptr) { @@ -416,7 +375,7 @@ Eigen::VectorXd GP::predictProjected(const Eigen::VectorXd& locations, Eigen::Ve if (use_explicit_trend_) { // calculate the feature vectors for linear regression - phi.row(0) = Eigen::MatrixXd::Ones(1,locations.rows()); // instead of pow(0) + phi.row(0) = Eigen::MatrixXd::Ones(1, locations.rows()); // instead of pow(0) phi.row(1) = locations.array(); // instead of pow(1) return predict(prior_cov, mixed_cov, phi, variances); @@ -426,8 +385,7 @@ Eigen::VectorXd GP::predictProjected(const Eigen::VectorXd& locations, Eigen::Ve } Eigen::VectorXd GP::predict(const Eigen::MatrixXd& prior_cov, const Eigen::MatrixXd& mixed_cov, - const Eigen::MatrixXd& phi /*=Eigen::MatrixXd()*/, Eigen::VectorXd* variances /*=nullptr*/) - const + const Eigen::MatrixXd& phi /*=Eigen::MatrixXd()*/, Eigen::VectorXd *variances /*=nullptr*/) const { // calculate GP mean from precomputed alpha vector diff --git a/contributions/MPI_IS_gaussian_process/src/gaussian_process.h b/contributions/MPI_IS_gaussian_process/src/gaussian_process.h index 0dabe90aa..1d4073d81 100644 --- a/contributions/MPI_IS_gaussian_process/src/gaussian_process.h +++ b/contributions/MPI_IS_gaussian_process/src/gaussian_process.h @@ -62,8 +62,8 @@ class GP { private: - covariance_functions::CovFunc* covFunc_; - covariance_functions::CovFunc* covFuncProj_; + covariance_functions::CovFunc *covFunc_; + covariance_functions::CovFunc *covFuncProj_; Eigen::VectorXd data_loc_; Eigen::VectorXd data_out_; Eigen::VectorXd data_var_; @@ -82,8 +82,7 @@ class GP GP(); // allowing the standard constructor makes the use so much easier! GP(const covariance_functions::CovFunc& covFunc); - GP(const double noise_variance, - const covariance_functions::CovFunc& covFunc); + GP(const double noise_variance, const covariance_functions::CovFunc& covFunc); ~GP(); // Need to tidy up GP(const GP& that); @@ -116,8 +115,7 @@ class GP /*! * Returns a sample of the GP based on a given vector of random numbers. */ - Eigen::VectorXd drawSample(const Eigen::VectorXd& locations, - const Eigen::VectorXd& random_vector) const; + Eigen::VectorXd drawSample(const Eigen::VectorXd& locations, const Eigen::VectorXd& random_vector) const; /*! * Builds and inverts the Gram matrix for a given set of datapoints. @@ -133,8 +131,7 @@ class GP * Calls infer() every time so that the Gram matrix is rebuilt and the * Cholesky decomposition is computed. */ - void infer(const Eigen::VectorXd& data_loc, - const Eigen::VectorXd& data_out, + void infer(const Eigen::VectorXd& data_loc, const Eigen::VectorXd& data_out, const Eigen::VectorXd& data_var = Eigen::VectorXd()); /*! @@ -144,9 +141,7 @@ class GP * no prediction point is given, the last data point is used (extrapolation * mode. 
*/ - void inferSD(const Eigen::VectorXd& data_loc, - const Eigen::VectorXd& data_out, - const int n, + void inferSD(const Eigen::VectorXd& data_loc, const Eigen::VectorXd& data_out, const int n, const Eigen::VectorXd& data_var = Eigen::VectorXd(), const double prediction_point = std::numeric_limits<double>::quiet_NaN()); @@ -162,7 +157,7 @@ class GP * This function just builds the prior and mixed covariance matrices and * calls the other predict afterwards. */ - Eigen::VectorXd predict(const Eigen::VectorXd& locations, Eigen::VectorXd* variances = nullptr) const; + Eigen::VectorXd predict(const Eigen::VectorXd& locations, Eigen::VectorXd *variances = nullptr) const; /*! * Predicts the mean and covariance for a vector of locations based on @@ -171,7 +166,7 @@ class GP * This function just builds the prior and mixed covariance matrices and * calls the other predict afterwards. */ - Eigen::VectorXd predictProjected(const Eigen::VectorXd& locations, Eigen::VectorXd* variances = nullptr) const; + Eigen::VectorXd predictProjected(const Eigen::VectorXd& locations, Eigen::VectorXd *variances = nullptr) const; /*! * Does the real work for predict. Solves the Cholesky decomposition for the @@ -179,7 +174,7 @@ * already. */ Eigen::VectorXd predict(const Eigen::MatrixXd& prior_cov, const Eigen::MatrixXd& mixed_cov, - const Eigen::MatrixXd& phi = Eigen::MatrixXd() , Eigen::VectorXd* variances = nullptr) const; + const Eigen::MatrixXd& phi = Eigen::MatrixXd(), Eigen::VectorXd *variances = nullptr) const; /*! * Sets the hyperparameters to the given vector. @@ -200,8 +195,6 @@ * Disables the use of an explicit linear basis function. */ void disableExplicitTrend(); - - }; -#endif // ifndef GAUSSIAN_PROCESS_H +#endif // ifndef GAUSSIAN_PROCESS_H diff --git a/contributions/MPI_IS_gaussian_process/src/gaussian_process_guider.h b/contributions/MPI_IS_gaussian_process/src/gaussian_process_guider.h index 459f47209..658d4090a 100644 --- a/contributions/MPI_IS_gaussian_process/src/gaussian_process_guider.h +++ b/contributions/MPI_IS_gaussian_process/src/gaussian_process_guider.h @@ -71,7 +71,6 @@ enum Hyperparameters class GaussianProcessGuider { public: - struct data_point { double timestamp; @@ -104,28 +103,16 @@ class GaussianProcessGuider double SE1KSignalVariance_; double PKPeriodLength_; - guide_parameters() : - control_gain_(0.0), - min_move_(0.0), - prediction_gain_(0.0), - min_periods_for_inference_(0.0), - min_periods_for_period_estimation_(0.0), - points_for_approximation_(0), - compute_period_(false), - SE0KLengthScale_(0.0), - SE0KSignalVariance_(0.0), - PKLengthScale_(0.0), - PKSignalVariance_(0.0), - SE1KLengthScale_(0.0), - SE1KSignalVariance_(0.0), - PKPeriodLength_(0.0) + guide_parameters() + : control_gain_(0.0), min_move_(0.0), prediction_gain_(0.0), min_periods_for_inference_(0.0), + min_periods_for_period_estimation_(0.0), points_for_approximation_(0), compute_period_(false), + SE0KLengthScale_(0.0), SE0KSignalVariance_(0.0), PKLengthScale_(0.0), PKSignalVariance_(0.0), + SE1KLengthScale_(0.0), SE1KSignalVariance_(0.0), PKPeriodLength_(0.0) { } - }; private: - std::chrono::system_clock::time_point start_time_; // reference time std::chrono::system_clock::time_point last_time_; @@ -193,8 +180,6 @@ */ double PredictGearError(double prediction_location); - - public: double GetControlGain() const; bool SetControlGain(double control_gain); @@ -276,25 +261,13 @@ */ void UpdatePeriodLength(double period_length); - data_point& 
get_last_point() const - { - return circular_buffer_data_[circular_buffer_data_.size() - 1]; - } + data_point& get_last_point() const { return circular_buffer_data_[circular_buffer_data_.size() - 1]; } - data_point& get_second_last_point() const - { - return circular_buffer_data_[circular_buffer_data_.size() - 2]; - } + data_point& get_second_last_point() const { return circular_buffer_data_[circular_buffer_data_.size() - 2]; } - size_t get_number_of_measurements() const - { - return circular_buffer_data_.size(); - } + size_t get_number_of_measurements() const { return circular_buffer_data_.size(); } - void add_one_point() - { - circular_buffer_data_.push_front(data_point()); - } + void add_one_point() { circular_buffer_data_.push_front(data_point()); } /** * This method is needed for automated testing. It can inject data points. @@ -304,7 +277,8 @@ class GaussianProcessGuider /** * Takes timestamps, measurements and SNRs and returns them regularized in a matrix. */ - Eigen::MatrixXd regularize_dataset(const Eigen::VectorXd& timestamps, const Eigen::VectorXd& gear_error, const Eigen::VectorXd& variances); + Eigen::MatrixXd regularize_dataset(const Eigen::VectorXd& timestamps, const Eigen::VectorXd& gear_error, + const Eigen::VectorXd& variances); /** * Saves the GP data to a csv file for external analysis. Expensive! @@ -338,4 +312,4 @@ class GPDebug extern class GPDebug *GPDebug; -#endif // GAUSSIAN_PROCESS_GUIDER +#endif // GAUSSIAN_PROCESS_GUIDER diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/evaluate_performance.cpp b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/evaluate_performance.cpp index 460582ae4..7409c5aae 100644 --- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/evaluate_performance.cpp +++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/evaluate_performance.cpp @@ -35,12 +35,12 @@ #include "gaussian_process_guider.h" #include "guide_performance_tools.h" -int main(int argc, char** argv) +int main(int argc, char **argv) { if (argc < 15) return -1; - GaussianProcessGuider* GPG; + GaussianProcessGuider *GPG; GaussianProcessGuider::guide_parameters parameters; diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gaussian_process_test.cpp b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gaussian_process_test.cpp index 5d68636f7..35b884070 100644 --- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gaussian_process_test.cpp +++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gaussian_process_test.cpp @@ -47,12 +47,10 @@ class GPTest : public ::testing::Test { public: - GPTest(): random_vector_(11), location_vector_(11), hyper_parameters_(4), extra_parameters_(1) + GPTest() : random_vector_(11), location_vector_(11), hyper_parameters_(4), extra_parameters_(1) { - random_vector_ << -0.1799, -1.4215, -0.2774, 2.6056, 0.6471, -0.4366, - 1.3820, 0.4340, 0.8970, -0.7286, -1.7046; - location_vector_ << 0, 0.1000, 0.2000, 0.3000, 0.4000, 0.5000, 0.6000, - 0.7000, 0.8000, 0.9000, 1.0000; + random_vector_ << -0.1799, -1.4215, -0.2774, 2.6056, 0.6471, -0.4366, 1.3820, 0.4340, 0.8970, -0.7286, -1.7046; + location_vector_ << 0, 0.1000, 0.2000, 0.3000, 0.4000, 0.5000, 0.6000, 0.7000, 0.8000, 0.9000, 1.0000; hyper_parameters_ << 1, 2, 1, 2; extra_parameters_ << 5; @@ -87,8 +85,7 @@ TEST_F(GPTest, drawSamples_prior_mean_test) hyper_parameters_ << 1, 1, 1, 1; // use of smaller hypers needs less samples - covariance_function_ = - 
covariance_functions::PeriodicSquareExponential(hyper_parameters_); + covariance_function_ = covariance_functions::PeriodicSquareExponential(hyper_parameters_); gp_ = GP(covariance_function_); int N = 10000; // number of samples to draw @@ -116,8 +113,7 @@ TEST_F(GPTest, drawSamples_prior_covariance_test) hyper_parameters_ << 1, 1, 1, 1; // use of smaller hypers needs less samples - covariance_function_ = - covariance_functions::PeriodicSquareExponential(hyper_parameters_); + covariance_function_ = covariance_functions::PeriodicSquareExponential(hyper_parameters_); gp_ = GP(covariance_function_); int N = 20000; // number of samples to draw @@ -144,14 +140,13 @@ } } - TEST_F(GPTest, setCovarianceFunction) { Eigen::VectorXd hyperparams(6); hyperparams << 0.1, 15, 25, 15, 5000, 700; GP instance_gp; - EXPECT_TRUE(instance_gp.setCovarianceFunction(covariance_functions::PeriodicSquareExponential(hyperparams.segment(1,4)))); + EXPECT_TRUE(instance_gp.setCovarianceFunction(covariance_functions::PeriodicSquareExponential(hyperparams.segment(1, 4)))); GP instance_gp2 = GP(covariance_functions::PeriodicSquareExponential(Eigen::VectorXd::Zero(4))); instance_gp2.setHyperParameters(hyperparams); @@ -162,8 +157,6 @@ TEST_F(GPTest, setCovarianceFunction) } } - - TEST_F(GPTest, setCovarianceFunction_notworking_after_inference) { Eigen::VectorXd hyperparams(5); @@ -173,9 +166,7 @@ TEST_F(GPTest, setCovarianceFunction_notworking_after_inference) EXPECT_TRUE(instance_gp.setCovarianceFunction(covariance_functions::PeriodicSquareExponential(hyperparams.tail(4)))); int N = 250; - Eigen::VectorXd location = - 400 * math_tools::generate_uniform_random_matrix_0_1(N, 1) - - 200 * Eigen::MatrixXd::Ones(N, 1); + Eigen::VectorXd location = 400 * math_tools::generate_uniform_random_matrix_0_1(N, 1) - 200 * Eigen::MatrixXd::Ones(N, 1); Eigen::VectorXd output_from_converged_hyperparams = instance_gp.drawSample(location); @@ -194,8 +185,6 @@ TEST_F(GPTest, periodic_covariance_function_test) instance_gp.setHyperParameters(Eigen::VectorXd::Zero(6)); // should not assert } - - TEST_F(GPTest, infer_prediction_clear_test) { Eigen::VectorXd data_loc(1); @@ -226,12 +215,11 @@ TEST_F(GPTest, squareDistanceTest) Eigen::MatrixXd b(4, 5); Eigen::MatrixXd c(3, 4); - a << 3, 5, 5, 4, 6, 6, 3, 2, 3, 1, 0, 3; - - b << 1, 4, 5, 6, 7, 3, 4, 5, 6, 7, 0, 2, 4, 20, 2, 2, 3, -2, -2, 2; + a << 3, 5, 5, 4, 6, 6, 3, 2, 3, 1, 0, 3; - c << 1, 2, 3, 4, 4, 5, 6, 7, 6, 7, 8, 9; + b << 1, 4, 5, 6, 7, 3, 4, 5, 6, 7, 0, 2, 4, 20, 2, 2, 3, -2, -2, 2; + c << 1, 2, 3, 4, 4, 5, 6, 7, 6, 7, 8, 9; Eigen::MatrixXd sqdistc(4, 4); Eigen::MatrixXd sqdistab(3, 5); @@ -242,15 +230,12 @@ sqdistab << 15, 6, 15, 311, 27, 33, 14, 9, 329, 9, 35, 6, 27, 315, 7; // Test argument order - EXPECT_EQ(math_tools::squareDistance(a, b), math_tools::squareDistance - (b, a).transpose()); + EXPECT_EQ(math_tools::squareDistance(a, b), math_tools::squareDistance(b, a).transpose()); // Test that two identical Matrices give the same result // (whether they are the same object or not) - EXPECT_EQ(math_tools::squareDistance(a, Eigen::MatrixXd(a)), - math_tools::squareDistance(a, a)); - EXPECT_EQ(math_tools::squareDistance(a), math_tools::squareDistance( - a, a)); + EXPECT_EQ(math_tools::squareDistance(a, Eigen::MatrixXd(a)), math_tools::squareDistance(a, a)); + EXPECT_EQ(math_tools::squareDistance(a), math_tools::squareDistance(a, a)); // Test that the implementation gives the same result as the Matlab // 
implementation @@ -279,13 +264,11 @@ TEST_F(GPTest, CovarianceTest2) Eigen::MatrixXd kxX_matlab(5, 3); Eigen::MatrixXd kXX_matlab(3, 3); - kxx_matlab << 8.0000, 3.3046, 2.0043, 1.0803, 0.6553, - 3.3046, 8.0000, 3.3046, 2.0043, 1.0803, - 2.0043, 3.3046, 8.0000, 3.3046, 2.0043, - 1.0803, 2.0043, 3.3046, 8.0000, 3.3046, - 0.6553, 1.0803, 2.0043, 3.3046, 8.0000; + kxx_matlab << 8.0000, 3.3046, 2.0043, 1.0803, 0.6553, 3.3046, 8.0000, 3.3046, 2.0043, 1.0803, 2.0043, 3.3046, 8.0000, + 3.3046, 2.0043, 1.0803, 2.0043, 3.3046, 8.0000, 3.3046, 0.6553, 1.0803, 2.0043, 3.3046, 8.0000; - kxX_matlab << 8.0000, 2.0043, 0.6553, 3.3046, 3.3046, 1.0803, 2.0043, 8.0000, 2.0043, 1.0803, 3.3046, 3.3046, 0.6553, 2.0043, 8.0000; + kxX_matlab << 8.0000, 2.0043, 0.6553, 3.3046, 3.3046, 1.0803, 2.0043, 8.0000, 2.0043, 1.0803, 3.3046, 3.3046, 0.6553, + 2.0043, 8.0000; kXX_matlab << 8.0000, 2.0043, 0.6553, 2.0043, 8.0000, 2.0043, 0.6553, 2.0043, 8.0000; @@ -335,24 +318,15 @@ TEST_F(GPTest, CovarianceTest3) covFunc.setExtraParameters(periodLength); Eigen::MatrixXd kxx_matlab(5, 5); - kxx_matlab << - 3.00000, 1.06389, 0.97441, 1.07075, 0.27067, - 1.06389, 3.00000, 1.06389, 0.97441, 1.07075, - 0.97441, 1.06389, 3.00000, 1.06389, 0.97441, - 1.07075, 0.97441, 1.06389, 3.00000, 1.06389, - 0.27067, 1.07075, 0.97441, 1.06389, 3.00000; + kxx_matlab << 3.00000, 1.06389, 0.97441, 1.07075, 0.27067, 1.06389, 3.00000, 1.06389, 0.97441, 1.07075, 0.97441, 1.06389, + 3.00000, 1.06389, 0.97441, 1.07075, 0.97441, 1.06389, 3.00000, 1.06389, 0.27067, 1.07075, 0.97441, 1.06389, 3.00000; Eigen::MatrixXd kxX_matlab(5, 3); - kxX_matlab << - 3.00000, 0.97441, 0.27067, 1.06389, 1.06389, - 1.07075, 0.97441, 3.00000, 0.97441, 1.07075, - 1.06389, 1.06389, 0.27067, 0.97441, 3.00000; + kxX_matlab << 3.00000, 0.97441, 0.27067, 1.06389, 1.06389, 1.07075, 0.97441, 3.00000, 0.97441, 1.07075, 1.06389, 1.06389, + 0.27067, 0.97441, 3.00000; Eigen::MatrixXd kXX_matlab(3, 3); - kXX_matlab << - 3.00000, 0.97441, 0.27067, - 0.97441, 3.00000, 0.97441, - 0.27067, 0.97441, 3.00000; + kXX_matlab << 3.00000, 0.97441, 0.27067, 0.97441, 3.00000, 0.97441, 0.27067, 0.97441, 3.00000; Eigen::MatrixXd kxx = covFunc.evaluate(locations, locations); Eigen::MatrixXd kxX = covFunc.evaluate(locations, X); @@ -383,7 +357,7 @@ } } -int main(int argc, char** argv) +int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gp_guider_test.cpp b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gp_guider_test.cpp index 019c16fe4..db127b7e1 100644 --- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gp_guider_test.cpp +++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/gp_guider_test.cpp @@ -59,14 +59,14 @@ class GPGTest : public ::testing::Test static const double DefaultSignalVarianceSE1Ker; // signal variance of the short range SE-kernel static const double DefaultPeriodLengthsForPeriodEstimation; // minimal number of period lengths for PL estimation - static const int DefaultNumPointsForApproximation; // number of points used in the GP approximation + static const int DefaultNumPointsForApproximation; // number of points used in the GP approximation static const double DefaultPredictionGain; // amount of GP prediction to blend in - static const bool DefaultComputePeriod; + static const bool DefaultComputePeriod; GaussianProcessGuider *GPG; - GPGTest(): GPG(0) + 
GPGTest() : GPG(0) { GaussianProcessGuider::guide_parameters parameters; parameters.control_gain_ = DefaultControlGain; @@ -88,29 +88,26 @@ class GPGTest : public ::testing::Test GPG->SetLearningRate(1.0); // disable smooth learning } - ~GPGTest() - { - delete GPG; - } + ~GPGTest() { delete GPG; } }; -const double GPGTest::DefaultControlGain = 0.8; // control gain -const double GPGTest::DefaultPeriodLengthsForInference = 1.0; // minimal number of period lengths for full prediction -const double GPGTest::DefaultMinMove = 0.2; +const double GPGTest::DefaultControlGain = 0.8; // control gain +const double GPGTest::DefaultPeriodLengthsForInference = 1.0; // minimal number of period lengths for full prediction +const double GPGTest::DefaultMinMove = 0.2; -const double GPGTest::DefaultLengthScaleSE0Ker = 500.0; // length-scale of the long-range SE-kernel -const double GPGTest::DefaultSignalVarianceSE0Ker = 10.0; // signal variance of the long-range SE-kernel -const double GPGTest::DefaultLengthScalePerKer = 10.0; // length-scale of the periodic kernel -const double GPGTest::DefaultPeriodLengthPerKer = 100.0; // P_p, period-length of the periodic kernel -const double GPGTest::DefaultSignalVariancePerKer = 10.0; // signal variance of the periodic kernel -const double GPGTest::DefaultLengthScaleSE1Ker = 5.0; // length-scale of the short-range SE-kernel -const double GPGTest::DefaultSignalVarianceSE1Ker = 1.0; // signal variance of the short range SE-kernel +const double GPGTest::DefaultLengthScaleSE0Ker = 500.0; // length-scale of the long-range SE-kernel +const double GPGTest::DefaultSignalVarianceSE0Ker = 10.0; // signal variance of the long-range SE-kernel +const double GPGTest::DefaultLengthScalePerKer = 10.0; // length-scale of the periodic kernel +const double GPGTest::DefaultPeriodLengthPerKer = 100.0; // P_p, period-length of the periodic kernel +const double GPGTest::DefaultSignalVariancePerKer = 10.0; // signal variance of the periodic kernel +const double GPGTest::DefaultLengthScaleSE1Ker = 5.0; // length-scale of the short-range SE-kernel +const double GPGTest::DefaultSignalVarianceSE1Ker = 1.0; // signal variance of the short range SE-kernel const double GPGTest::DefaultPeriodLengthsForPeriodEstimation = 2.0; // minimal number of period lengths for PL estimation -const int GPGTest::DefaultNumPointsForApproximation = 100; // number of points used in the GP approximation -const double GPGTest::DefaultPredictionGain = 1.0; // amount of GP prediction to blend in +const int GPGTest::DefaultNumPointsForApproximation = 100; // number of points used in the GP approximation +const double GPGTest::DefaultPredictionGain = 1.0; // amount of GP prediction to blend in -const bool GPGTest::DefaultComputePeriod = true; +const bool GPGTest::DefaultComputePeriod = true; #include #include @@ -143,12 +140,12 @@ TEST_F(GPGTest, period_identification_test) { // first: prepare a nice GP with a sine wave double period_length = 300; - double max_time = 10*period_length; + double max_time = 10 * period_length; int resolution = 500; Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time); - Eigen::VectorXd measurements = 50*(timestamps.array()*2*M_PI/period_length).sin(); - Eigen::VectorXd controls = 0*measurements; - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + Eigen::VectorXd measurements = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); + Eigen::VectorXd controls = 0 * measurements; + Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 
1); // feed data to the GPGuider for (int i = 0; i < timestamps.size(); ++i) @@ -170,11 +167,11 @@ TEST_F(GPGTest, min_move_test) // simple min-moves (without GP data) EXPECT_NEAR(GPG->result(0.15, 2.0, 3.0), 0, 1e-6); GPG->reset(); - EXPECT_NEAR(GPG->result(0.25, 2.0, 3.0), 0.25*0.8, 1e-6); + EXPECT_NEAR(GPG->result(0.25, 2.0, 3.0), 0.25 * 0.8, 1e-6); GPG->reset(); EXPECT_NEAR(GPG->result(-0.15, 2.0, 3.0), 0, 1e-6); GPG->reset(); - EXPECT_NEAR(GPG->result(-0.25, 2.0, 3.0), -0.25*0.8, 1e-6); + EXPECT_NEAR(GPG->result(-0.25, 2.0, 3.0), -0.25 * 0.8, 1e-6); GPG->reset(); GPG->save_gp_data(); @@ -185,15 +182,15 @@ TEST_F(GPGTest, gp_prediction_test) // first: prepare a nice GP with a sine wave double period_length = 300; - double max_time = 5*period_length; + double max_time = 5 * period_length; int resolution = 600; double prediction_length = 3.0; Eigen::VectorXd locations(2); Eigen::VectorXd predictions(2); Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time); - Eigen::VectorXd measurements = 50*(timestamps.array()*2*M_PI/period_length).sin(); - Eigen::VectorXd controls = 0*measurements; - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + Eigen::VectorXd measurements = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); + Eigen::VectorXd controls = 0 * measurements; + Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 1); // feed data to the GPGuider for (int i = 0; i < timestamps.size(); ++i) @@ -201,9 +198,9 @@ TEST_F(GPGTest, gp_prediction_test) GPG->inject_data_point(timestamps[i], measurements[i], SNRs[i], controls[i]); } locations << max_time, max_time + prediction_length; - predictions = 50*(locations.array()*2*M_PI/period_length).sin(); + predictions = 50 * (locations.array() * 2 * M_PI / period_length).sin(); // the first case is with an error smaller than min_move_ - EXPECT_NEAR(GPG->result(0.15, 2.0, prediction_length, max_time), predictions[1]-predictions[0], 2e-1); + EXPECT_NEAR(GPG->result(0.15, 2.0, prediction_length, max_time), predictions[1] - predictions[0], 2e-1); GPG->reset(); // feed data to the GPGuider @@ -212,7 +209,7 @@ TEST_F(GPGTest, gp_prediction_test) GPG->inject_data_point(timestamps[i], measurements[i], SNRs[i], controls[i]); } // the first case is with an error larger than min_move_ - EXPECT_NEAR(GPG->result(0.25, 2.0, prediction_length, max_time), 0.25*0.8+predictions[1]-predictions[0], 2e-1); + EXPECT_NEAR(GPG->result(0.25, 2.0, prediction_length, max_time), 0.25 * 0.8 + predictions[1] - predictions[0], 2e-1); GPG->save_gp_data(); } @@ -261,21 +258,21 @@ TEST_F(GPGTest, timer_test) } TEST_F(GPGTest, gp_projection_test) -{ // this test should fail when output projections are disabled and should pass when they are enabled +{ // this test should fail when output projections are disabled and should pass when they are enabled // first: prepare a nice GP with a sine wave double period_length = 300; - double max_time = 5*period_length; + double max_time = 5 * period_length; int resolution = 600; double prediction_length = 3.0; Eigen::VectorXd locations(2); Eigen::VectorXd predictions(2); Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time); - Eigen::VectorXd measurements = 50*(timestamps.array()*2*M_PI/period_length).sin(); - Eigen::VectorXd controls = 0*measurements; - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + Eigen::VectorXd measurements = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); + Eigen::VectorXd controls = 0 * 
measurements; + Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 1); - Eigen::VectorXd sine_noise = 5*(timestamps.array()*2*M_PI/26).sin(); // smaller "disturbance" to add + Eigen::VectorXd sine_noise = 5 * (timestamps.array() * 2 * M_PI / 26).sin(); // smaller "disturbance" to add measurements = measurements + sine_noise; @@ -285,32 +282,32 @@ TEST_F(GPGTest, gp_projection_test) GPG->inject_data_point(timestamps[i], measurements[i], SNRs[i], controls[i]); } locations << max_time, max_time + prediction_length; - predictions = 50*(locations.array()*2*M_PI/period_length).sin(); + predictions = 50 * (locations.array() * 2 * M_PI / period_length).sin(); // the first case is with an error smaller than min_move_ - EXPECT_NEAR(GPG->result(0.0, 2.0, prediction_length, max_time), predictions[1]-predictions[0], 3e-1); + EXPECT_NEAR(GPG->result(0.0, 2.0, prediction_length, max_time), predictions[1] - predictions[0], 3e-1); GPG->reset(); GPG->save_gp_data(); } TEST_F(GPGTest, linear_drift_identification_test) -{ // when predicting one period length ahead, only linear drift should show +{ // when predicting one period length ahead, only linear drift should show // first: prepare a nice GP with a sine wave double period_length = 300; - double max_time = 3*period_length; + double max_time = 3 * period_length; int resolution = 300; double prediction_length = period_length; // necessary to only see the drift Eigen::VectorXd locations(2); Eigen::VectorXd predictions(2); Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time); - Eigen::VectorXd measurements = 0*timestamps; - Eigen::VectorXd sine_data = 50*(timestamps.array()*2*M_PI/period_length).sin(); - Eigen::VectorXd drift = 0.25*timestamps; // drift to add + Eigen::VectorXd measurements = 0 * timestamps; + Eigen::VectorXd sine_data = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); + Eigen::VectorXd drift = 0.25 * timestamps; // drift to add Eigen::VectorXd gear_function = sine_data + drift; Eigen::VectorXd controls(timestamps.size()); - controls << gear_function.tail(gear_function.size()-1) - gear_function.head(gear_function.size()-1), 0; - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + controls << gear_function.tail(gear_function.size() - 1) - gear_function.head(gear_function.size() - 1), 0; + Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 1); std::vector parameters = GPG->GetGPHyperparameters(); parameters[SE0KSignalVariance] = 1e-10; // disable long-range SE kernel @@ -327,31 +324,31 @@ TEST_F(GPGTest, linear_drift_identification_test) GPG->inject_data_point(timestamps[i], measurements[i], SNRs[i], controls[i]); } locations << 5000, 5000 + prediction_length; - predictions = 0.25*locations; // only predict linear drift here + predictions = 0.25 * locations; // only predict linear drift here // the first case is with an error smaller than min_move_ - EXPECT_NEAR(GPG->result(0.0, 100.0, prediction_length, max_time), predictions[1]-predictions[0], 2e-1); + EXPECT_NEAR(GPG->result(0.0, 100.0, prediction_length, max_time), predictions[1] - predictions[0], 2e-1); GPG->save_gp_data(); } TEST_F(GPGTest, data_preparation_test) -{ // no matter whether the gear function shows up in the controls or in the measurements, +{ // no matter whether the gear function shows up in the controls or in the measurements, // the predictions should be identical // first: prepare a nice GP with a sine wave double period_length = 300; - double max_time = 3*period_length; + double 
max_time = 3 * period_length; int resolution = 200; double prediction_length = 3.0; Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time); Eigen::VectorXd measurements(timestamps.size()); - Eigen::VectorXd sine_data = 50*(timestamps.array()*2*M_PI/period_length).sin(); + Eigen::VectorXd sine_data = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); Eigen::VectorXd controls(timestamps.size()); - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 1); // first option: the error was "compensated" and therefore only shows up in the controls - controls << sine_data.tail(sine_data.size()-1) - sine_data.head(sine_data.size()-1), 0; - measurements = 0*timestamps; + controls << sine_data.tail(sine_data.size() - 1) - sine_data.head(sine_data.size() - 1), 0; + measurements = 0 * timestamps; // feed data to the GPGuider for (int i = 0; i < timestamps.size(); ++i) @@ -362,7 +359,7 @@ TEST_F(GPGTest, data_preparation_test) GPG->reset(); // second option: the error is not compensated and therefore visible in the measurement - controls = 0*controls; + controls = 0 * controls; measurements = sine_data; // feed data to the GPGuider @@ -389,7 +386,7 @@ TEST_F(GPGTest, real_data_test) int i = 0; CSVRow row; - while(file >> row) + while (file >> row) { // ignore special lines: "INFO", "Frame", "DROP" if (row[0][0] == 'I' || row[0][0] == 'F' || row[2][1] == 'D') @@ -412,7 +409,7 @@ TEST_F(GPGTest, real_data_test) GPG->result(0.0, 25.0, 3.0, time); - EXPECT_NEAR(GPG->GetGPHyperparameters()[PKPeriodLength],483.0,5); + EXPECT_NEAR(GPG->GetGPHyperparameters()[PKPeriodLength], 483.0, 5); GPG->save_gp_data(); } @@ -432,7 +429,7 @@ TEST_F(GPGTest, parameter_filter_test) int i = 0; CSVRow row; - while(infile >> row) + while (infile >> row) { if (row[0][0] == 'p') // ignore the first line { @@ -471,9 +468,9 @@ TEST_F(GPGTest, period_interpolation_test) double max_time = 2345; int resolution = 527; Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time); - Eigen::VectorXd measurements = 50*(timestamps.array()*2*M_PI/period_length).sin(); - Eigen::VectorXd controls = 0*measurements; - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + Eigen::VectorXd measurements = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); + Eigen::VectorXd controls = 0 * measurements; + Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 1); // feed data to the GPGuider for (int i = 0; i < timestamps.size(); ++i) @@ -496,13 +493,13 @@ TEST_F(GPGTest, data_regularization_test) // second: mess up the grid of time stamps Eigen::VectorXd timestamps(resolution); - timestamps << Eigen::VectorXd::LinSpaced(resolution/2, 0, max_time/6), - Eigen::VectorXd::LinSpaced(resolution/2, max_time/6 + 0.5, max_time); - timestamps += 0.5*math_tools::generate_normal_random_matrix(resolution, 1); + timestamps << Eigen::VectorXd::LinSpaced(resolution / 2, 0, max_time / 6), + Eigen::VectorXd::LinSpaced(resolution / 2, max_time / 6 + 0.5, max_time); + timestamps += 0.5 * math_tools::generate_normal_random_matrix(resolution, 1); - Eigen::VectorXd measurements = 50*(timestamps.array()*2*M_PI/period_length).sin(); - Eigen::VectorXd controls = 0*measurements; - Eigen::VectorXd SNRs = 100*Eigen::VectorXd::Ones(resolution + 1); + Eigen::VectorXd measurements = 50 * (timestamps.array() * 2 * M_PI / period_length).sin(); + Eigen::VectorXd controls = 0 * measurements; + Eigen::VectorXd SNRs = 
100 * Eigen::VectorXd::Ones(resolution + 1); // feed data to the GPGuider for (int i = 0; i < timestamps.size(); ++i) @@ -536,7 +533,7 @@ TEST_F(GPGTest, DISABLED_log_period_length) int i = 0; CSVRow row; - while(file >> row) + while (file >> row) { // ignore special lines: "INFO", "Frame", "DROP" if (row[0][0] == 'I' || row[0][0] == 'F' || row[2][1] == 'D') @@ -570,7 +567,7 @@ TEST_F(GPGTest, real_data_test_nan_issue) for (int i = 0; i < data.cols(); ++i) { - GPG->inject_data_point(data(0,i), data(1,i), data(3,i), data(2,i)); + GPG->inject_data_point(data(0, i), data(1, i), data(3, i), data(2, i)); } EXPECT_GT(data.cols(), 0) << "dataset was empty or not present"; @@ -582,7 +579,7 @@ TEST_F(GPGTest, real_data_test_nan_issue) GPG->save_gp_data(); } -int main(int argc, char** argv) +int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp index 1679119b6..20b0b6277 100644 --- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp +++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp @@ -60,17 +60,17 @@ class GuidePerformanceTest : public ::testing::Test static const double DefaultSignalVarianceSE1Ker; // signal variance of the short range SE-kernel static const double DefaultPeriodLengthsPeriodEstimation; // minimal number of points for doing the period identification - static const int DefaultNumPointsForApproximation; // number of points used in the GP approximation + static const int DefaultNumPointsForApproximation; // number of points used in the GP approximation static const double DefaultPredictionGain; // amount of GP prediction to blend in - static const bool DefaultComputePeriod; + static const bool DefaultComputePeriod; - GaussianProcessGuider* GPG; + GaussianProcessGuider *GPG; GAHysteresis GAH; std::string filename; double improvement; - GuidePerformanceTest(): GPG(0), improvement(0.0) + GuidePerformanceTest() : GPG(0), improvement(0.0) { GaussianProcessGuider::guide_parameters parameters; parameters.control_gain_ = DefaultControlGain; @@ -91,35 +91,32 @@ class GuidePerformanceTest : public ::testing::Test GPG = new GaussianProcessGuider(parameters); } - ~GuidePerformanceTest() - { - delete GPG; - } + ~GuidePerformanceTest() { delete GPG; } }; -const double GuidePerformanceTest::DefaultControlGain = 0.7; // control gain -const double GuidePerformanceTest::DefaultPeriodLengthsInference = 2.0; // period lengths until inference -const double GuidePerformanceTest::DefaultMinMove = 0.2; // minimal move +const double GuidePerformanceTest::DefaultControlGain = 0.7; // control gain +const double GuidePerformanceTest::DefaultPeriodLengthsInference = 2.0; // period lengths until inference +const double GuidePerformanceTest::DefaultMinMove = 0.2; // minimal move -const double GuidePerformanceTest::DefaultLengthScaleSE0Ker = 700.0; // length-scale of the long-range SE-kernel -const double GuidePerformanceTest::DefaultSignalVarianceSE0Ker = 20.0; // signal variance of the long-range SE-kernel -const double GuidePerformanceTest::DefaultLengthScalePerKer = 10.0; // length-scale of the periodic kernel -const double GuidePerformanceTest::DefaultPeriodLengthPerKer = 200.0; // P_p, period-length of the periodic kernel -const double GuidePerformanceTest::DefaultSignalVariancePerKer = 20.0; // 
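Note: every synthetic test above follows the same recipe: build a sinusoidal gear error on an even time grid, feed it to the guider with inject_data_point(), then compare result() against the known continuation of the sine. A minimal standalone sketch of that data-generation step, reusing the constants from gp_prediction_test (Eigen only; the M_PI guard mirrors the one in math_tools.h):

    // Builds the synthetic dataset the GPGTest fixtures feed to the guider.
    #include <Eigen/Dense>
    #include <cmath>
    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main()
    {
        double period_length = 300;
        double max_time = 5 * period_length;
        int resolution = 600;

        // resolution + 1 evenly spaced timestamps in [0, max_time]
        Eigen::VectorXd timestamps = Eigen::VectorXd::LinSpaced(resolution + 1, 0, max_time);
        // pure sinusoidal periodic error with amplitude 50
        Eigen::VectorXd measurements = 50 * (timestamps.array() * 2 * M_PI / period_length).sin();
        // no corrections applied, ideal signal-to-noise ratio
        Eigen::VectorXd controls = Eigen::VectorXd::Zero(resolution + 1);
        Eigen::VectorXd SNRs = 100 * Eigen::VectorXd::Ones(resolution + 1);
        // each (timestamp, measurement, SNR, control) tuple would then go to
        // GaussianProcessGuider::inject_data_point(), exactly as in the tests
        return 0;
    }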
diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp
index 1679119b6..20b0b6277 100644
--- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp
+++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_test.cpp
@@ -60,17 +60,17 @@ class GuidePerformanceTest : public ::testing::Test
     static const double DefaultSignalVarianceSE1Ker; // signal variance of the short-range SE-kernel
     static const double DefaultPeriodLengthsPeriodEstimation; // minimal number of points for doing the period identification

-    static const int DefaultNumPointsForApproximation; // number of points used in the GP approximation
+    static const int DefaultNumPointsForApproximation;        // number of points used in the GP approximation
     static const double DefaultPredictionGain; // amount of GP prediction to blend in

-    static const bool DefaultComputePeriod;
+    static const bool DefaultComputePeriod;

-    GaussianProcessGuider* GPG;
+    GaussianProcessGuider *GPG;
     GAHysteresis GAH;
     std::string filename;
     double improvement;

-    GuidePerformanceTest(): GPG(0), improvement(0.0)
+    GuidePerformanceTest() : GPG(0), improvement(0.0)
     {
         GaussianProcessGuider::guide_parameters parameters;
         parameters.control_gain_ = DefaultControlGain;
@@ -91,35 +91,32 @@ class GuidePerformanceTest : public ::testing::Test
         GPG = new GaussianProcessGuider(parameters);
     }

-    ~GuidePerformanceTest()
-    {
-        delete GPG;
-    }
+    ~GuidePerformanceTest() { delete GPG; }
 };

-const double GuidePerformanceTest::DefaultControlGain = 0.7; // control gain
-const double GuidePerformanceTest::DefaultPeriodLengthsInference = 2.0; // period lengths until inference
-const double GuidePerformanceTest::DefaultMinMove = 0.2; // minimal move
+const double GuidePerformanceTest::DefaultControlGain = 0.7;            // control gain
+const double GuidePerformanceTest::DefaultPeriodLengthsInference = 2.0; // period lengths until inference
+const double GuidePerformanceTest::DefaultMinMove = 0.2;                // minimal move

-const double GuidePerformanceTest::DefaultLengthScaleSE0Ker = 700.0; // length-scale of the long-range SE-kernel
-const double GuidePerformanceTest::DefaultSignalVarianceSE0Ker = 20.0; // signal variance of the long-range SE-kernel
-const double GuidePerformanceTest::DefaultLengthScalePerKer = 10.0; // length-scale of the periodic kernel
-const double GuidePerformanceTest::DefaultPeriodLengthPerKer = 200.0; // P_p, period-length of the periodic kernel
-const double GuidePerformanceTest::DefaultSignalVariancePerKer = 20.0; // signal variance of the periodic kernel
-const double GuidePerformanceTest::DefaultLengthScaleSE1Ker = 25.0; // length-scale of the short-range SE-kernel
-const double GuidePerformanceTest::DefaultSignalVarianceSE1Ker = 10.0; // signal variance of the short-range SE-kernel
+const double GuidePerformanceTest::DefaultLengthScaleSE0Ker = 700.0;   // length-scale of the long-range SE-kernel
+const double GuidePerformanceTest::DefaultSignalVarianceSE0Ker = 20.0; // signal variance of the long-range SE-kernel
+const double GuidePerformanceTest::DefaultLengthScalePerKer = 10.0;    // length-scale of the periodic kernel
+const double GuidePerformanceTest::DefaultPeriodLengthPerKer = 200.0;  // P_p, period-length of the periodic kernel
+const double GuidePerformanceTest::DefaultSignalVariancePerKer = 20.0; // signal variance of the periodic kernel
+const double GuidePerformanceTest::DefaultLengthScaleSE1Ker = 25.0;    // length-scale of the short-range SE-kernel
+const double GuidePerformanceTest::DefaultSignalVarianceSE1Ker = 10.0; // signal variance of the short-range SE-kernel
 const double GuidePerformanceTest::DefaultPeriodLengthsPeriodEstimation = 2.0; // period lengths until FFT
-const int GuidePerformanceTest::DefaultNumPointsForApproximation = 100; // number of points used in the GP approximation
-const double GuidePerformanceTest::DefaultPredictionGain = 0.5; // amount of GP prediction to blend in
+const int GuidePerformanceTest::DefaultNumPointsForApproximation = 100; // number of points used in the GP approximation
+const double GuidePerformanceTest::DefaultPredictionGain = 0.5;         // amount of GP prediction to blend in

-const bool GuidePerformanceTest::DefaultComputePeriod = true;
+const bool GuidePerformanceTest::DefaultComputePeriod = true;

 TEST_F(GuidePerformanceTest, performance_dataset01)
 {
     filename = "performance_dataset01.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -127,7 +124,7 @@ TEST_F(GuidePerformanceTest, performance_dataset02)
 {
     filename = "performance_dataset02.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -135,7 +132,7 @@ TEST_F(GuidePerformanceTest, performance_dataset03)
 {
     filename = "performance_dataset03.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -143,7 +140,7 @@ TEST_F(GuidePerformanceTest, performance_dataset04)
 {
     filename = "performance_dataset04.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -151,7 +148,7 @@ TEST_F(GuidePerformanceTest, performance_dataset05)
 {
     filename = "performance_dataset05.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -159,7 +156,7 @@ TEST_F(GuidePerformanceTest, performance_dataset06)
 {
     filename = "performance_dataset06.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -167,7 +164,7 @@ TEST_F(GuidePerformanceTest, performance_dataset07)
 {
     filename = "performance_dataset07.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

@@ -175,11 +172,11 @@ TEST_F(GuidePerformanceTest, performance_dataset08)
 {
     filename = "performance_dataset08.txt";
     improvement = calculate_improvement(filename, GAH, GPG);
-    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100*improvement << "%" << std::endl;
+    std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
     EXPECT_GT(improvement, 0);
 }

-int main(int argc, char** argv)
+int main(int argc, char **argv)
 {
     ::testing::InitGoogleTest(&argc, argv);
     return RUN_ALL_TESTS();
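Note: each performance test prints the value returned by calculate_improvement() (defined in guide_performance_tools.h below) and only asserts that it is positive. The number is a relative RMS reduction, so the printed percentage reads as follows (a sketch with invented RMS values, not measured ones):

    // How the printed percentage relates to the two residual RMS values.
    #include <iostream>

    int main()
    {
        double gp_guider_rms = 0.5;  // hypothetical RMS of residuals under GP guiding
        double hysteresis_rms = 1.0; // hypothetical RMS of residuals under hysteresis guiding
        double improvement = 1 - gp_guider_rms / hysteresis_rms;
        // 0 means parity with hysteresis, 1 would be perfect guiding,
        // negative values mean the GP guider performed worse
        std::cout << "Improvement of GPGuiding over Hysteresis: " << 100 * improvement << "%" << std::endl;
        return 0;
    }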
diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_tools.h b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_tools.h
index 6b43d7c94..c61ce08e6 100644
--- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_tools.h
+++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/guide_performance_tools.h
@@ -43,37 +43,32 @@
 class CSVRow
 {
-  public:
-    std::string const& operator[](std::size_t index) const
+public:
+    std::string const& operator[](std::size_t index) const { return m_data[index]; }
+    std::size_t size() const { return m_data.size(); }
+    void readNextRow(std::istream& str)
+    {
+        std::string line;
+        std::getline(str, line);
+
+        std::stringstream lineStream(line);
+        std::string cell;
+
+        m_data.clear();
+        while (std::getline(lineStream, cell, ','))
     {
-        return m_data[index];
+            m_data.push_back(cell);
     }
-    std::size_t size() const
+        // This checks for a trailing comma with no data after it.
+        if (!lineStream && cell.empty())
     {
-        return m_data.size();
+            // If there was a trailing comma then add an empty element.
+            m_data.push_back("");
     }
-    void readNextRow(std::istream& str)
-    {
-        std::string line;
-        std::getline(str, line);
-
-        std::stringstream lineStream(line);
-        std::string cell;
+    }

-        m_data.clear();
-        while(std::getline(lineStream, cell, ','))
-        {
-            m_data.push_back(cell);
-        }
-        // This checks for a trailing comma with no data after it.
-        if (!lineStream && cell.empty())
-        {
-            // If there was a trailing comma then add an empty element.
-            m_data.push_back("");
-        }
-    }
-  private:
-    std::vector<std::string> m_data;
+private:
+    std::vector<std::string> m_data;
 };

 inline std::istream& operator>>(std::istream& str, CSVRow& data)
@@ -88,7 +83,7 @@ inline Eigen::ArrayXXd read_data_from_file(std::string filename)

     int i = 0;
     CSVRow row;
-    while(file >> row)
+    while (file >> row)
     {
         // ignore special lines
         if (row[0][0] == 'F' || row.size() < 18 || row[5].size() == 0)
@@ -114,14 +109,14 @@ inline Eigen::ArrayXXd read_data_from_file(std::string filename)
     file.clear();
     file.open(filename);
     double dither = 0.0;
-    while(file >> row)
+    while (file >> row)
     {
         if (row[0][0] == 'I')
         {
             std::string infoline(row[0]);
-            if (infoline.substr(6,6) == "DITHER")
+            if (infoline.substr(6, 6) == "DITHER")
             {
-                dither = std::stod(infoline.substr(15,10));
+                dither = std::stod(infoline.substr(15, 10));
             }
         }

@@ -141,7 +136,7 @@ inline Eigen::ArrayXXd read_data_from_file(std::string filename)
         SNRs(i) = std::stod(row[16]);
     }

-    Eigen::ArrayXXd result(4,N);
+    Eigen::ArrayXXd result(4, N);
     result.row(0) = times;
     result.row(1) = measurements;
     result.row(2) = controls;
@@ -156,14 +151,14 @@ inline double get_exposure_from_file(std::string filename)

     CSVRow row;
     double exposure = 3.0; // initialize a default value
-    while(file >> row)
+    while (file >> row)
     {
         if (row[0][0] == 'E')
         {
             std::string infoline(row[0]);
-            if (infoline.substr(0,8) == "Exposure")
+            if (infoline.substr(0, 8) == "Exposure")
             {
-                exposure = std::stoi(infoline.substr(11,4)) / 1000.0;
+                exposure = std::stoi(infoline.substr(11, 4)) / 1000.0;
             }
         }
     }
@@ -181,8 +176,7 @@ class GAHysteresis
     double m_minMove;
     double m_lastMove;

-    GAHysteresis() : m_hysteresis(0.1), m_aggression(0.7), m_minMove(0.2), m_lastMove(0.0)
-    { }
+    GAHysteresis() : m_hysteresis(0.1), m_aggression(0.7), m_minMove(0.2), m_lastMove(0.0) { }

     double result(double input)
     {
@@ -204,7 +198,7 @@
 /*
  * Calculates the improvement of the GP Guider over Hysteresis on a dataset.
 */
-inline double calculate_improvement(std::string filename, GAHysteresis GAH, GaussianProcessGuider* GPG)
+inline double calculate_improvement(std::string filename, GAHysteresis GAH, GaussianProcessGuider *GPG)
 {
     Eigen::ArrayXXd data = read_data_from_file(filename);
     double exposure = get_exposure_from_file(filename);
@@ -216,18 +210,18 @@ inline double calculate_improvement(std::string filename, GAHysteresis GAH, Gaus
     double hysteresis_control = 0.0;
     double hysteresis_state = measurements(0);
-    Eigen::ArrayXd hysteresis_states(times.size()-2);
+    Eigen::ArrayXd hysteresis_states(times.size() - 2);

     double gp_guider_control = 0.0;
     double gp_guider_state = measurements(0);
-    Eigen::ArrayXd gp_guider_states(times.size()-2);
+    Eigen::ArrayXd gp_guider_states(times.size() - 2);

-    for (int i = 0; i < times.size()-2; ++i)
+    for (int i = 0; i < times.size() - 2; ++i)
     {
         hysteresis_control = GAH.result(hysteresis_state);
         // this is a simple telescope "simulator"
-        hysteresis_state = hysteresis_state + (measurements(i+1) - (measurements(i) - controls(i))) - hysteresis_control;
+        hysteresis_state = hysteresis_state + (measurements(i + 1) - (measurements(i) - controls(i))) - hysteresis_control;
         hysteresis_states(i) = hysteresis_state;

         GPG->reset();
@@ -236,14 +230,14 @@ inline double calculate_improvement(std::string filename, GAHysteresis GAH, Gaus
             GPG->inject_data_point(times(j), measurements(j), SNRs(j), controls(j));
         }
         gp_guider_control = GPG->result(gp_guider_state, SNRs(i), exposure);
-        gp_guider_state = gp_guider_state + (measurements(i+1) - (measurements(i) - controls(i))) - gp_guider_control;
+        gp_guider_state = gp_guider_state + (measurements(i + 1) - (measurements(i) - controls(i))) - gp_guider_control;
         assert(fabs(gp_guider_state) < 100);
         gp_guider_states(i) = gp_guider_state;
     }

-    double gp_guider_rms = std::sqrt(gp_guider_states.pow(2).sum()/gp_guider_states.size());
-    double hysteresis_rms = std::sqrt(hysteresis_states.array().pow(2).sum()/hysteresis_states.size());
+    double gp_guider_rms = std::sqrt(gp_guider_states.pow(2).sum() / gp_guider_states.size());
+    double hysteresis_rms = std::sqrt(hysteresis_states.array().pow(2).sum() / hysteresis_states.size());

     return 1 - gp_guider_rms / hysteresis_rms;
 }
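Note: CSVRow::readNextRow() above deliberately appends an empty element when a line ends in a comma, so rows that end with an empty field keep a consistent column count. The rule reduced to a standalone loop (the sample line is invented):

    // "1.5,0.3," splits into three cells; the last one is empty.
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    int main()
    {
        std::stringstream lineStream("1.5,0.3,");
        std::string cell;
        std::vector<std::string> cells;
        while (std::getline(lineStream, cell, ','))
        {
            cells.push_back(cell);
        }
        // getline fails with an empty cell exactly when the line ended in a
        // comma, which is the trailing-comma case handled above
        if (!lineStream && cell.empty())
        {
            cells.push_back("");
        }
        std::cout << cells.size() << std::endl; // prints 3
        return 0;
    }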
diff --git a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/math_tools_test.cpp b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/math_tools_test.cpp
index a66375943..81fb283c9 100644
--- a/contributions/MPI_IS_gaussian_process/tests/gaussian_process/math_tools_test.cpp
+++ b/contributions/MPI_IS_gaussian_process/tests/gaussian_process/math_tools_test.cpp
@@ -41,12 +41,10 @@ TEST(MathToolsTest, BoxMullerTest)
 {
     Eigen::VectorXd vRand(10);
-    vRand << 0, 0.1111, 0.2222, 0.3333, 0.4444, 0.5556, 0.6667, 0.7778, 0.8889,
-        1.0000;
+    vRand << 0, 0.1111, 0.2222, 0.3333, 0.4444, 0.5556, 0.6667, 0.7778, 0.8889, 1.0000;

     Eigen::VectorXd matlab_result(10);
-    matlab_result << -6.3769, -1.0481, 0.3012, 1.1355, 1.2735,
-        -2.3210, -1.8154, -1.7081, -0.9528, -0.0000;
+    matlab_result << -6.3769, -1.0481, 0.3012, 1.1355, 1.2735, -2.3210, -1.8154, -1.7081, -0.9528, -0.0000;

     Eigen::VectorXd result = math_tools::box_muller(vRand);

@@ -168,7 +166,7 @@ TEST(MathToolsTest, SpectrumTest)
     Eigen::VectorXd expected_amplitudes(4), expected_frequencies(4);
     expected_amplitudes << 0, 0, 0, 16;
-    expected_frequencies << 0.1250, 0.2500, 0.3750, 0.5000;
+    expected_frequencies << 0.1250, 0.2500, 0.3750, 0.5000;

     std::pair<Eigen::VectorXd, Eigen::VectorXd> result = math_tools::compute_spectrum(y, 8);
     Eigen::VectorXd amplitudes = result.first;
@@ -208,9 +206,8 @@ TEST(MathToolsTest, StdTest)
     EXPECT_NEAR(math_tools::stdandard_deviation(data), matlab_result, 1e-3);
 }

-int main(int argc, char** argv)
+int main(int argc, char **argv)
 {
     ::testing::InitGoogleTest(&argc, argv);
     return RUN_ALL_TESTS();
 }
-
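Note: SpectrumTest above passes N = 8 explicitly; with the default N = 0, compute_spectrum() (in math_tools.cpp below) first raises N to the data length and then to the next power of two, so the FFT never truncates the data. That sizing rule in isolation (standard library only, sample length invented):

    // FFT length selection as done inside compute_spectrum().
    #include <cmath>
    #include <iostream>

    int main()
    {
        int N_data = 500; // e.g. resolution + 1 samples from one of the tests
        int N = 0;        // default argument: let the function choose
        if (N < N_data)
        {
            N = N_data;
        }
        N = static_cast<int>(std::pow(2, std::ceil(std::log(N) / std::log(2)))); // map to nearest power of 2
        std::cout << N << std::endl; // prints 512
        return 0;
    }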
diff --git a/contributions/MPI_IS_gaussian_process/tools/math_tools.cpp b/contributions/MPI_IS_gaussian_process/tools/math_tools.cpp
index 171108991..d948248cc 100644
--- a/contributions/MPI_IS_gaussian_process/tools/math_tools.cpp
+++ b/contributions/MPI_IS_gaussian_process/tools/math_tools.cpp
@@ -49,185 +49,179 @@
 namespace math_tools
 {
-    Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b)
+Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b)
+{
+    int aRows = a.rows(); // bRows must be identical to aRows
+    int aCols = a.cols();
+    int bCols = b.cols();
+
+    Eigen::MatrixXd am(aRows, aCols); // mean-corrected a
+    Eigen::MatrixXd bm(aRows, bCols); // mean-corrected b
+    // final result, aCols x bCols
+    Eigen::MatrixXd result(aCols, bCols);
+
+    Eigen::VectorXd mean(aRows);
+
+    /* If the two Matrices have the same address, it means the function was
+       called from the overloaded version, and thus the mean only has to be
+       computed once.
+     */
+    if (&a == &b) // Same address?
     {
-        int aRows = a.rows(); // bRows must be identical to aRows
-        int aCols = a.cols();
-        int bCols = b.cols();
-
-        Eigen::MatrixXd am(aRows, aCols); // mean-corrected a
-        Eigen::MatrixXd bm(aRows, bCols); // mean-corrected b
-        // final result, aCols x bCols
-        Eigen::MatrixXd result(aCols, bCols);
-
-        Eigen::VectorXd mean(aRows);
-
-        /* If the two Matrices have the same address, it means the function was
-           called from the overloaded version, and thus the mean only has to be
-           computed once.
-         */
-        if (&a == &b) // Same address?
-        {
-            mean = a.rowwise().mean();
-            am = a.colwise() - mean;
-            bm = am;
-        }
-        else
+        mean = a.rowwise().mean();
+        am = a.colwise() - mean;
+        bm = am;
+    }
+    else
+    {
+        if (aRows != b.rows())
         {
-            if (aRows != b.rows())
-            {
-                throw std::runtime_error("Matrix dimension incorrect.");
-            }
-
-            mean = static_cast<double>(aCols) / (aCols + bCols) * a.rowwise().mean() +
-                static_cast<double>(bCols) / (bCols + aCols) * b.rowwise().mean();
-
-            // The mean of the two Matrices is subtracted beforehand, because the
-            // squared error is independent of the mean and this makes the squares
-            // smaller.
-            am = a.colwise() - mean;
-            bm = b.colwise() - mean;
+            throw std::runtime_error("Matrix dimension incorrect.");
         }

-        // The square distance calculation (a - b)^2 is calculated as a^2 - 2*ab + b^2
-        // (using the binomial formula) because of numerical stability.
-
-        // fast version
-        return
-            ((am.array().square().colwise().sum().transpose().rowwise().replicate(bCols).matrix()
-            + bm.array().square().colwise().sum().colwise().replicate(aCols).matrix())
-            - 2 * (am.transpose()) * bm).array().max(0);
-
-        /* // verbose version
-        Eigen::MatrixXd a_square =
-            am.array().square().colwise().sum().transpose().rowwise().replicate(bCols);
-
-        Eigen::MatrixXd b_square =
-            bm.array().square().colwise().sum().colwise().replicate(aCols);
-
-        Eigen::MatrixXd twoab = 2 * (am.transpose()) * bm;
+        mean = static_cast<double>(aCols) / (aCols + bCols) * a.rowwise().mean() +
+            static_cast<double>(bCols) / (bCols + aCols) * b.rowwise().mean();

-        return ((a_square.matrix() + b_square.matrix()) - twoab).array().max(0);
-        */
+        // The mean of the two Matrices is subtracted beforehand, because the
+        // squared error is independent of the mean and this makes the squares
+        // smaller.
+        am = a.colwise() - mean;
+        bm = b.colwise() - mean;
     }

-    Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a)
-    {
-        return squareDistance(a, a);
-    }
+    // The square distance calculation (a - b)^2 is calculated as a^2 - 2*ab + b^2
+    // (using the binomial formula) because of numerical stability.

-    Eigen::MatrixXd generate_uniform_random_matrix_0_1(
-        const size_t n,
-        const size_t m)
-    {
-        Eigen::MatrixXd result = Eigen::MatrixXd(n, m);
-        result.setRandom(); // here we get uniform random values between -1 and 1
-        Eigen::MatrixXd temp = result.array() + 1; // shift to positive
-        result = temp / 2.0; // divide to obtain 0/1 interval
-        // prevent numerical problems by enforcing a particular interval
-        result = result.array().max(1e-10); // eliminate too small values
-        result = result.array().min(1.0); // eliminate too large values
-        return result;
-    }
+    // fast version
+    return ((am.array().square().colwise().sum().transpose().rowwise().replicate(bCols).matrix() +
+             bm.array().square().colwise().sum().colwise().replicate(aCols).matrix()) -
+            2 * (am.transpose()) * bm)
+        .array()
+        .max(0);

-    Eigen::MatrixXd box_muller(const Eigen::VectorXd& vRand)
-    {
-        size_t n = vRand.rows();
-        size_t m = n / 2; // Box-Muller transforms pairs of numbers
+    /* // verbose version
+    Eigen::MatrixXd a_square =
+        am.array().square().colwise().sum().transpose().rowwise().replicate(bCols);

-        Eigen::ArrayXd rand1 = vRand.head(m);
-        Eigen::ArrayXd rand2 = vRand.tail(m);
+    Eigen::MatrixXd b_square =
+        bm.array().square().colwise().sum().colwise().replicate(aCols);

-        /* Implemented according to
-         * http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
-         */
+    Eigen::MatrixXd twoab = 2 * (am.transpose()) * bm;

-        // enforce interval to avoid numerical issues
-        rand1 = rand1.max(1e-10);
-        rand1 = rand1.min(1.0);
+    return ((a_square.matrix() + b_square.matrix()) - twoab).array().max(0);
+    */
+}

-        rand1 = -2 * rand1.log();
-        rand1 = rand1.sqrt(); // this is an amplitude
-
-        rand2 = rand2 * 2 * M_PI; // this is a random angle in the complex plane
-
-        Eigen::MatrixXd result(2 * m, 1);
-        Eigen::MatrixXd res1 = (rand1 * rand2.cos()).matrix(); // first elements
-        Eigen::MatrixXd res2 = (rand1 * rand2.sin()).matrix(); // second elements
-        result << res1, res2; // combine the pairs into one vector
-
-        return result;
-    }
+Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a)
+{
+    return squareDistance(a, a);
+}

-    Eigen::MatrixXd generate_normal_random_matrix(const size_t n, const size_t m)
-    {
-        // if n*m is odd, we need one random number extra!
-        // therefore, we have to round up here.
-        size_t N = static_cast<size_t>(std::ceil(n * m / 2.0));
-
-        Eigen::MatrixXd result(2 * N, 1);
-        // push random samples through the Box-Muller transform
-        result = box_muller(generate_uniform_random_matrix_0_1(2 * N, 1));
-        result.conservativeResize(n, m);
-        return result;
-    }
+Eigen::MatrixXd generate_uniform_random_matrix_0_1(const size_t n, const size_t m)
+{
+    Eigen::MatrixXd result = Eigen::MatrixXd(n, m);
+    result.setRandom(); // here we get uniform random values between -1 and 1
+    Eigen::MatrixXd temp = result.array() + 1; // shift to positive
+    result = temp / 2.0; // divide to obtain 0/1 interval
+    // prevent numerical problems by enforcing a particular interval
+    result = result.array().max(1e-10); // eliminate too small values
+    result = result.array().min(1.0); // eliminate too large values
+    return result;
+}
+
+Eigen::MatrixXd box_muller(const Eigen::VectorXd& vRand)
+{
+    size_t n = vRand.rows();
+    size_t m = n / 2; // Box-Muller transforms pairs of numbers

-    std::pair<Eigen::VectorXd, Eigen::VectorXd> compute_spectrum(Eigen::VectorXd& data, int N)
-    {
+    Eigen::ArrayXd rand1 = vRand.head(m);
+    Eigen::ArrayXd rand2 = vRand.tail(m);

-        int N_data = data.rows();
+    /* Implemented according to
+     * http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
+     */

-        if (N < N_data)
-        {
-            N = N_data;
-        }
-        N = static_cast<int>(std::pow(2, std::ceil(std::log(N) / std::log(2)))); // map to nearest power of 2
+    // enforce interval to avoid numerical issues
+    rand1 = rand1.max(1e-10);
+    rand1 = rand1.min(1.0);

-        Eigen::VectorXd padded_data = Eigen::VectorXd::Zero(N);
-        padded_data.head(N_data) = data;
+    rand1 = -2 * rand1.log();
+    rand1 = rand1.sqrt(); // this is an amplitude

-        Eigen::FFT<double> fft;
+    rand2 = rand2 * 2 * M_PI; // this is a random angle in the complex plane

-        // initialize the double vector from Eigen vector. This works by initializing
-        // with two pointers: 1) the first element of the data, 2) the last element of the data
-        std::vector<double> vec_data(padded_data.data(), padded_data.data() + padded_data.rows() * padded_data.cols());
-        std::vector<std::complex<double> > vec_result;
-        fft.fwd(vec_result, vec_data); // this is the forward-FFT, from time domain to Fourier domain
+    Eigen::MatrixXd result(2 * m, 1);
+    Eigen::MatrixXd res1 = (rand1 * rand2.cos()).matrix(); // first elements
+    Eigen::MatrixXd res2 = (rand1 * rand2.sin()).matrix(); // second elements
+    result << res1, res2; // combine the pairs into one vector

-        // map back to Eigen vector with the address of the first element and the size
-        Eigen::VectorXcd result = Eigen::Map<Eigen::VectorXcd>(&vec_result[0], vec_result.size());
+    return result;
+}

-        // the low_index is the lowest useful frequency, depending on the number of actual datapoints
-        int low_index = static_cast<int>(std::ceil(static_cast<double>(N) / static_cast<double>(N_data)));
+Eigen::MatrixXd generate_normal_random_matrix(const size_t n, const size_t m)
+{
+    // if n*m is odd, we need one random number extra!
+    // therefore, we have to round up here.
+    size_t N = static_cast<size_t>(std::ceil(n * m / 2.0));
+
+    Eigen::MatrixXd result(2 * N, 1);
+    // push random samples through the Box-Muller transform
+    result = box_muller(generate_uniform_random_matrix_0_1(2 * N, 1));
+    result.conservativeResize(n, m);
+    return result;
+}
+
+std::pair<Eigen::VectorXd, Eigen::VectorXd> compute_spectrum(Eigen::VectorXd& data, int N)
+{

-        // prepare amplitudes and frequencies, don't return frequencies introduced by padding
-        Eigen::VectorXd spectrum = result.segment(low_index, N / 2 - low_index + 1).array().abs().pow(2);
-        Eigen::VectorXd frequencies = Eigen::VectorXd::LinSpaced(N / 2 - low_index + 1, low_index, N / 2);
-        frequencies /= N;
+    int N_data = data.rows();

-        return std::make_pair(spectrum, frequencies);
+    if (N < N_data)
+    {
+        N = N_data;
     }
+    N = static_cast<int>(std::pow(2, std::ceil(std::log(N) / std::log(2)))); // map to nearest power of 2

-    Eigen::VectorXd hamming_window(int N)
-    {
-        double alpha = 0.54;
-        double beta = 0.46;
+    Eigen::VectorXd padded_data = Eigen::VectorXd::Zero(N);
+    padded_data.head(N_data) = data;

-        Eigen::VectorXd range = Eigen::VectorXd::LinSpaced(N, 0, 1);
-        Eigen::VectorXd window = alpha - beta * (2 * M_PI * range.array()).cos();
+    Eigen::FFT<double> fft;

-        return window;
-    }
+    // initialize the double vector from Eigen vector. This works by initializing
+    // with two pointers: 1) the first element of the data, 2) the last element of the data
+    std::vector<double> vec_data(padded_data.data(), padded_data.data() + padded_data.rows() * padded_data.cols());
+    std::vector<std::complex<double>> vec_result;
+    fft.fwd(vec_result, vec_data); // this is the forward-FFT, from time domain to Fourier domain

-    double stdandard_deviation(Eigen::VectorXd& input) {
-        Eigen::ArrayXd centered = input.array() - input.array().mean();
-        return std::sqrt(centered.pow(2).sum()/(centered.size() - 1));
-    }
+    // map back to Eigen vector with the address of the first element and the size
+    Eigen::VectorXcd result = Eigen::Map<Eigen::VectorXcd>(&vec_result[0], vec_result.size());
+
+    // the low_index is the lowest useful frequency, depending on the number of actual datapoints
+    int low_index = static_cast<int>(std::ceil(static_cast<double>(N) / static_cast<double>(N_data)));

+    // prepare amplitudes and frequencies, don't return frequencies introduced by padding
+    Eigen::VectorXd spectrum = result.segment(low_index, N / 2 - low_index + 1).array().abs().pow(2);
+    Eigen::VectorXd frequencies = Eigen::VectorXd::LinSpaced(N / 2 - low_index + 1, low_index, N / 2);
+    frequencies /= N;

-} // namespace math_tools
+    return std::make_pair(spectrum, frequencies);
+}

+Eigen::VectorXd hamming_window(int N)
+{
+    double alpha = 0.54;
+    double beta = 0.46;

+    Eigen::VectorXd range = Eigen::VectorXd::LinSpaced(N, 0, 1);
+    Eigen::VectorXd window = alpha - beta * (2 * M_PI * range.array()).cos();

+    return window;
+}

+double stdandard_deviation(Eigen::VectorXd& input)
+{
+    Eigen::ArrayXd centered = input.array() - input.array().mean();
+    return std::sqrt(centered.pow(2).sum() / (centered.size() - 1));
+}
+} // namespace math_tools
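Note: the comment in squareDistance() refers to the binomial expansion (a - b)^2 = a^2 - 2ab + b^2. Mean-correcting a and b first keeps the squared terms small, which limits cancellation error, and the final .max(0) clamps tiny negative results caused by rounding. The idea with plain doubles (a minimal sketch, not the Eigen code path):

    // Binomial expansion on mean-corrected inputs, as in squareDistance().
    #include <algorithm>
    #include <iostream>

    int main()
    {
        double a = 1000.2, b = 1000.1;
        double direct = (a - b) * (a - b);           // reference value, ~0.01
        double expanded = a * a - 2 * a * b + b * b; // same algebra, large intermediate terms
        double mean = 0.5 * (a + b);
        double am = a - mean, bm = b - mean;         // mean-corrected inputs stay small
        double centered = am * am - 2 * am * bm + bm * bm;
        // clamp against tiny negative rounding residue, like .array().max(0)
        std::cout << direct << " " << expanded << " " << std::max(centered, 0.0) << std::endl;
        return 0;
    }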
diff --git a/contributions/MPI_IS_gaussian_process/tools/math_tools.h b/contributions/MPI_IS_gaussian_process/tools/math_tools.h
index 04e7878fd..a60612968 100644
--- a/contributions/MPI_IS_gaussian_process/tools/math_tools.h
+++ b/contributions/MPI_IS_gaussian_process/tools/math_tools.h
@@ -56,106 +56,103 @@
 // M_PI not part of the standard
 #ifndef M_PI
-#define M_PI 3.14159265358979323846
-#endif
+# define M_PI 3.14159265358979323846
+#endif

 namespace math_tools
 {
-    /*!
-     * The squareDistance is the pairwise distance between all points of the passed
-     * vectors. I.e. it generates a symmetric matrix when two vectors are passed.
-     *
-     * The first dimension (rows) is the dimensionality of the input space, the
-     * second dimension (columns) is the number of datapoints. The number of
-     * rows must be identical.
-
-     @param a
-     a Matrix of size Dxn
-
-     @param b
-     a Matrix of size Dxm
-
-     @result
-     a Matrix of size nxm containing all pairwise squared distances
-    */
-    Eigen::MatrixXd squareDistance(
-        const Eigen::MatrixXd& a,
-        const Eigen::MatrixXd& b);
-
-    /*!
-     * Single-input version of squaredDistance. For single inputs, it is assumed
-     * that the pairwise distance matrix should be computed between the passed
-     * vector itself.
-     */
-    Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a);
-
-    /*!
-     * Generates a uniformly distributed random matrix of values between 0 and 1.
-     */
-    Eigen::MatrixXd generate_uniform_random_matrix_0_1(const size_t n, const size_t m);
-
-    /*!
-     * Apply the Box-Muller transform, which transforms uniform random samples
-     * to Gaussian distributed random samples.
-     */
-    Eigen::MatrixXd box_muller(const Eigen::VectorXd& vRand);
-
-    /*!
-     * Generates normal random samples. First it gets some uniform random samples
-     * and then uses the Box-Muller transform to get normal samples out of it
-     */
-    Eigen::MatrixXd generate_normal_random_matrix(const size_t n, const size_t m);
-
-    /*!
-     * Checks if a value is NaN by comparing it with itself.
-     *
-     * The rationale is that NaN is the only value that does not evaluate to true
-     * when comparing it with itself.
-     * Taken from:
-     * http://stackoverflow.com/questions/3437085/check-nan-number
-     * http://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
-     */
-    inline bool isNaN(double x)
-    {
-        return x != x;
-    }
-
-    static const double NaN = std::numeric_limits<double>::quiet_NaN();
-
-    /*!
-     * Checks if a double is infinite via std::numeric_limits functions.
-     *
-     * Taken from:
-     * http://bytes.com/topic/c/answers/588254-how-check-double-inf-nan#post2309761
-     */
-    inline bool isInf(double x)
-    {
-        return x == std::numeric_limits<double>::infinity() ||
-            x == -std::numeric_limits<double>::infinity();
-    }
-
-    /*!
-     * Calculates the spectrum of a data vector.
-     *
-     * Does pre- and postprocessing:
-     * - The data is zero-padded until the desired resolution is reached.
-     * - ditfft2 is called to compute the FFT.
-     * - The frequencies from the padding are removed.
-     * - The constant coefficient is removed.
-     * - A list of frequencies is generated.
-     */
-    std::pair< Eigen::VectorXd, Eigen::VectorXd > compute_spectrum(Eigen::VectorXd& data, int N = 0);
-
-    /*!
-     * Computes a Hamming window (used to reduce spectral leakage of subsequent DFT).
-     */
-    Eigen::VectorXd hamming_window(int N);
-
-    /*!
-     * Computes the standard deviation of a vector... which is not part of Eigen.
-     */
-    double stdandard_deviation(Eigen::VectorXd& input);
-
-} // namespace math_tools
-
-#endif // define GP_MATH_TOOLS_H
+/*!
+ * The squareDistance is the pairwise distance between all points of the passed
+ * vectors. I.e. it generates a symmetric matrix when two vectors are passed.
+ *
+ * The first dimension (rows) is the dimensionality of the input space, the
+ * second dimension (columns) is the number of datapoints. The number of
+ * rows must be identical.
+
+ @param a
+ a Matrix of size Dxn
+
+ @param b
+ a Matrix of size Dxm
+
+ @result
+ a Matrix of size nxm containing all pairwise squared distances
+*/
+Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b);
+
+/*!
+ * Single-input version of squaredDistance. For single inputs, it is assumed
+ * that the pairwise distance matrix should be computed between the passed
+ * vector itself.
+ */
+Eigen::MatrixXd squareDistance(const Eigen::MatrixXd& a);
+
+/*!
+ * Generates a uniformly distributed random matrix of values between 0 and 1.
+ */
+Eigen::MatrixXd generate_uniform_random_matrix_0_1(const size_t n, const size_t m);
+
+/*!
+ * Apply the Box-Muller transform, which transforms uniform random samples
+ * to Gaussian distributed random samples.
+ */
+Eigen::MatrixXd box_muller(const Eigen::VectorXd& vRand);
+
+/*!
+ * Generates normal random samples. First it gets some uniform random samples
+ * and then uses the Box-Muller transform to get normal samples out of it
+ */
+Eigen::MatrixXd generate_normal_random_matrix(const size_t n, const size_t m);
+
+/*!
+ * Checks if a value is NaN by comparing it with itself.
+ *
+ * The rationale is that NaN is the only value that does not evaluate to true
+ * when comparing it with itself.
+ * Taken from:
+ * http://stackoverflow.com/questions/3437085/check-nan-number
+ * http://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
+ */
+inline bool isNaN(double x)
+{
+    return x != x;
+}
+
+static const double NaN = std::numeric_limits<double>::quiet_NaN();
+
+/*!
+ * Checks if a double is infinite via std::numeric_limits functions.
+ *
+ * Taken from:
+ * http://bytes.com/topic/c/answers/588254-how-check-double-inf-nan#post2309761
+ */
+inline bool isInf(double x)
+{
+    return x == std::numeric_limits<double>::infinity() || x == -std::numeric_limits<double>::infinity();
+}
+
+/*!
+ * Calculates the spectrum of a data vector.
+ *
+ * Does pre- and postprocessing:
+ * - The data is zero-padded until the desired resolution is reached.
+ * - ditfft2 is called to compute the FFT.
+ * - The frequencies from the padding are removed.
+ * - The constant coefficient is removed.
+ * - A list of frequencies is generated.
+ */
+std::pair<Eigen::VectorXd, Eigen::VectorXd> compute_spectrum(Eigen::VectorXd& data, int N = 0);
+
+/*!
+ * Computes a Hamming window (used to reduce spectral leakage of subsequent DFT).
+ */
+Eigen::VectorXd hamming_window(int N);
+
+/*!
+ * Computes the standard deviation of a vector... which is not part of Eigen.
+ */
+double stdandard_deviation(Eigen::VectorXd& input);
+
+} // namespace math_tools
+
+#endif // define GP_MATH_TOOLS_H
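Note: math_tools::isNaN() above relies on standard IEEE-754 semantics: NaN is the only value for which x != x holds, so no library call is needed. Shown standalone (the trick is defeated by -ffast-math, which relaxes IEEE comparison rules):

    // Self-comparison NaN check, as declared in math_tools.h.
    #include <iostream>
    #include <limits>

    inline bool isNaN(double x)
    {
        return x != x; // true only for NaN
    }

    int main()
    {
        double nan = std::numeric_limits<double>::quiet_NaN();
        std::cout << std::boolalpha << isNaN(nan) << " " << isNaN(1.0) << std::endl; // true false
        return 0;
    }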