| 1 | /* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ |
| 2 | |
| 3 | /* |
| 4 | Copyright (C) 2007 Marco Bianchetti |
| 5 | Copyright (C) 2007 François du Vignaud |
| 6 | Copyright (C) 2007 Giorgio Facchinetti |
| 7 | Copyright (C) 2012 Ralph Schreyer |
| 8 | Copyright (C) 2012 Mateusz Kapturski |
| 9 | |
| 10 | This file is part of QuantLib, a free-software/open-source library |
| 11 | for financial quantitative analysts and developers - http://quantlib.org/ |
| 12 | |
| 13 | QuantLib is free software: you can redistribute it and/or modify it |
| 14 | under the terms of the QuantLib license. You should have received a |
| 15 | copy of the license along with this program; if not, please email |
| 16 | <quantlib-dev@lists.sf.net>. The license is also available online at |
| 17 | <http://quantlib.org/license.shtml>. |
| 18 | |
| 19 | This program is distributed in the hope that it will be useful, but WITHOUT |
| 20 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| 21 | FOR A PARTICULAR PURPOSE. See the license for more details. |
| 22 | */ |
| 23 | |
| 24 | #include "optimizers.hpp" |
| 25 | #include "utilities.hpp" |
| 26 | #include <ql/math/optimization/simplex.hpp> |
| 27 | #include <ql/math/optimization/levenbergmarquardt.hpp> |
| 28 | #include <ql/math/optimization/conjugategradient.hpp> |
| 29 | #include <ql/math/optimization/steepestdescent.hpp> |
| 30 | #include <ql/math/optimization/bfgs.hpp> |
| 31 | #include <ql/math/optimization/constraint.hpp> |
| 32 | #include <ql/math/optimization/costfunction.hpp> |
| 33 | #include <ql/math/randomnumbers/mt19937uniformrng.hpp> |
| 34 | #include <ql/math/optimization/differentialevolution.hpp> |
| 35 | #include <ql/math/optimization/goldstein.hpp> |
| 36 | |
| 37 | using namespace QuantLib; |
| 38 | using namespace boost::unit_test_framework; |
| 39 | |
| 40 | using std::pow; |
| 41 | using std::cos; |
| 42 | |
| 43 | namespace { |
| 44 | |
| 45 | struct NamedOptimizationMethod; |
| 46 | |
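| | // Globals shared by the tests below: one entry per test problem, filled in by setup(). |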
| 47 | std::vector<ext::shared_ptr<CostFunction> > costFunctions_; |
| 48 | std::vector<ext::shared_ptr<Constraint> > constraints_; |
| 49 | std::vector<Array> initialValues_; |
| 50 | std::vector<Size> maxIterations_, maxStationaryStateIterations_; |
| 51 | std::vector<Real> rootEpsilons_, functionEpsilons_, gradientNormEpsilons_; |
| 52 | std::vector<ext::shared_ptr<EndCriteria> > endCriterias_; |
| 53 | std::vector<std::vector<NamedOptimizationMethod> > optimizationMethods_; |
| 54 | std::vector<Array> xMinExpected_, yMinExpected_; |
| 55 | |
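| | // Test cost function: a one-dimensional polynomial |
| | // y = c_0 + c_1*x + ... + c_N*x^N, |
| | // with the coefficients passed in increasing order of the power of x. |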
| 56 | class OneDimensionalPolynomialDegreeN : public CostFunction { |
| 57 | public: |
| 58 | explicit OneDimensionalPolynomialDegreeN(const Array& coefficients) |
| 59 | : coefficients_(coefficients), |
| 60 | polynomialDegree_(coefficients.size()-1) {} |
| 61 | |
| 62 | Real value(const Array& x) const override { |
| 63 | QL_REQUIRE(x.size()==1,"independent variable must be 1 dimensional"); |
| 64 | Real y = 0; |
| 65 | for (Size i=0; i<=polynomialDegree_; ++i) |
| 66 | y += coefficients_[i]*std::pow(x[0], static_cast<int>(i)); |
| 67 | return y; |
| 68 | } |
| 69 | |
| 70 | Array values(const Array& x) const override { |
| 71 | QL_REQUIRE(x.size()==1,"independent variable must be 1 dimensional"); |
| 72 | return Array(1, value(x)); |
| 73 | } |
| 74 | |
| 75 | private: |
| 76 | const Array coefficients_; |
| 77 | const Size polynomialDegree_; |
| 78 | }; |
| 79 | |
| 80 | |
| 81 | // The goal of this cost function is simply to run another optimization |
| 82 | // inside its own evaluation, in order to test nested optimizations |
| 83 | class OptimizationBasedCostFunction : public CostFunction { |
| 84 | public: |
| 85 | Real value(const Array&) const override { return 1.0; } |
| 86 | |
| 87 | Array values(const Array&) const override { |
| 88 | // dummy nested optimization |
| 89 | Array coefficients(3, 1.0); |
| 90 | OneDimensionalPolynomialDegreeN oneDimensionalPolynomialDegreeN(coefficients); |
| 91 | NoConstraint constraint; |
| 92 | Array initialValues(1, 100.0); |
| 93 | Problem problem(oneDimensionalPolynomialDegreeN, constraint, |
| 94 | initialValues); |
| 95 | LevenbergMarquardt optimizationMethod; |
| 96 | //Simplex optimizationMethod(0.1); |
| 97 | //ConjugateGradient optimizationMethod; |
| 98 | //SteepestDescent optimizationMethod; |
| 99 | EndCriteria endCriteria(1000, 100, 1e-5, 1e-5, 1e-5); |
| 100 | optimizationMethod.minimize(problem, endCriteria); |
| 101 | // return dummy result |
| 102 | return Array(1, 0); |
| 103 | } |
| 104 | }; |
| 105 | |
| 106 | |
| 107 | enum OptimizationMethodType {simplex, |
| 108 | levenbergMarquardt, |
| 109 | levenbergMarquardt2, |
| 110 | conjugateGradient, |
| 111 | conjugateGradient_goldstein, |
| 112 | steepestDescent, |
| 113 | steepestDescent_goldstein, |
| 114 | bfgs, |
| 115 | bfgs_goldstein}; |
| 116 | |
| 117 | std::string optimizationMethodTypeToString(OptimizationMethodType type) { |
| 118 | switch (type) { |
| 119 | case simplex: |
| 120 | return "Simplex"; |
| 121 | case levenbergMarquardt: |
| 122 | return "Levenberg Marquardt"; |
| 123 | case levenbergMarquardt2: |
| 124 | return "Levenberg Marquardt (cost function's jacobian)"; |
| 125 | case conjugateGradient: |
| 126 | return "Conjugate Gradient"; |
| 127 | case steepestDescent: |
| 128 | return "Steepest Descent"; |
| 129 | case bfgs: |
| 130 | return "BFGS"; |
| 131 | case conjugateGradient_goldstein: |
| 132 | return "Conjugate Gradient (Goldstein line search)"; |
| 133 | case steepestDescent_goldstein: |
| 134 | return "Steepest Descent (Goldstein line search)"; |
| 135 | case bfgs_goldstein: |
| 136 | return "BFGS (Goldstein line search)"; |
| 137 | default: |
| 138 | QL_FAIL("unknown OptimizationMethod type"); |
| 139 | } |
| 140 | } |
| 141 | |
| 142 | struct NamedOptimizationMethod { |
| 143 | ext::shared_ptr<OptimizationMethod> optimizationMethod; |
| 144 | std::string name; |
| 145 | }; |
| 146 | |
| 147 | |
| 148 | ext::shared_ptr<OptimizationMethod> makeOptimizationMethod( |
| 149 | OptimizationMethodType optimizationMethodType, |
| 150 | Real simplexLambda, |
| 151 | Real levenbergMarquardtEpsfcn, |
| 152 | Real levenbergMarquardtXtol, |
| 153 | Real levenbergMarquardtGtol) { |
| 154 | switch (optimizationMethodType) { |
| 155 | case simplex: |
| 156 | return ext::shared_ptr<OptimizationMethod>( |
| 157 | new Simplex(simplexLambda)); |
| 158 | case levenbergMarquardt: |
| 159 | return ext::shared_ptr<OptimizationMethod>( |
| 160 | new LevenbergMarquardt(levenbergMarquardtEpsfcn, |
| 161 | levenbergMarquardtXtol, |
| 162 | levenbergMarquardtGtol)); |
| 163 | case levenbergMarquardt2: |
| 164 | return ext::shared_ptr<OptimizationMethod>( |
| 165 | new LevenbergMarquardt(levenbergMarquardtEpsfcn, |
| 166 | levenbergMarquardtXtol, |
| 167 | levenbergMarquardtGtol, |
| 168 | true)); |
| 169 | case conjugateGradient: |
| 170 | return ext::shared_ptr<OptimizationMethod>(new ConjugateGradient); |
| 171 | case steepestDescent: |
| 172 | return ext::shared_ptr<OptimizationMethod>(new SteepestDescent); |
| 173 | case bfgs: |
| 174 | return ext::shared_ptr<OptimizationMethod>(new BFGS); |
| 175 | case conjugateGradient_goldstein: |
| 176 | return ext::shared_ptr<OptimizationMethod>(new ConjugateGradient(ext::make_shared<GoldsteinLineSearch>())); |
| 177 | case steepestDescent_goldstein: |
| 178 | return ext::shared_ptr<OptimizationMethod>(new SteepestDescent(ext::make_shared<GoldsteinLineSearch>())); |
| 179 | case bfgs_goldstein: |
| 180 | return ext::shared_ptr<OptimizationMethod>(new BFGS(ext::make_shared<GoldsteinLineSearch>())); |
| 181 | default: |
| 182 | QL_FAIL("unknown OptimizationMethod type"); |
| 183 | } |
| 184 | } |
| 185 | |
| 186 | |
| 187 | std::vector<NamedOptimizationMethod> makeOptimizationMethods( |
| 188 | const std::vector<OptimizationMethodType>& optimizationMethodTypes, |
| 189 | Real simplexLambda, |
| 190 | Real levenbergMarquardtEpsfcn, |
| 191 | Real levenbergMarquardtXtol, |
| 192 | Real levenbergMarquardtGtol) { |
| 193 | std::vector<NamedOptimizationMethod> results; |
| 194 | for (auto optimizationMethodType : optimizationMethodTypes) { |
| 195 | NamedOptimizationMethod namedOptimizationMethod; |
| 196 | namedOptimizationMethod.optimizationMethod = makeOptimizationMethod( |
| 197 | optimizationMethodType, simplexLambda, levenbergMarquardtEpsfcn, |
| 198 | levenbergMarquardtXtol, levenbergMarquardtGtol); |
| 199 | namedOptimizationMethod.name = optimizationMethodTypeToString(optimizationMethodType); |
| 200 | results.push_back(namedOptimizationMethod); |
| 201 | } |
| 202 | return results; |
| 203 | } |
| 204 | |
| 205 | Real maxDifference(const Array& a, const Array& b) { |
| 206 | Array diff = a-b; |
| 207 | Real maxDiff = 0.0; |
| 208 | for (Real i : diff) |
| 209 | maxDiff = std::max(maxDiff, std::fabs(i)); |
| 210 | return maxDiff; |
| 211 | } |
| 212 | |
| 213 | // Set up, for each cost function, all the ingredients for optimization: |
| 214 | // constraint, initial guess, end criteria, optimization methods. |
| 215 | void setup() { |
| 216 | |
| 217 | // Cost function n. 1: 1D polynomial of degree 2 (parabolic function y=a*x^2+b*x+c) |
| 218 | const Real a = 1; // required a > 0 |
| 219 | const Real b = 1; |
| 220 | const Real c = 1; |
| 221 | Array coefficients(3); |
| 222 | coefficients[0]= c; |
| 223 | coefficients[1]= b; |
| 224 | coefficients[2]= a; |
| 225 | costFunctions_.push_back(ext::shared_ptr<CostFunction>( |
| 226 | new OneDimensionalPolynomialDegreeN(coefficients))); |
| 227 | // Set constraint for optimizers: unconstrained problem |
| 228 | constraints_.push_back(ext::shared_ptr<Constraint>(new NoConstraint())); |
| 229 | // Set initial guess for optimizer |
| 230 | Array initialValue(1); |
| 231 | initialValue[0] = -100; |
| 232 | initialValues_.push_back(initialValue); |
| 233 | // Set end criteria for optimizer |
| 234 | maxIterations_.push_back(10000); // maxIterations |
| 235 | maxStationaryStateIterations_.push_back(100); // MaxStationaryStateIterations |
| 236 | rootEpsilons_.push_back(1e-8); // rootEpsilon |
| 237 | functionEpsilons_.push_back(1e-8); // functionEpsilon |
| 238 | gradientNormEpsilons_.push_back(1e-8); // gradientNormEpsilon |
| 239 | endCriterias_.push_back(ext::make_shared<EndCriteria>( |
| 240 | maxIterations_.back(), maxStationaryStateIterations_.back(), |
| 241 | rootEpsilons_.back(), functionEpsilons_.back(), |
| 242 | gradientNormEpsilons_.back())); |
| 243 | // Set optimization methods for optimizer |
| 244 | std::vector<OptimizationMethodType> optimizationMethodTypes = { |
| 245 | simplex, levenbergMarquardt, levenbergMarquardt2, conjugateGradient, |
| 246 | bfgs //, steepestDescent |
| 247 | }; |
| 248 | Real simplexLambda = 0.1; // characteristic search length for simplex |
| 249 | Real levenbergMarquardtEpsfcn = 1.0e-8; // parameters specific for Levenberg-Marquardt |
| 250 | Real levenbergMarquardtXtol = 1.0e-8; // |
| 251 | Real levenbergMarquardtGtol = 1.0e-8; // |
| 252 | optimizationMethods_.push_back(makeOptimizationMethods( |
| 253 | optimizationMethodTypes, |
| 254 | simplexLambda, levenbergMarquardtEpsfcn, levenbergMarquardtXtol, |
| 255 | levenbergMarquardtGtol)); |
| 256 | // Set expected results for optimizer |
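| | // (the vertex of y = a*x^2 + b*x + c lies at x* = -b/(2a), |
| | // where y* = -(b^2 - 4ac)/(4a) = c - b^2/(4a)) |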
| 257 | Array xMinExpected(1),yMinExpected(1); |
| 258 | xMinExpected[0] = -b/(2.0*a); |
| 259 | yMinExpected[0] = -(b*b-4.0*a*c)/(4.0*a); |
| 260 | xMinExpected_.push_back(xMinExpected); |
| 261 | yMinExpected_.push_back(yMinExpected); |
| 262 | } |
| 263 | |
| 264 | } |
| 265 | |
| 266 | |
| 267 | void OptimizersTest::test() { |
| 268 | BOOST_TEST_MESSAGE("Testing optimizers..."); |
| 269 | |
| 270 | setup(); |
| 271 | |
| 272 | // Loop over problems (currently there is only 1 problem) |
| 273 | for (Size i=0; i<costFunctions_.size(); ++i) { |
| 274 | Problem problem(*costFunctions_[i], *constraints_[i], |
| 275 | initialValues_[i]); |
| 276 | Array initialValues = problem.currentValue(); |
| 277 | // Loop over optimizers |
| 278 | for (Size j=0; j<(optimizationMethods_[i]).size(); ++j) { |
| 279 | Real rootEpsilon = endCriterias_[i]->rootEpsilon(); |
| 280 | Size endCriteriaTests = 1; |
| 281 | // Loop over rootEpsilon |
| 282 | for (Size k=0; k<endCriteriaTests; ++k) { |
| 283 | problem.setCurrentValue(initialValues); |
| 284 | EndCriteria endCriteria( |
| 285 | endCriterias_[i]->maxIterations(), |
| 286 | endCriterias_[i]->maxStationaryStateIterations(), |
| 287 | rootEpsilon, |
| 288 | endCriterias_[i]->functionEpsilon(), |
| 289 | endCriterias_[i]->gradientNormEpsilon()); |
| 290 | rootEpsilon *= .1; |
| 291 | EndCriteria::Type endCriteriaResult = |
| 292 | optimizationMethods_[i][j].optimizationMethod->minimize( |
| 293 | problem, endCriteria); |
| 294 | Array xMinCalculated = problem.currentValue(); |
| 295 | Array yMinCalculated = problem.values(xMinCalculated); |
| 296 | |
| 297 | // Check optimization results vs known solution |
| 298 | bool completed; |
| 299 | switch (endCriteriaResult) { |
| 300 | case EndCriteria::None: |
| 301 | case EndCriteria::MaxIterations: |
| 302 | case EndCriteria::Unknown: |
| 303 | completed = false; |
| 304 | break; |
| 305 | default: |
| 306 | completed = true; |
| 307 | } |
| 308 | |
| 309 | Real xError = maxDifference(xMinCalculated, xMinExpected_[i]); |
| 310 | Real yError = maxDifference(yMinCalculated, yMinExpected_[i]); |
| 311 | |
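| | // accept the solution if either the argument or the function value |
| | // is within its tolerance of the known minimum |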
| 312 | bool correct = (xError <= endCriteria.rootEpsilon() || |
| 313 | yError <= endCriteria.functionEpsilon()); |
| 314 | |
| 315 | if ((!completed) || (!correct)) |
| 316 | BOOST_ERROR("costFunction # = " << i << |
| 317 | "\nOptimizer: " << |
| 318 | optimizationMethods_[i][j].name << |
| 319 | "\n function evaluations: " << |
| 320 | problem.functionEvaluation() << |
| 321 | "\n gradient evaluations: " << |
| 322 | problem.gradientEvaluation() << |
| 323 | "\n x expected: " << |
| 324 | xMinExpected_[i] << |
| 325 | "\n x calculated: " << |
| 326 | std::setprecision(9) << xMinCalculated << |
| 327 | "\n x difference: " << |
| 328 | xMinExpected_[i]- xMinCalculated << |
| 329 | "\n rootEpsilon: " << |
| 330 | std::setprecision(9) << |
| 331 | endCriteria.rootEpsilon() << |
| 332 | "\n y expected: " << |
| 333 | yMinExpected_[i] << |
| 334 | "\n y calculated: " << |
| 335 | std::setprecision(9) << yMinCalculated << |
| 336 | "\n y difference: " << |
| 337 | yMinExpected_[i]- yMinCalculated << |
| 338 | "\n functionEpsilon: " << |
| 339 | std::setprecision(9) << |
| 340 | endCriteria.functionEpsilon() << |
| 341 | "\n endCriteriaResult: " << |
| 342 | endCriteriaResult); |
| 343 | } |
| 344 | } |
| 345 | } |
| 346 | } |
| 347 | |
| 348 | |
| 349 | void OptimizersTest::nestedOptimizationTest() { |
| 350 | BOOST_TEST_MESSAGE("Testing nested optimizations..."); |
| 351 | OptimizationBasedCostFunction optimizationBasedCostFunction; |
| 352 | NoConstraint constraint; |
| 353 | Array initialValues(1, 0.0); |
| 354 | Problem problem(optimizationBasedCostFunction, constraint, |
| 355 | initialValues); |
| 356 | LevenbergMarquardt optimizationMethod; |
| 357 | //Simplex optimizationMethod(0.1); |
| 358 | //ConjugateGradient optimizationMethod; |
| 359 | //SteepestDescent optimizationMethod; |
| 360 | EndCriteria endCriteria(1000, 100, 1e-5, 1e-5, 1e-5); |
| 361 | optimizationMethod.minimize(problem, endCriteria); |
| 362 | |
| 363 | } |
| 364 | |
| 365 | namespace { |
| 366 | |
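| | // First De Jong function (sphere): f(x) = sum_i x_i^2, |
| | // global minimum 0 at the origin. |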
| 367 | class FirstDeJong : public CostFunction { |
| 368 | public: |
| 369 | Array values(const Array& x) const override { |
| 370 | return Array(x.size(),value(x)); |
| 371 | } |
| 372 | Real value(const Array& x) const override { return DotProduct(x, x); } |
| 373 | }; |
| 374 | |
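| | // Second De Jong function (Rosenbrock): |
| | // f(x) = 100*(x_0^2 - x_1)^2 + (1 - x_0)^2, global minimum 0 at (1, 1). |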
| 375 | class SecondDeJong : public CostFunction { |
| 376 | public: |
| 377 | Array values(const Array& x) const override { |
| 378 | return Array(x.size(),value(x)); |
| 379 | } |
| 380 | Real value(const Array& x) const override { |
| 381 | return 100.0*(x[0]*x[0]-x[1])*(x[0]*x[0]-x[1]) |
| 382 | + (1.0-x[0])*(1.0-x[0]); |
| 383 | } |
| 384 | }; |
| 385 | |
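| | // Modified third De Jong function (step): f(x) = sum_i floor(x_i)^2, |
| | // minimized (value 0) whenever every x_i lies in [0, 1). |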
| 386 | class ModThirdDeJong : public CostFunction { |
| 387 | public: |
| 388 | Array values(const Array& x) const override { |
| 389 | return Array(x.size(),value(x)); |
| 390 | } |
| 391 | Real value(const Array& x) const override { |
| 392 | Real fx = 0.0; |
| 393 | for (Real i : x) { |
| 394 | fx += std::floor(i) * std::floor(i); |
| 395 | } |
| 396 | return fx; |
| 397 | } |
| 398 | }; |
| 399 | |
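| | // Modified fourth De Jong function (noisy quartic): |
| | // f(x) = sum_i (i+1)*x_i^4 plus a uniform noise term per coordinate, |
| | // so the minimum is not well defined (see the note in testDifferentialEvolution). |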
| 400 | class ModFourthDeJong : public CostFunction { |
| 401 | public: |
| 402 | ModFourthDeJong() |
| 403 | : uniformRng_(MersenneTwisterUniformRng(4711)) { |
| 404 | } |
| 405 | Array values(const Array& x) const override { |
| 406 | return Array(x.size(),value(x)); |
| 407 | } |
| 408 | Real value(const Array& x) const override { |
| 409 | Real fx = 0.0; |
| 410 | for (Size i=0; i<x.size(); ++i) { |
| 411 | fx += (i+1.0)*pow(x[i], 4.0) + uniformRng_.nextReal(); |
| 412 | } |
| 413 | return fx; |
| 414 | } |
| 415 | MersenneTwisterUniformRng uniformRng_; |
| 416 | }; |
| 417 | |
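| | // Griewangk function: f(x) = sum_i x_i^2/4000 - prod_i cos(x_i/sqrt(i+1)) + 1, |
| | // global minimum 0 at the origin. |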
| 418 | class Griewangk : public CostFunction { |
| 419 | public: |
| 420 | Array values(const Array& x) const override { |
| 421 | return Array(x.size(),value(x)); |
| 422 | } |
| 423 | Real value(const Array& x) const override { |
| 424 | Real fx = 0.0; |
| 425 | for (Real i : x) { |
| 426 | fx += i * i / 4000.0; |
| 427 | } |
| 428 | Real p = 1.0; |
| 429 | for (Size i=0; i<x.size(); ++i) { |
| 430 | p *= cos(x[i]/std::sqrt(i+1.0)); |
| 431 | } |
| 432 | return fx - p + 1.0; |
| 433 | } |
| 434 | }; |
| 435 | } |
| 436 | |
| 437 | void OptimizersTest::testDifferentialEvolution() { |
| 438 | BOOST_TEST_MESSAGE("Testing differential evolution..."); |
| 439 | |
| 440 | /* Note: |
| 441 | * |
| 442 | * The "ModFourthDeJong" doesn't have a well defined optimum because |
| 443 | * of its noisy part. It just has to be <= 15 in our example. |
| 444 | * The concrete value might differ for a different input and |
| 445 | * different random numbers. |
| 446 | * |
| 447 | * The "Griewangk" function is an example where the adaptive |
| 448 | * version of DifferentialEvolution turns out to be more successful. |
| 449 | */ |
| 450 | |
| 451 | DifferentialEvolution::Configuration conf = |
| 452 | DifferentialEvolution::Configuration() |
| 453 | .withStepsizeWeight(0.4) |
| 454 | .withBounds() |
| 455 | .withCrossoverProbability(0.35) |
| 456 | .withPopulationMembers(500) |
| 457 | .withStrategy(DifferentialEvolution::BestMemberWithJitter) |
| 458 | .withCrossoverType(DifferentialEvolution::Normal) |
| 459 | .withAdaptiveCrossover() |
| 460 | .withSeed(3242); |
| 461 | DifferentialEvolution deOptim(conf); |
| 462 | |
| 463 | DifferentialEvolution::Configuration conf2 = |
| 464 | DifferentialEvolution::Configuration() |
| 465 | .withStepsizeWeight(1.8) |
| 466 | .withBounds() |
| 467 | .withCrossoverProbability(0.9) |
| 468 | .withPopulationMembers(1000) |
| 469 | .withStrategy(DifferentialEvolution::Rand1SelfadaptiveWithRotation) |
| 470 | .withCrossoverType(DifferentialEvolution::Normal) |
| 471 | .withAdaptiveCrossover() |
| 472 | .withSeed(3242); |
| 473 | DifferentialEvolution deOptim2(conf2); |
| 474 | |
| 475 | std::vector<DifferentialEvolution > diffEvolOptimisers = { |
| 476 | deOptim, |
| 477 | deOptim, |
| 478 | deOptim, |
| 479 | deOptim, |
| 480 | deOptim2 |
| 481 | }; |
| 482 | |
| 483 | std::vector<ext::shared_ptr<CostFunction> > costFunctions = { |
| 484 | ext::shared_ptr<CostFunction>(new FirstDeJong), |
| 485 | ext::shared_ptr<CostFunction>(new SecondDeJong), |
| 486 | ext::shared_ptr<CostFunction>(new ModThirdDeJong), |
| 487 | ext::shared_ptr<CostFunction>(new ModFourthDeJong), |
| 488 | ext::shared_ptr<CostFunction>(new Griewangk) |
| 489 | }; |
| 490 | |
| 491 | std::vector<BoundaryConstraint> constraints = { |
| 492 | {-10.0, 10.0}, |
| 493 | {-10.0, 10.0}, |
| 494 | {-10.0, 10.0}, |
| 495 | {-10.0, 10.0}, |
| 496 | {-600.0, 600.0} |
| 497 | }; |
| 498 | |
| 499 | std::vector<Array> initialValues = { |
| 500 | Array(3, 5.0), |
| 501 | Array(2, 5.0), |
| 502 | Array(5, 5.0), |
| 503 | Array(30, 5.0), |
| 504 | Array(10, 100.0) |
| 505 | }; |
| 506 | |
| 507 | std::vector<EndCriteria> endCriteria = { |
| 508 | {100, 10, 1e-10, 1e-8, Null<Real>()}, |
| 509 | {100, 10, 1e-10, 1e-8, Null<Real>()}, |
| 510 | {100, 10, 1e-10, 1e-8, Null<Real>()}, |
| 511 | {500, 100, 1e-10, 1e-8, Null<Real>()}, |
| 512 | {1000, 800, 1e-12, 1e-10, Null<Real>()} |
| 513 | }; |
| 514 | |
| 515 | std::vector<Real> minima = { |
| 516 | 0.0, |
| 517 | 0.0, |
| 518 | 0.0, |
| 519 | 10.9639796558, |
| 520 | 0.0 |
| 521 | }; |
| 522 | |
| 523 | for (Size i = 0; i < costFunctions.size(); ++i) { |
| 524 | Problem problem(*costFunctions[i], constraints[i], initialValues[i]); |
| 525 | diffEvolOptimisers[i].minimize(problem, endCriteria[i]); |
| 526 | |
| 527 | if (i != 3) { |
| 528 | // stable |
| 529 | if (std::fabs(x: problem.functionValue() - minima[i]) > 1e-8) { |
| 530 | BOOST_ERROR("costFunction # " << i |
| 531 | << "\ncalculated: " << problem.functionValue() |
| 532 | << "\nexpected: " << minima[i]); |
| 533 | } |
| 534 | } else { |
| 535 | // this case is unstable due to randomness; we're good as |
| 536 | // long as the result is below 15 |
| 537 | if (problem.functionValue() > 15) { |
| 538 | BOOST_ERROR("costFunction # " << i |
| 539 | << "\ncalculated: " << problem.functionValue() |
| 540 | << "\nexpected: " << "less than 15" ); |
| 541 | } |
| 542 | } |
| 543 | } |
| 544 | } |
| 545 | |
| 546 | test_suite* OptimizersTest::suite(SpeedLevel speed) { |
| 547 | auto* suite = BOOST_TEST_SUITE("Optimizers tests"); |
| 548 | |
| 549 | suite->add(QUANTLIB_TEST_CASE(&OptimizersTest::test)); |
| 550 | suite->add(QUANTLIB_TEST_CASE(&OptimizersTest::nestedOptimizationTest)); |
| 551 | |
| 552 | if (speed <= Fast) { |
| 553 | suite->add(QUANTLIB_TEST_CASE( |
| 554 | &OptimizersTest::testDifferentialEvolution)); |
| 555 | } |
| 556 | |
| 557 | return suite; |
| 558 | } |
| 559 | |
| 560 | |