/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.legacy.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math4.legacy.analysis.MultivariateFunction;
import org.apache.commons.math4.legacy.exception.DimensionMismatchException;
import org.apache.commons.math4.legacy.exception.NotStrictlyPositiveException;
import org.apache.commons.math4.legacy.exception.OutOfRangeException;
import org.apache.commons.math4.legacy.exception.TooManyEvaluationsException;
import org.apache.commons.math4.legacy.linear.Array2DRowRealMatrix;
import org.apache.commons.math4.legacy.linear.EigenDecomposition;
import org.apache.commons.math4.legacy.linear.MatrixUtils;
import org.apache.commons.math4.legacy.linear.RealMatrix;
import org.apache.commons.math4.legacy.optim.ConvergenceChecker;
import org.apache.commons.math4.legacy.optim.OptimizationData;
import org.apache.commons.math4.legacy.optim.PointValuePair;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.PopulationSize;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.Sigma;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.rng.UniformRandomProvider;
import org.apache.commons.statistics.distribution.ContinuousDistribution;
import org.apache.commons.statistics.distribution.NormalDistribution;
import org.apache.commons.math4.core.jdkmath.JdkMath;

/**
 * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * <p>
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers, etc.) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.
 * <p>
 * In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to <code>N=10</code> variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.
 * <p>
 * The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. To observe the advantage of CMA compared
 * to a conventional evolution strategy, it will usually take about
 * <code>30 N</code> function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * <code>30 N</code> and <code>300 N<sup>2</sup></code>
 * function evaluations.
 * <p>
 * This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
 * <p>
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
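 * <p>
 * A minimal usage sketch (illustrative only: it assumes the standard
 * optimization data classes of the surrounding {@code optim} packages and a
 * generator obtained from {@code org.apache.commons.rng.simple.RandomSource}):
 * <pre>{@code
 * CMAESOptimizer optimizer =
 *     new CMAESOptimizer(30000, 0, true, 0, 10,
 *                        RandomSource.MT_64.create(), false, null);
 * PointValuePair result =
 *     optimizer.optimize(new MaxEval(100000),
 *                        new ObjectiveFunction(x -> x[0] * x[0] + x[1] * x[1]),
 *                        GoalType.MINIMIZE,
 *                        new InitialGuess(new double[] { 2, 2 }),
 *                        new SimpleBounds(new double[] { -10, -10 },
 *                                         new double[] { 10, 10 }),
 *                        new Sigma(new double[] { 0.5, 0.5 }),
 *                        new PopulationSize(10));
 * }</pre>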
 *
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
     * pos. def. and is numerically faster. Active CMA usually speeds up the
     * adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible / beyond the defined limits, default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal is minimization (as opposed to maximization). */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if the x-change is larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if the x-change is smaller than stopTolX. */
    private double stopTolX;
    /** Stop if the change in function value is smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the range of the recorded best function values is smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Square root of the diagonal of D, stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Gaussian sampler. */
    private final ContinuousDistribution.Sampler random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<>();

    /**
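     * Simple constructor.
     * <p>
     * A typical invocation could look as follows (a sketch with illustrative
     * values; the generator is assumed to be obtained from
     * {@code org.apache.commons.rng.simple.RandomSource}):
     * <pre>{@code
     * new CMAESOptimizer(30000, 0, true, 0, 10,
     *                    RandomSource.MT_64.create(), false, null);
     * }</pre>
     *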
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop if the objective function value is smaller than
     * {@code stopFitness}.
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param rng Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          UniformRandomProvider rng,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = Math.max(0, checkFeasableCount);
        this.random = NormalDistribution.of(0, 1).createSampler(rng);
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>
     *   {@link Sigma} values define the initial coordinate-wise standard
     *   deviations for sampling new search points around the initial guess.
     *   It is suggested to set them to the estimated distance from the
     *   initial to the desired optimum.
     *   Small values induce the search to be more local (and very small
     *   values are more likely to find a local optimum close to the initial
     *   guess).
     *   Too small values might however lead to early termination.
     *  </li>
     *  <li>
     *   {@link PopulationSize} is the number of offspring and the primary
     *   strategy parameter.
     *   In the absence of better clues, a good default is an integer
     *   close to {@code 4 + 3 ln(n)}, where {@code n} is the number of
     *   optimized parameters (see the sketch after this list).
     *   Increasing the population size improves global search properties at
     *   the expense of speed (which in general decreases at most linearly
     *   with increasing population size).
     *  </li>
     * </ul>
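     * <p>
     * For instance, the default population size suggested above can be
     * computed as in this sketch, where {@code n} is the problem dimension:
     * <pre>{@code
     * int lambda = 4 + (int) (3 * Math.log(n));
     * }</pre>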
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of evaluations
     * is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and
     * weight arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData) {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        ValuePenaltyPair valuePenalty = fitfun.value(guess);
        double bestValue = valuePenalty.value + valuePenalty.penalty;
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------
        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i <= checkFeasableCount; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }
            // Compute fitnesses by adding value and penalty after scaling by value range.
            double valueRange = valueRange(valuePenaltyPairs);
            for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
                fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            }
            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, Arrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, Arrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= JdkMath.exp(JdkMath.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * JdkMath.max(JdkMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                JdkMath.max(historyWorst, worstFitness) -
                JdkMath.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // Break if the condition number of C exceeds 1e14 (diagD holds the
            // standard deviations, i.e. the square roots of the eigenvalues of C,
            // hence the 1e7 threshold).
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user-defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma *= JdkMath.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && JdkMath.max(historyWorst, bestFitness) -
                JdkMath.min(historyBest, bestFitness) == 0) {
                sigma *= JdkMath.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        if (inputSigma != null) {
            final double[] init = getStartPoint();

            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }

            final double[] lB = getLowerBound();
            final double[] uB = getUpperBound();

            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
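     * <p>
     * For reference, the recombination weights computed here follow (in
     * pseudo-code form):
     * <pre>{@code
     * w_i = log(mu + 0.5) - log(i),   i = 1, ..., mu
     * w   = w / sum(w)                // normalized to sum to 1
     * }</pre>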
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = JdkMath.log(mu + 0.5);
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * JdkMath.max(0, JdkMath.sqrt((mueff - 1) /
                                                       (dimension + 1)) - 1)) *
            JdkMath.max(0.3,
                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = JdkMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                              ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = JdkMath.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = JdkMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = JdkMath.sqrt(dimension) *
                (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution path for C
        ps = zeros(dimension, 1); // evolution path for sigma
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension); // B defines the coordinate system
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
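     * <p>
     * For reference, the updates performed here are (in pseudo-code form,
     * with {@code N} the problem dimension):
     * <pre>{@code
     * ps   = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * B * zmean
     * hsig = ||ps|| / sqrt(1 - (1 - cs)^(2 * iterations)) / chiN < 1.4 + 2 / (N + 1)
     * pc   = (1 - cc) * pc + hsig * sqrt(cc * (2 - cc) * mueff) * (xmean - xold) / sigma
     * }</pre>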
     *
     * @param zmean Weighted row matrix of the Gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        JdkMath.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
        final boolean hsig = normps /
            JdkMath.sqrt(1 - JdkMath.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(JdkMath.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
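     * <p>
     * For reference, the diagonal update performed here is (in Matlab-style
     * pseudo-code, where {@code .*} and {@code .^2} are element-wise and
     * {@code oldCorrection} is the small hsig-dependent term):
     * <pre>{@code
     * diagC = (1 - ccov1Sep - ccovmuSep + oldCorrection) * diagC  // old matrix
     *       + ccov1Sep * pc.^2                                    // rank-one update
     *       + ccovmuSep * diagC .* (bestArz.^2 * weights)         // rank-mu update
     * }</pre>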
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the Gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add(times(diagC, square(bestArz).multiply(weights)) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
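     * <p>
     * For reference, the (non-active) update performed here is the standard
     * CMA-ES covariance update, in pseudo-code form, where {@code arpos} holds
     * the mu best difference vectors {@code (x_i - xold) / sigma} and
     * {@code oldCorrection} is the small hsig-dependent term:
     * <pre>{@code
     * C = (1 - ccov1 - ccovmu + oldCorrection) * C   // old matrix
     *   + ccov1 * pc * pc'                           // rank-one update
     *   + ccovmu * arpos * diag(weights) * arpos'    // rank-mu update
     * }</pre>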
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the Gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (JdkMath.pow(dimension + 2.0, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small popsize are most
                // critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, Arrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix cNeg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(cNeg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % 1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
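            // Note: "iterations % 1." is 0 for any integer "iterations", so this
            // guard is always true and the decomposition below runs every
            // generation; the Matlab original throttles it to amortize the cost
            // of the eigendecomposition.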
            // to achieve O(N^2)
            C = triu(C, 0).add(triu(C, 1).transpose());
            // enforce symmetry to prevent complex numbers
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        if (vals.length - 1 > 0) {
            System.arraycopy(vals, 0, vals, 1, vals.length - 1);
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Get range of values.
     *
     * @param vpPairs Array of valuePenaltyPairs to get range from.
     * @return a double equal to maximum value minus minimum value.
     */
    private double valueRange(final ValuePenaltyPair[] vpPairs) {
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.POSITIVE_INFINITY;
        for (ValuePenaltyPair vpPair : vpPairs) {
            if (vpPair.value > max) {
                max = vpPair.value;
            }
            if (vpPair.value < min) {
                min = vpPair.value;
            }
        }
        return max - min;
    }

    /**
     * Used to sort fitness values. Sorting is always in lower value first
     * order.
     */
    private static final class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index into sorted array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index into sorted array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        @Override
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) (1438542 ^ (bits >>> 32) ^ bits);
        }
    }

    /**
     * Stores the value and penalty (for repair of out of bounds point).
     */
    private static final class ValuePenaltyPair {
        /** Objective function value. */
        private double value;
        /** Penalty value for repair of out-of-bounds points. */
        private double penalty;

        /**
         * @param value Function value.
         * @param penalty Out-of-bounds penalty.
         */
        ValuePenaltyPair(final double value, final double penalty) {
            this.value   = value;
            this.penalty = penalty;
        }
    }

    /**
     * Evaluates the objective function and penalizes points that violate the
     * defined bounds: out-of-range points are repaired and a penalty,
     * proportional to the repair distance, is added to the fitness value.
     */
    private final class FitnessFunction {
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor. */
        FitnessFunction() {
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public ValuePenaltyPair value(final double[] point) {
            final MultivariateFunction func = CMAESOptimizer.this.getObjectiveFunction();
            double value;
            double penalty = 0;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = func.value(repaired);
                penalty = penalty(point, repaired);
            } else {
                value = func.value(point);
            }
            value = isMinimize ? value : -value;
            penalty = isMinimize ? penalty : -penalty;
            return new ValuePenaltyPair(value, penalty);
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = JdkMath.abs(x[i] - repaired[i]);
                penalty += diff;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = JdkMath.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = JdkMath.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position: {@code k = 0} keeps the main diagonal,
     * {@code k = 1} keeps only the strictly upper triangular part.
     * @return Upper triangular part of matrix.
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix where each entry is the sum over the rows of the
     * corresponding column of m (i.e. the column sums).
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix or the column
     * matrix representing the diagonal if m is an n-by-n matrix.
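     * For example, {@code diag} of the column {@code [a, b]} is the matrix
     * {@code [[a, 0], [0, b]]}, and conversely.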
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
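     * For example, {@code repmat} of the row {@code [1, 2]} with {@code n = 2}
     * and {@code m = 2} yields {@code [[1, 2, 1, 2], [1, 2, 1, 2]]}.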
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
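     * For example, {@code sequence(1, 3, 1)} yields the column {@code [1, 2, 3]}.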
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
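     * For example, {@code inverse(new int[] {2, 0, 1})} returns {@code {1, 2, 0}}.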
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.sample();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.sample();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}
1301 }