1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one or more
3    * contributor license agreements.  See the NOTICE file distributed with
4    * this work for additional information regarding copyright ownership.
5    * The ASF licenses this file to You under the Apache License, Version 2.0
6    * (the "License"); you may not use this file except in compliance with
7    * the License.  You may obtain a copy of the License at
8    *
9    *      http://www.apache.org/licenses/LICENSE-2.0
10   *
11   * Unless required by applicable law or agreed to in writing, software
12   * distributed under the License is distributed on an "AS IS" BASIS,
13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14   * See the License for the specific language governing permissions and
15   * limitations under the License.
16   */
17  
18  /*
19   * This is not the original file distributed by the Apache Software Foundation
20   * It has been modified by the Hipparchus project
21   */
22  package org.hipparchus.optim.nonlinear.vector.leastsquares;
23  
24  import java.util.Arrays;
25  
26  import org.hipparchus.exception.MathIllegalStateException;
27  import org.hipparchus.linear.ArrayRealVector;
28  import org.hipparchus.linear.RealMatrix;
29  import org.hipparchus.optim.ConvergenceChecker;
30  import org.hipparchus.optim.LocalizedOptimFormats;
31  import org.hipparchus.optim.nonlinear.vector.leastsquares.LeastSquaresProblem.Evaluation;
32  import org.hipparchus.util.FastMath;
33  import org.hipparchus.util.Incrementor;
34  import org.hipparchus.util.Precision;
35  
36  
37  /**
38   * This class solves a least-squares problem using the Levenberg-Marquardt
39   * algorithm.
40   *
41   * <p>This implementation <em>should</em> work even for over-determined systems
42   * (i.e. systems having more points than equations). Over-determined systems
43   * are solved by ignoring the points which have the smallest impact according
44   * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
45   * are changed to implement this.</p>
46   *
47   * <p>The resolution engine is a simple translation of the MINPACK <a
48   * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
49   * changes. The changes include the over-determined resolution, the use of an
50   * inherited convergence checker and the Q.R. decomposition, which has been
51   * rewritten following the algorithm described in the
52   * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
53   * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
54   * <p>The authors of the original Fortran version are:
55   * <ul>
56   * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
57   * <li>Burton S. Garbow</li>
58   * <li>Kenneth E. Hillstrom</li>
59   * <li>Jorge J. More</li>
60   * </ul>
61   * The redistribution policy for MINPACK is available <a
62   * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
63   * is reproduced below.</p>
64   *
65   * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
66   * <tr><td>
67   *    Minpack Copyright Notice (1999) University of Chicago.
68   *    All rights reserved
69   * </td></tr>
70   * <tr><td>
71   * Redistribution and use in source and binary forms, with or without
72   * modification, are permitted provided that the following conditions
73   * are met:
74   * <ol>
75   *  <li>Redistributions of source code must retain the above copyright
76   *      notice, this list of conditions and the following disclaimer.</li>
77   * <li>Redistributions in binary form must reproduce the above
78   *     copyright notice, this list of conditions and the following
79   *     disclaimer in the documentation and/or other materials provided
80   *     with the distribution.</li>
81   * <li>The end-user documentation included with the redistribution, if any,
82   *     must include the following acknowledgment:
83   *     <code>This product includes software developed by the University of
84   *           Chicago, as Operator of Argonne National Laboratory.</code>
85   *     Alternately, this acknowledgment may appear in the software itself,
86   *     if and wherever such third-party acknowledgments normally appear.</li>
87   * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
88   *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
89   *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
90   *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
91   *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
92   *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
93   *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
94   *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
95   *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
96   *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
97   *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
98   *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
99   *     BE CORRECTED.</strong></li>
100  * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
101  *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
102  *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
103  *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
104  *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
105  *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
106  *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
107  *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
108  *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
109  *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
110  * </ol></td></tr>
111  * </table>
112  *
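 *
 * <p>A minimal usage sketch is shown below. It fits the simple linear model
 * y = a.x + b to a few sample observations; the data values, the model lambda
 * and the builder settings are illustrative placeholders only, and the imports
 * of the helper classes ({@code LeastSquaresBuilder}, {@code MatrixUtils},
 * {@code ArrayRealVector}, {@code Pair}) are omitted.</p>
 * <pre>{@code
 * // sample observations (illustrative values)
 * final double[] x = { 1.0, 2.0, 3.0 };
 * final double[] y = { 2.1, 3.9, 6.2 };
 *
 * // model value and Jacobian with respect to the parameters (a, b)
 * final MultivariateJacobianFunction model = params -> {
 *     final double a = params.getEntry(0);
 *     final double b = params.getEntry(1);
 *     final RealVector value    = new ArrayRealVector(x.length);
 *     final RealMatrix jacobian = MatrixUtils.createRealMatrix(x.length, 2);
 *     for (int i = 0; i < x.length; ++i) {
 *         value.setEntry(i, a * x[i] + b);
 *         jacobian.setEntry(i, 0, x[i]); // d(a.x + b) / da
 *         jacobian.setEntry(i, 1, 1.0);  // d(a.x + b) / db
 *     }
 *     return new Pair<>(value, jacobian);
 * };
 *
 * final LeastSquaresProblem problem = new LeastSquaresBuilder().
 *                                     model(model).
 *                                     target(y).
 *                                     start(new double[] { 1.0, 0.0 }).
 *                                     maxEvaluations(1000).
 *                                     maxIterations(100).
 *                                     build();
 *
 * final Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
 * final RealVector fittedParameters = optimum.getPoint();
 * }</pre>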
113  */
114 public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer {
115 
116     /** Twice the machine epsilon. */
117     private static final double TWO_EPS = 2 * Precision.EPSILON;
118 
119     /* configuration parameters */
120     /** Positive input variable used in determining the initial step bound. */
121     private final double initialStepBoundFactor;
122     /** Desired relative error in the sum of squares. */
123     private final double costRelativeTolerance;
124     /**  Desired relative error in the approximate solution parameters. */
125     private final double parRelativeTolerance;
126     /** Desired upper bound on the cosine of the angle between the function vector
127      * and the columns of the Jacobian. */
128     private final double orthoTolerance;
129     /** Threshold for QR ranking. */
130     private final double qrRankingThreshold;
131 
132     /** Default constructor.
133      * <p>
134      * The default values for the algorithm settings are:
135      * <ul>
136      *  <li>Initial step bound factor: 100</li>
137      *  <li>Cost relative tolerance: 1e-10</li>
138      *  <li>Parameters relative tolerance: 1e-10</li>
139      *  <li>Orthogonality tolerance: 1e-10</li>
140      *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
141      * </ul>
142      **/
143     public LevenbergMarquardtOptimizer() {
144         this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
145     }
146 
147     /**
148      * Construct an instance with all parameters specified.
149      *
150      * @param initialStepBoundFactor initial step bound factor
151      * @param costRelativeTolerance  cost relative tolerance
152      * @param parRelativeTolerance   parameters relative tolerance
153      * @param orthoTolerance         orthogonality tolerance
154      * @param qrRankingThreshold     threshold in the QR decomposition. Columns with a
155      *                               2-norm less than this threshold are considered to be
156      *                               all 0s.
157      */
158     public LevenbergMarquardtOptimizer(
159             final double initialStepBoundFactor,
160             final double costRelativeTolerance,
161             final double parRelativeTolerance,
162             final double orthoTolerance,
163             final double qrRankingThreshold) {
164         this.initialStepBoundFactor = initialStepBoundFactor;
165         this.costRelativeTolerance = costRelativeTolerance;
166         this.parRelativeTolerance = parRelativeTolerance;
167         this.orthoTolerance = orthoTolerance;
168         this.qrRankingThreshold = qrRankingThreshold;
169     }
170 
171     /**
172      * @param newInitialStepBoundFactor Positive input variable used in
173      * determining the initial step bound. This bound is set to the
174      * product of initialStepBoundFactor and the Euclidean norm of
175      * {@code diag * x} if non-zero, or else to {@code newInitialStepBoundFactor}
176      * itself. In most cases the factor should lie in the interval
177      * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
179      * @return a new instance.
180      */
181     public LevenbergMarquardtOptimizer withInitialStepBoundFactor(double newInitialStepBoundFactor) {
182         return new LevenbergMarquardtOptimizer(
183                 newInitialStepBoundFactor,
184                 costRelativeTolerance,
185                 parRelativeTolerance,
186                 orthoTolerance,
187                 qrRankingThreshold);
188     }
189 
190     /**
191      * @param newCostRelativeTolerance Desired relative error in the sum of squares.
192      * @return a new instance.
193      */
194     public LevenbergMarquardtOptimizer withCostRelativeTolerance(double newCostRelativeTolerance) {
195         return new LevenbergMarquardtOptimizer(
196                 initialStepBoundFactor,
197                 newCostRelativeTolerance,
198                 parRelativeTolerance,
199                 orthoTolerance,
200                 qrRankingThreshold);
201     }
202 
203     /**
204      * @param newParRelativeTolerance Desired relative error in the approximate solution
205      * parameters.
206      * @return a new instance.
207      */
208     public LevenbergMarquardtOptimizer withParameterRelativeTolerance(double newParRelativeTolerance) {
209         return new LevenbergMarquardtOptimizer(
210                 initialStepBoundFactor,
211                 costRelativeTolerance,
212                 newParRelativeTolerance,
213                 orthoTolerance,
214                 qrRankingThreshold);
215     }
216 
217     /**
218      * Builds a new instance with the given orthogonality tolerance.
219      *
220      * @param newOrthoTolerance Desired upper bound on the cosine of the angle between
221      * the function vector and the columns of the Jacobian.
222      * @return a new instance.
223      */
224     public LevenbergMarquardtOptimizer withOrthoTolerance(double newOrthoTolerance) {
225         return new LevenbergMarquardtOptimizer(
226                 initialStepBoundFactor,
227                 costRelativeTolerance,
228                 parRelativeTolerance,
229                 newOrthoTolerance,
230                 qrRankingThreshold);
231     }
232 
233     /**
234      * @param newQRRankingThreshold Desired threshold for QR ranking.
235      * If the squared norm of a column vector is smaller than or equal to this
236      * threshold during QR decomposition, it is considered to be a zero vector
237      * and hence the rank of the matrix is reduced.
238      * @return a new instance.
239      */
240     public LevenbergMarquardtOptimizer withRankingThreshold(double newQRRankingThreshold) {
241         return new LevenbergMarquardtOptimizer(
242                 initialStepBoundFactor,
243                 costRelativeTolerance,
244                 parRelativeTolerance,
245                 orthoTolerance,
246                 newQRRankingThreshold);
247     }
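
    // Note: the with* methods above do not modify this (immutable) instance; each one
    // returns a reconfigured copy, so calls can be chained. A purely illustrative sketch
    // (the tolerance values are arbitrary examples, not recommended settings):
    //
    //   final LevenbergMarquardtOptimizer tuned = new LevenbergMarquardtOptimizer()
    //           .withCostRelativeTolerance(1.0e-12)
    //           .withParameterRelativeTolerance(1.0e-12)
    //           .withOrthoTolerance(1.0e-8);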
248 
249     /**
250      * Gets the value of a tuning parameter.
251      * @see #withInitialStepBoundFactor(double)
252      *
253      * @return the parameter's value.
254      */
255     public double getInitialStepBoundFactor() {
256         return initialStepBoundFactor;
257     }
258 
259     /**
260      * Gets the value of a tuning parameter.
261      * @see #withCostRelativeTolerance(double)
262      *
263      * @return the parameter's value.
264      */
265     public double getCostRelativeTolerance() {
266         return costRelativeTolerance;
267     }
268 
269     /**
270      * Gets the value of a tuning parameter.
271      * @see #withParameterRelativeTolerance(double)
272      *
273      * @return the parameter's value.
274      */
275     public double getParameterRelativeTolerance() {
276         return parRelativeTolerance;
277     }
278 
279     /**
280      * Gets the value of a tuning parameter.
281      * @see #withOrthoTolerance(double)
282      *
283      * @return the parameter's value.
284      */
285     public double getOrthoTolerance() {
286         return orthoTolerance;
287     }
288 
289     /**
290      * Gets the value of a tuning parameter.
291      * @see #withRankingThreshold(double)
292      *
293      * @return the parameter's value.
294      */
295     public double getRankingThreshold() {
296         return qrRankingThreshold;
297     }
298 
299     /** {@inheritDoc} */
300     @Override
301     public Optimum optimize(final LeastSquaresProblem problem) {
302         // Pull in relevant data from the problem as locals.
303         final int nR = problem.getObservationSize(); // Number of observed data.
304         final int nC = problem.getParameterSize(); // Number of parameters.
305         // Counters.
306         final Incrementor iterationCounter = problem.getIterationCounter();
307         final Incrementor evaluationCounter = problem.getEvaluationCounter();
308         // Convergence criterion.
309         final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();
310 
311         // arrays shared with the other private methods
312         final int solvedCols  = FastMath.min(nR, nC);
313         /* Parameters evolution direction associated with lmPar. */
314         double[] lmDir = new double[nC];
315         /* Levenberg-Marquardt parameter. */
316         double lmPar = 0;
317 
318         // local point
319         double   delta   = 0;
320         double   xNorm   = 0;
321         double[] diag    = new double[nC];
322         double[] oldX    = new double[nC];
323         double[] oldRes  = new double[nR];
324         double[] qtf     = new double[nR];
325         double[] work1   = new double[nC];
326         double[] work2   = new double[nC];
327         double[] work3   = new double[nC];
328 
329 
330         // Evaluate the function at the starting point and calculate its norm.
331         evaluationCounter.increment();
332         //value will be reassigned in the loop
333         Evaluation current = problem.evaluate(problem.getStart());
334         double[] currentResiduals = current.getResiduals().toArray();
335         double currentCost = current.getCost();
336         double[] currentPoint = current.getPoint().toArray();
337 
338         // Outer loop.
339         boolean firstIteration = true;
340         while (true) {
341             iterationCounter.increment();
342 
343             final Evaluation previous = current;
344 
345             // QR decomposition of the jacobian matrix
346             final InternalData internalData = qrDecomposition(current.getJacobian(), solvedCols);
347             final double[][] weightedJacobian = internalData.weightedJacobian;
348             final int[] permutation = internalData.permutation;
349             final double[] diagR = internalData.diagR;
350             final double[] jacNorm = internalData.jacNorm;
351 
352             //residuals already have weights applied
353             double[] weightedResidual = currentResiduals;
354             for (int i = 0; i < nR; i++) {
355                 qtf[i] = weightedResidual[i];
356             }
357 
358             // compute Qt.res
359             qTy(qtf, internalData);
360 
361             // now we don't need Q anymore,
362             // so let jacobian contain the R matrix with its diagonal elements
363             for (int k = 0; k < solvedCols; ++k) {
364                 int pk = permutation[k];
365                 weightedJacobian[k][pk] = diagR[pk];
366             }
367 
368             if (firstIteration) {
369                 // scale the point according to the norms of the columns
370                 // of the initial jacobian
371                 xNorm = 0;
372                 for (int k = 0; k < nC; ++k) {
373                     double dk = jacNorm[k];
374                     if (dk == 0) {
375                         dk = 1.0;
376                     }
377                     double xk = dk * currentPoint[k];
378                     xNorm  += xk * xk;
379                     diag[k] = dk;
380                 }
381                 xNorm = FastMath.sqrt(xNorm);
382 
383                 // initialize the step bound delta
384                 delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
385             }
386 
387             // check orthogonality between function vector and jacobian columns
388             double maxCosine = 0;
389             if (currentCost != 0) {
390                 for (int j = 0; j < solvedCols; ++j) {
391                     int    pj = permutation[j];
392                     double s  = jacNorm[pj];
393                     if (s != 0) {
394                         double sum = 0;
395                         for (int i = 0; i <= j; ++i) {
396                             sum += weightedJacobian[i][pj] * qtf[i];
397                         }
398                         maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
399                     }
400                 }
401             }
402             if (maxCosine <= orthoTolerance) {
403                 // Convergence has been reached.
404                 return Optimum.of(
405                         current,
406                         evaluationCounter.getCount(),
407                         iterationCounter.getCount());
408             }
409 
410             // rescale if necessary
411             for (int j = 0; j < nC; ++j) {
412                 diag[j] = FastMath.max(diag[j], jacNorm[j]);
413             }
414 
415             // Inner loop.
416             for (double ratio = 0; ratio < 1.0e-4;) {
417 
418                 // save the state
419                 for (int j = 0; j < solvedCols; ++j) {
420                     int pj = permutation[j];
421                     oldX[pj] = currentPoint[pj];
422                 }
423                 final double previousCost = currentCost;
424                 double[] tmpVec = weightedResidual;
425                 weightedResidual = oldRes;
426                 oldRes    = tmpVec;
427 
428                 // determine the Levenberg-Marquardt parameter
429                 lmPar = determineLMParameter(qtf, delta, diag,
430                                      internalData, solvedCols,
431                                      work1, work2, work3, lmDir, lmPar);
432 
433                 // compute the new point and the norm of the evolution direction
434                 double lmNorm = 0;
435                 for (int j = 0; j < solvedCols; ++j) {
436                     int pj = permutation[j];
437                     lmDir[pj] = -lmDir[pj];
438                     currentPoint[pj] = oldX[pj] + lmDir[pj];
439                     double s = diag[pj] * lmDir[pj];
440                     lmNorm  += s * s;
441                 }
442                 lmNorm = FastMath.sqrt(lmNorm);
443                 // on the first iteration, adjust the initial step bound.
444                 if (firstIteration) {
445                     delta = FastMath.min(delta, lmNorm);
446                 }
447 
448                 // Evaluate the function at x + p and calculate its norm.
449                 evaluationCounter.increment();
450                 current = problem.evaluate(new ArrayRealVector(currentPoint));
451                 currentResiduals = current.getResiduals().toArray();
452                 currentCost = current.getCost();
453                 currentPoint = current.getPoint().toArray();
454 
455                 // compute the scaled actual reduction
456                 double actRed = -1.0;
457                 if (0.1 * currentCost < previousCost) {
458                     double r = currentCost / previousCost;
459                     actRed = 1.0 - r * r;
460                 }
461 
462                 // compute the scaled predicted reduction
463                 // and the scaled directional derivative
464                 for (int j = 0; j < solvedCols; ++j) {
465                     int pj = permutation[j];
466                     double dirJ = lmDir[pj];
467                     work1[j] = 0;
468                     for (int i = 0; i <= j; ++i) {
469                         work1[i] += weightedJacobian[i][pj] * dirJ;
470                     }
471                 }
472                 double coeff1 = 0;
473                 for (int j = 0; j < solvedCols; ++j) {
474                     coeff1 += work1[j] * work1[j];
475                 }
476                 double pc2 = previousCost * previousCost;
477                 coeff1 /= pc2;
478                 double coeff2 = lmPar * lmNorm * lmNorm / pc2;
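                // coeff1 = |J.p|^2 / previousCost^2 and coeff2 = lmPar * |D.p|^2 / previousCost^2,
                // so preRed is the scaled predicted reduction and dirDer the scaled directional
                // derivative of the cost along lmDir (as in the MINPACK lmder routine)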
479                 double preRed = coeff1 + 2 * coeff2;
480                 double dirDer = -(coeff1 + coeff2);
481 
482                 // ratio of the actual to the predicted reduction
483                 ratio = (preRed == 0) ? 0 : (actRed / preRed);
484 
485                 // update the step bound
486                 if (ratio <= 0.25) {
487                     double tmp =
488                         (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
489                     if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
490                         tmp = 0.1;
491                     }
492                     delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
493                     lmPar /= tmp;
494                 } else if ((lmPar == 0) || (ratio >= 0.75)) {
495                     delta = 2 * lmNorm;
496                     lmPar *= 0.5;
497                 }
498 
499                 // test for successful iteration.
500                 if (ratio >= 1.0e-4) {
501                     // successful iteration, update the norm
502                     firstIteration = false;
503                     xNorm = 0;
504                     for (int k = 0; k < nC; ++k) {
505                         double xK = diag[k] * currentPoint[k];
506                         xNorm += xK * xK;
507                     }
508                     xNorm = FastMath.sqrt(xNorm);
509 
510                     // tests for convergence.
511                     if (checker != null && checker.converged(iterationCounter.getCount(), previous, current)) {
512                         return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
513                     }
514                 } else {
515                     // failed iteration, reset the previous values
516                     currentCost = previousCost;
517                     for (int j = 0; j < solvedCols; ++j) {
518                         int pj = permutation[j];
519                         currentPoint[pj] = oldX[pj];
520                     }
521                     tmpVec    = weightedResidual;
522                     weightedResidual = oldRes;
523                     oldRes    = tmpVec;
524                     // Reset "current" to previous values.
525                     current = previous;
526                 }
527 
528                 // Default convergence criteria.
529                 if ((FastMath.abs(actRed) <= costRelativeTolerance &&
530                      preRed <= costRelativeTolerance &&
531                      ratio <= 2.0) ||
532                     delta <= parRelativeTolerance * xNorm) {
533                     return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
534                 }
535 
536                 // tests for termination and stringent tolerances
537                 if (FastMath.abs(actRed) <= TWO_EPS &&
538                     preRed <= TWO_EPS &&
539                     ratio <= 2.0) {
540                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
541                                                         costRelativeTolerance);
542                 } else if (delta <= TWO_EPS * xNorm) {
543                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
544                                                         parRelativeTolerance);
545                 } else if (maxCosine <= TWO_EPS) {
546                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
547                                                         orthoTolerance);
548                 }
549             }
550         }
551     }
552 
553     /**
554      * Holds internal data.
555      * This structure was created so that all optimizer fields can be "final".
556      * Code should be further refactored in order to not pass around arguments
557      * that will be modified in-place (cf. "work" arrays).
558      */
559     private static class InternalData {
560         /** Weighted Jacobian. */
561         private final double[][] weightedJacobian;
562         /** Columns permutation array. */
563         private final int[] permutation;
564         /** Rank of the Jacobian matrix. */
565         private final int rank;
566         /** Diagonal elements of the R matrix in the QR decomposition. */
567         private final double[] diagR;
568         /** Norms of the columns of the jacobian matrix. */
569         private final double[] jacNorm;
570         /** Coefficients of the Householder transforms vectors. */
571         private final double[] beta;
572 
573         /**
574          * <p>
575          * All arrays are stored by reference
576          * </p>
577          * @param weightedJacobian Weighted Jacobian.
578          * @param permutation Columns permutation array.
579          * @param rank Rank of the Jacobian matrix.
580          * @param diagR Diagonal elements of the R matrix in the QR decomposition.
581          * @param jacNorm Norms of the columns of the jacobian matrix.
582          * @param beta Coefficients of the Householder transforms vectors.
583          */
584         InternalData(double[][] weightedJacobian, // NOPMD - storing array references is intentional and documented here
585                      int[] permutation,           // NOPMD - storing array references is intentional and documented here
586                      int rank,
587                      double[] diagR,              // NOPMD - storing array references is intentional and documented here
588                      double[] jacNorm,            // NOPMD - storing array references is intentional and documented here
589                      double[] beta) {             // NOPMD - storing array references is intentional and documented here
590             this.weightedJacobian = weightedJacobian;
591             this.permutation = permutation;
592             this.rank = rank;
593             this.diagR = diagR;
594             this.jacNorm = jacNorm;
595             this.beta = beta;
596         }
597     }
598 
599     /**
600      * Determines the Levenberg-Marquardt parameter.
601      *
602      * <p>This implementation is a translation in Java of the MINPACK
603      * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
604      * routine.</p>
605      * <p>This method sets the lmPar and lmDir attributes.</p>
606      * <p>The authors of the original Fortran function are:</p>
607      * <ul>
608      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
609      *   <li>Burton  S. Garbow</li>
610      *   <li>Kenneth E. Hillstrom</li>
611      *   <li>Jorge   J. More</li>
612      * </ul>
613      * <p>Luc Maisonobe did the Java translation.</p>
614      *
615      * @param qy Array containing qTy.
616      * @param delta Upper bound on the Euclidean norm of diagR * lmDir.
617      * @param diag Diagonal matrix.
618      * @param internalData Data (modified in-place in this method).
619      * @param solvedCols Number of solved points.
620      * @param work1 work array
621      * @param work2 work array
622      * @param work3 work array
623      * @param lmDir the "returned" LM direction will be stored in this array.
624      * @param lmPar the value of the LM parameter from the previous iteration.
625      * @return the new LM parameter
626      */
627     private double determineLMParameter(double[] qy, double delta, double[] diag,
628                                       InternalData internalData, int solvedCols,
629                                       double[] work1, double[] work2, double[] work3,
630                                       double[] lmDir, double lmPar) {
631         final double[][] weightedJacobian = internalData.weightedJacobian;
632         final int[] permutation = internalData.permutation;
633         final int rank = internalData.rank;
634         final double[] diagR = internalData.diagR;
635 
636         final int nC = weightedJacobian[0].length;
637 
638         // compute and store in x the Gauss-Newton direction; if the
639         // jacobian is rank-deficient, obtain a least squares solution
640         for (int j = 0; j < rank; ++j) {
641             lmDir[permutation[j]] = qy[j];
642         }
643         for (int j = rank; j < nC; ++j) {
644             lmDir[permutation[j]] = 0;
645         }
646         for (int k = rank - 1; k >= 0; --k) {
647             int pk = permutation[k];
648             double ypk = lmDir[pk] / diagR[pk];
649             for (int i = 0; i < k; ++i) {
650                 lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
651             }
652             lmDir[pk] = ypk;
653         }
654 
655         // evaluate the function at the origin, and test
656         // for acceptance of the Gauss-Newton direction
657         double dxNorm = 0;
658         for (int j = 0; j < solvedCols; ++j) {
659             int pj = permutation[j];
660             double s = diag[pj] * lmDir[pj];
661             work1[pj] = s;
662             dxNorm += s * s;
663         }
664         dxNorm = FastMath.sqrt(dxNorm);
665         double fp = dxNorm - delta;
666         if (fp <= 0.1 * delta) {
667             lmPar = 0;
668             return lmPar;
669         }
670 
671         // if the jacobian is not rank deficient, the Newton step provides
672         // a lower bound, parl, for the zero of the function;
673         // otherwise set this bound to zero
674         double sum2;
675         double parl = 0;
676         if (rank == solvedCols) {
677             for (int j = 0; j < solvedCols; ++j) {
678                 int pj = permutation[j];
679                 work1[pj] *= diag[pj] / dxNorm;
680             }
681             sum2 = 0;
682             for (int j = 0; j < solvedCols; ++j) {
683                 int pj = permutation[j];
684                 double sum = 0;
685                 for (int i = 0; i < j; ++i) {
686                     sum += weightedJacobian[i][pj] * work1[permutation[i]];
687                 }
688                 double s = (work1[pj] - sum) / diagR[pj];
689                 work1[pj] = s;
690                 sum2 += s * s;
691             }
692             parl = fp / (delta * sum2);
693         }
694 
695         // calculate an upper bound, paru, for the zero of the function
696         sum2 = 0;
697         for (int j = 0; j < solvedCols; ++j) {
698             int pj = permutation[j];
699             double sum = 0;
700             for (int i = 0; i <= j; ++i) {
701                 sum += weightedJacobian[i][pj] * qy[i];
702             }
703             sum /= diag[pj];
704             sum2 += sum * sum;
705         }
706         double gNorm = FastMath.sqrt(sum2);
707         double paru = gNorm / delta;
708         if (paru == 0) {
709             paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
710         }
711 
712         // if the input par lies outside of the interval (parl,paru),
713         // set par to the closer endpoint
714         lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
715         if (lmPar == 0) {
716             lmPar = gNorm / dxNorm;
717         }
718 
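        // iterate (at most 10 times, as in the MINPACK lmpar routine) on lmPar until the
        // scaled step norm |diag.lmDir| matches the trust region radius delta to within 10%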
719         for (int countdown = 10; countdown >= 0; --countdown) {
720 
721             // evaluate the function at the current value of lmPar
722             if (lmPar == 0) {
723                 lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
724             }
725             double sPar = FastMath.sqrt(lmPar);
726             for (int j = 0; j < solvedCols; ++j) {
727                 int pj = permutation[j];
728                 work1[pj] = sPar * diag[pj];
729             }
730             determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);
731 
732             dxNorm = 0;
733             for (int j = 0; j < solvedCols; ++j) {
734                 int pj = permutation[j];
735                 double s = diag[pj] * lmDir[pj];
736                 work3[pj] = s;
737                 dxNorm += s * s;
738             }
739             dxNorm = FastMath.sqrt(dxNorm);
740             double previousFP = fp;
741             fp = dxNorm - delta;
742 
743             // if the function is small enough, accept the current value
744             // of lmPar; also test for the exceptional cases where parl is zero
745             if (FastMath.abs(fp) <= 0.1 * delta ||
746                 (parl == 0 &&
747                  fp <= previousFP &&
748                  previousFP < 0)) {
749                 return lmPar;
750             }
751 
752             // compute the Newton correction
753             for (int j = 0; j < solvedCols; ++j) {
754                 int pj = permutation[j];
755                 work1[pj] = work3[pj] * diag[pj] / dxNorm;
756             }
757             for (int j = 0; j < solvedCols; ++j) {
758                 int pj = permutation[j];
759                 work1[pj] /= work2[j];
760                 double tmp = work1[pj];
761                 for (int i = j + 1; i < solvedCols; ++i) {
762                     work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
763                 }
764             }
765             sum2 = 0;
766             for (int j = 0; j < solvedCols; ++j) {
767                 double s = work1[permutation[j]];
768                 sum2 += s * s;
769             }
770             double correction = fp / (delta * sum2);
771 
772             // depending on the sign of the function, update parl or paru.
773             if (fp > 0) {
774                 parl = FastMath.max(parl, lmPar);
775             } else if (fp < 0) {
776                 paru = FastMath.min(paru, lmPar);
777             }
778 
779             // compute an improved estimate for lmPar
780             lmPar = FastMath.max(parl, lmPar + correction);
781         }
782 
783         return lmPar;
784     }
785 
786     /**
787      * Solve a*x = b and d*x = 0 in the least squares sense.
788      * <p>This implementation is a translation in Java of the MINPACK
789      * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
790      * routine.</p>
791      * <p>This method sets the lmDir and lmDiag attributes.</p>
792      * <p>The authors of the original Fortran function are:</p>
793      * <ul>
794      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
795      *   <li>Burton  S. Garbow</li>
796      *   <li>Kenneth E. Hillstrom</li>
797      *   <li>Jorge   J. More</li>
798      * </ul>
799      * <p>Luc Maisonobe did the Java translation.</p>
800      *
801      * @param qy array containing qTy
802      * @param diag diagonal matrix
803      * @param lmDiag diagonal elements associated with lmDir
804      * @param internalData Data (modified in-place in this method).
805      * @param solvedCols Number of solved points.
806      * @param work work array
807      * @param lmDir the "returned" LM direction is stored in this array
808      */
809     private void determineLMDirection(double[] qy, double[] diag,
810                                       double[] lmDiag,
811                                       InternalData internalData,
812                                       int solvedCols,
813                                       double[] work,
814                                       double[] lmDir) {
815         final int[] permutation = internalData.permutation;
816         final double[][] weightedJacobian = internalData.weightedJacobian;
817         final double[] diagR = internalData.diagR;
818 
819         // copy R and Qty to preserve input and initialize s
820         //  in particular, save the diagonal elements of R in lmDir
821         for (int j = 0; j < solvedCols; ++j) {
822             int pj = permutation[j];
823             for (int i = j + 1; i < solvedCols; ++i) {
824                 weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
825             }
826             lmDir[j] = diagR[pj];
827             work[j]  = qy[j];
828         }
829 
830         // eliminate the diagonal matrix d using a Givens rotation
831         for (int j = 0; j < solvedCols; ++j) {
832 
833             // prepare the row of d to be eliminated, locating the
834             // diagonal element using p from the Q.R. factorization
835             int pj = permutation[j];
836             double dpj = diag[pj];
837             if (dpj != 0) {
838                 Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
839             }
840             lmDiag[j] = dpj;
841 
842             //  the transformations to eliminate the row of d
843             // modify only a single element of Qty
844             // beyond the first n, which is initially zero.
845             double qtbpj = 0;
846             for (int k = j; k < solvedCols; ++k) {
847                 int pk = permutation[k];
848 
849                 // determine a Givens rotation which eliminates the
850                 // appropriate element in the current row of d
851                 if (lmDiag[k] != 0) {
852 
853                     final double sin;
854                     final double cos;
855                     double rkk = weightedJacobian[k][pk];
856                     if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
857                         final double cotan = rkk / lmDiag[k];
858                         sin   = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
859                         cos   = sin * cotan;
860                     } else {
861                         final double tan = lmDiag[k] / rkk;
862                         cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
863                         sin = cos * tan;
864                     }
865 
866                     // compute the modified diagonal element of R and
867                     // the modified element of (Qty,0)
868                     weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
869                     final double temp = cos * work[k] + sin * qtbpj;
870                     qtbpj = -sin * work[k] + cos * qtbpj;
871                     work[k] = temp;
872 
873                     // accumulate the transformation in the row of s
874                     for (int i = k + 1; i < solvedCols; ++i) {
875                         double rik = weightedJacobian[i][pk];
876                         final double temp2 = cos * rik + sin * lmDiag[i];
877                         lmDiag[i] = -sin * rik + cos * lmDiag[i];
878                         weightedJacobian[i][pk] = temp2;
879                     }
880                 }
881             }
882 
883             // store the diagonal element of s and restore
884             // the corresponding diagonal element of R
885             lmDiag[j] = weightedJacobian[j][permutation[j]];
886             weightedJacobian[j][permutation[j]] = lmDir[j];
887         }
888 
889         // solve the triangular system for z, if the system is
890         // singular, then obtain a least squares solution
891         int nSing = solvedCols;
892         for (int j = 0; j < solvedCols; ++j) {
893             if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
894                 nSing = j;
895             }
896             if (nSing < solvedCols) {
897                 work[j] = 0;
898             }
899         }
900         if (nSing > 0) {
901             for (int j = nSing - 1; j >= 0; --j) {
902                 int pj = permutation[j];
903                 double sum = 0;
904                 for (int i = j + 1; i < nSing; ++i) {
905                     sum += weightedJacobian[i][pj] * work[i];
906                 }
907                 work[j] = (work[j] - sum) / lmDiag[j];
908             }
909         }
910 
911         // permute the components of z back to components of lmDir
912         for (int j = 0; j < lmDir.length; ++j) {
913             lmDir[permutation[j]] = work[j];
914         }
915     }
916 
917     /**
918      * Decompose a matrix A as A.P = Q.R using Householder transforms.
919      * <p>As suggested in the P. Lascaux and R. Theodor book
920      * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
921      * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
922      * the Householder transforms with u<sub>k</sub> unit vectors such that:
923      * <pre>
924      * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
925      * </pre>
926      * we use v<sub>k</sub> non-unit vectors such that:
927      * <pre>
928      * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
929      * </pre>
930      * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
931      * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
932      * them from the v<sub>k</sub> vectors would be costly.</p>
933      * <p>This decomposition handles rank-deficient cases since the transformations
934      * are performed in non-increasing column norms order thanks to column
935      * pivoting. The diagonal elements of the R matrix are therefore also in
936      * non-increasing absolute values order.</p>
937      *
938      * @param jacobian Weighted Jacobian matrix at the current point.
939      * @param solvedCols Number of solved points.
940      * @return data used in other methods of this class.
941      * @throws MathIllegalStateException if the decomposition cannot be performed.
942      */
943     private InternalData qrDecomposition(RealMatrix jacobian, int solvedCols)
944         throws MathIllegalStateException {
945         // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
946         // hence the multiplication by -1.
947         final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();
948 
949         final int nR = weightedJacobian.length;
950         final int nC = weightedJacobian[0].length;
951 
952         final int[] permutation = new int[nC];
953         final double[] diagR = new double[nC];
954         final double[] jacNorm = new double[nC];
955         final double[] beta = new double[nC];
956 
957         // initializations
958         for (int k = 0; k < nC; ++k) {
959             permutation[k] = k;
960             double norm2 = 0;
961             for (int i = 0; i < nR; ++i) {
962                 double akk = weightedJacobian[i][k];
963                 norm2 += akk * akk;
964             }
965             jacNorm[k] = FastMath.sqrt(norm2);
966         }
967 
968         // transform the matrix column after column
969         for (int k = 0; k < nC; ++k) {
970 
971             // select the column with the greatest norm on active components
972             int nextColumn = -1;
973             double ak2 = Double.NEGATIVE_INFINITY;
974             for (int i = k; i < nC; ++i) {
975                 double norm2 = 0;
976                 for (int j = k; j < nR; ++j) {
977                     double aki = weightedJacobian[j][permutation[i]];
978                     norm2 += aki * aki;
979                 }
980                 if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
981                     throw new MathIllegalStateException(LocalizedOptimFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
982                                                         nR, nC);
983                 }
984                 if (norm2 > ak2) {
985                     nextColumn = i;
986                     ak2        = norm2;
987                 }
988             }
989             if (ak2 <= qrRankingThreshold) {
990                 return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
991             }
992             int pk = permutation[nextColumn];
993             permutation[nextColumn] = permutation[k];
994             permutation[k] = pk;
995 
996             // choose alpha such that Hk.u = alpha ek
997             double akk = weightedJacobian[k][pk];
998             double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
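            // for the Householder vector vk = ak - alpha.ek (see method javadoc),
            // |vk|^2 = 2.(|ak|^2 - akk.alpha) since alpha^2 = |ak|^2, so betak = 2 / |vk|^2 is: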
999             double betak = 1.0 / (ak2 - akk * alpha);
1000             beta[pk] = betak;
1001 
1002             // transform the current column
1003             diagR[pk] = alpha;
1004             weightedJacobian[k][pk] -= alpha;
1005 
1006             // transform the remaining columns
1007             for (int dk = nC - 1 - k; dk > 0; --dk) {
1008                 double gamma = 0;
1009                 for (int j = k; j < nR; ++j) {
1010                     gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
1011                 }
1012                 gamma *= betak;
1013                 for (int j = k; j < nR; ++j) {
1014                     weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
1015                 }
1016             }
1017         }
1018 
1019         return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
1020     }
1021 
1022     /**
1023      * Compute the product Qt.y for some Q.R. decomposition.
1024      *
1025      * @param y vector to multiply (will be overwritten with the result)
1026      * @param internalData Data.
1027      */
1028     private void qTy(double[] y,
1029                      InternalData internalData) {
1030         final double[][] weightedJacobian = internalData.weightedJacobian;
1031         final int[] permutation = internalData.permutation;
1032         final double[] beta = internalData.beta;
1033 
1034         final int nR = weightedJacobian.length;
1035         final int nC = weightedJacobian[0].length;
1036 
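        // apply the Householder reflectors Hk = I - betak.vk.vk^t in order; each vk is stored
        // in rows k..nR-1 of the k-th transformed (permuted) column of the weighted Jacobian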
1037         for (int k = 0; k < nC; ++k) {
1038             int pk = permutation[k];
1039             double gamma = 0;
1040             for (int i = k; i < nR; ++i) {
1041                 gamma += weightedJacobian[i][pk] * y[i];
1042             }
1043             gamma *= beta[pk];
1044             for (int i = k; i < nR; ++i) {
1045                 y[i] -= gamma * weightedJacobian[i][pk];
1046             }
1047         }
1048     }
1049 }