LevenbergMarquardtOptimizer.java

  1. /*
  2.  * Licensed to the Apache Software Foundation (ASF) under one or more
  3.  * contributor license agreements.  See the NOTICE file distributed with
  4.  * this work for additional information regarding copyright ownership.
  5.  * The ASF licenses this file to You under the Apache License, Version 2.0
  6.  * (the "License"); you may not use this file except in compliance with
  7.  * the License.  You may obtain a copy of the License at
  8.  *
  9.  *      https://www.apache.org/licenses/LICENSE-2.0
  10.  *
  11.  * Unless required by applicable law or agreed to in writing, software
  12.  * distributed under the License is distributed on an "AS IS" BASIS,
  13.  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14.  * See the License for the specific language governing permissions and
  15.  * limitations under the License.
  16.  */

  17. /*
  18.  * This is not the original file distributed by the Apache Software Foundation
  19.  * It has been modified by the Hipparchus project
  20.  */
  21. package org.hipparchus.optim.nonlinear.vector.leastsquares;

  22. import java.util.Arrays;

  23. import org.hipparchus.exception.MathIllegalStateException;
  24. import org.hipparchus.linear.ArrayRealVector;
  25. import org.hipparchus.linear.RealMatrix;
  26. import org.hipparchus.optim.ConvergenceChecker;
  27. import org.hipparchus.optim.LocalizedOptimFormats;
  28. import org.hipparchus.optim.nonlinear.vector.leastsquares.LeastSquaresProblem.Evaluation;
  29. import org.hipparchus.util.FastMath;
  30. import org.hipparchus.util.Incrementor;
  31. import org.hipparchus.util.Precision;


  32. /**
  33.  * This class solves a least-squares problem using the Levenberg-Marquardt
  34.  * algorithm.
  35.  *
  36.  * <p>This implementation <em>should</em> work even for over-determined systems
  37.  * (i.e. systems having more points than equations). Over-determined systems
  38.  * are solved by ignoring the points which have the smallest impact according
  39.  * to their jacobian column norm. Only the rank of the matrix and some loop bounds
  40.  * are changed to implement this.</p>
  41.  *
  42.  * <p>The resolution engine is a simple translation of the MINPACK <a
  43.  * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
  44.  * changes. The changes include the over-determined resolution, the use of an
  45.  * inherited convergence checker, and the Q.R. decomposition, which has been
  46.  * rewritten following the algorithm described in the
  47.  * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
  48.  * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
  49.  * <p>The authors of the original fortran version are:</p>
  50.  * <ul>
  51.  * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
  52.  * <li>Burton S. Garbow</li>
  53.  * <li>Kenneth E. Hillstrom</li>
  54.  * <li>Jorge J. More</li>
  55.  * </ul>
  56.  * <p>The redistribution policy for MINPACK is available <a
  57.  * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
  58.  * is reproduced below.</p>
  59.  *
  60.  * <blockquote>
  61.  * <p>
  62.  *    Minpack Copyright Notice (1999) University of Chicago.
  63.  *    All rights reserved
  64.  * </p>
  65.  * <p>
  66.  * Redistribution and use in source and binary forms, with or without
  67.  * modification, are permitted provided that the following conditions
  68.  * are met:</p>
  69.  * <ol>
  70.  *  <li>Redistributions of source code must retain the above copyright
  71.  *      notice, this list of conditions and the following disclaimer.</li>
  72.  * <li>Redistributions in binary form must reproduce the above
  73.  *     copyright notice, this list of conditions and the following
  74.  *     disclaimer in the documentation and/or other materials provided
  75.  *     with the distribution.</li>
  76.  * <li>The end-user documentation included with the redistribution, if any,
  77.  *     must include the following acknowledgment:
  78.  *     <code>This product includes software developed by the University of
  79.  *           Chicago, as Operator of Argonne National Laboratory.</code>
  80.  *     Alternately, this acknowledgment may appear in the software itself,
  81.  *     if and wherever such third-party acknowledgments normally appear.</li>
  82.  * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
  83.  *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
  84.  *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
  85.  *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
  86.  *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
  87.  *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
  88.  *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
  89.  *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
  90.  *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
  91.  *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
  92.  *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
  93.  *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
  94.  *     BE CORRECTED.</strong></li>
  95.  * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
  96.  *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
  97.  *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
  98.  *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
  99.  *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
  100.  *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
  101.  *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
  102.  *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
  103.  *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
  104.  *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
  105.  * </ol>
  106.  * </blockquote>
  107.  *
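 * <p>Usage sketch (illustrative only; it assumes a {@code LeastSquaresProblem}
 * named {@code problem} has been assembled elsewhere, for instance with the
 * {@code LeastSquaresBuilder} of this package):</p>
 * <pre>{@code
 * // start from the default settings and tighten two tolerances
 * LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer()
 *         .withCostRelativeTolerance(1.0e-12)
 *         .withParameterRelativeTolerance(1.0e-12);
 * Optimum optimum = optimizer.optimize(problem);
 * double[] estimated = optimum.getPoint().toArray(); // fitted parameters
 * double rms = optimum.getRMS();                     // root-mean-square residual
 * }</pre>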
  108.  */
  109. public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer {

  110.     /** Twice the machine epsilon. */
  111.     private static final double TWO_EPS = 2 * Precision.EPSILON;

  112.     /* configuration parameters */
  113.     /** Positive input variable used in determining the initial step bound. */
  114.     private final double initialStepBoundFactor;
  115.     /** Desired relative error in the sum of squares. */
  116.     private final double costRelativeTolerance;
  117.     /**  Desired relative error in the approximate solution parameters. */
  118.     private final double parRelativeTolerance;
  119.     /** Desired max cosine on the orthogonality between the function vector
  120.      * and the columns of the jacobian. */
  121.     private final double orthoTolerance;
  122.     /** Threshold for QR ranking. */
  123.     private final double qrRankingThreshold;

  124.     /** Default constructor.
  125.      * <p>
  126.      * The default values for the algorithm settings are:
  127.      * <ul>
  128.      *  <li>Initial step bound factor: 100</li>
  129.      *  <li>Cost relative tolerance: 1e-10</li>
  130.      *  <li>Parameters relative tolerance: 1e-10</li>
  131.      *  <li>Orthogonality tolerance: 1e-10</li>
  132.      *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
  133.      * </ul>
  134.      **/
  135.     public LevenbergMarquardtOptimizer() {
  136.         this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
  137.     }

  138.     /**
  139.      * Construct an instance with all parameters specified.
  140.      *
  141.      * @param initialStepBoundFactor initial step bound factor
  142.      * @param costRelativeTolerance  cost relative tolerance
  143.      * @param parRelativeTolerance   parameters relative tolerance
  144.      * @param orthoTolerance         orthogonality tolerance
  145.      * @param qrRankingThreshold     threshold in the QR decomposition. Columns whose
  146.      *                               squared 2-norm is smaller than or equal to this
  147.      *                               threshold are considered to be all 0s.
  148.      */
  149.     public LevenbergMarquardtOptimizer(
  150.             final double initialStepBoundFactor,
  151.             final double costRelativeTolerance,
  152.             final double parRelativeTolerance,
  153.             final double orthoTolerance,
  154.             final double qrRankingThreshold) {
  155.         this.initialStepBoundFactor = initialStepBoundFactor;
  156.         this.costRelativeTolerance = costRelativeTolerance;
  157.         this.parRelativeTolerance = parRelativeTolerance;
  158.         this.orthoTolerance = orthoTolerance;
  159.         this.qrRankingThreshold = qrRankingThreshold;
  160.     }

  161.     /** Build new instance with initial step bound factor.
  162.      * @param newInitialStepBoundFactor Positive input variable used in
  163.      * determining the initial step bound. This bound is set to the
  164.      * product of initialStepBoundFactor and the euclidean norm of
  165.      * {@code diag * x} if non-zero, or else to {@code newInitialStepBoundFactor}
  166.      * itself. In most cases this factor should lie in the interval
  167.      * {@code (0.1, 100.0)}; {@code 100} is a generally recommended value.
  169.      * @return a new instance.
  170.      */
  171.     public LevenbergMarquardtOptimizer withInitialStepBoundFactor(double newInitialStepBoundFactor) {
  172.         return new LevenbergMarquardtOptimizer(
  173.                 newInitialStepBoundFactor,
  174.                 costRelativeTolerance,
  175.                 parRelativeTolerance,
  176.                 orthoTolerance,
  177.                 qrRankingThreshold);
  178.     }

  179.     /** Build new instance with cost relative tolerance.
  180.      * @param newCostRelativeTolerance Desired relative error in the sum of squares.
  181.      * @return a new instance.
  182.      */
  183.     public LevenbergMarquardtOptimizer withCostRelativeTolerance(double newCostRelativeTolerance) {
  184.         return new LevenbergMarquardtOptimizer(
  185.                 initialStepBoundFactor,
  186.                 newCostRelativeTolerance,
  187.                 parRelativeTolerance,
  188.                 orthoTolerance,
  189.                 qrRankingThreshold);
  190.     }

  191.     /** Build new instance with parameter relative tolerance.
  192.      * @param newParRelativeTolerance Desired relative error in the approximate solution
  193.      * parameters.
  194.      * @return a new instance.
  195.      */
  196.     public LevenbergMarquardtOptimizer withParameterRelativeTolerance(double newParRelativeTolerance) {
  197.         return new LevenbergMarquardtOptimizer(
  198.                 initialStepBoundFactor,
  199.                 costRelativeTolerance,
  200.                 newParRelativeTolerance,
  201.                 orthoTolerance,
  202.                 qrRankingThreshold);
  203.     }

  204.     /** Build new instance with ortho tolerance.
  205.      * @param newOrthoTolerance Desired max cosine on the orthogonality between
  206.      * the function vector and the columns of the Jacobian.
  207.      * @return a new instance.
  208.      */
  209.     public LevenbergMarquardtOptimizer withOrthoTolerance(double newOrthoTolerance) {
  210.         return new LevenbergMarquardtOptimizer(
  211.                 initialStepBoundFactor,
  212.                 costRelativeTolerance,
  213.                 parRelativeTolerance,
  214.                 newOrthoTolerance,
  215.                 qrRankingThreshold);
  216.     }

  217.     /** Build new instance with ranking threshold.
  218.      * @param newQRRankingThreshold Desired threshold for QR ranking.
  219.      * If the squared norm of a column vector is smaller than or equal to this
  220.      * threshold during QR decomposition, it is considered to be a zero vector
  221.      * and hence the rank of the matrix is reduced.
  222.      * @return a new instance.
  223.      */
  224.     public LevenbergMarquardtOptimizer withRankingThreshold(double newQRRankingThreshold) {
  225.         return new LevenbergMarquardtOptimizer(
  226.                 initialStepBoundFactor,
  227.                 costRelativeTolerance,
  228.                 parRelativeTolerance,
  229.                 orthoTolerance,
  230.                 newQRRankingThreshold);
  231.     }

  232.     /**
  233.      * Gets the value of a tuning parameter.
  234.      * @see #withInitialStepBoundFactor(double)
  235.      *
  236.      * @return the parameter's value.
  237.      */
  238.     public double getInitialStepBoundFactor() {
  239.         return initialStepBoundFactor;
  240.     }

  241.     /**
  242.      * Gets the value of a tuning parameter.
  243.      * @see #withCostRelativeTolerance(double)
  244.      *
  245.      * @return the parameter's value.
  246.      */
  247.     public double getCostRelativeTolerance() {
  248.         return costRelativeTolerance;
  249.     }

  250.     /**
  251.      * Gets the value of a tuning parameter.
  252.      * @see #withParameterRelativeTolerance(double)
  253.      *
  254.      * @return the parameter's value.
  255.      */
  256.     public double getParameterRelativeTolerance() {
  257.         return parRelativeTolerance;
  258.     }

  259.     /**
  260.      * Gets the value of a tuning parameter.
  261.      * @see #withOrthoTolerance(double)
  262.      *
  263.      * @return the parameter's value.
  264.      */
  265.     public double getOrthoTolerance() {
  266.         return orthoTolerance;
  267.     }

  268.     /**
  269.      * Gets the value of a tuning parameter.
  270.      * @see #withRankingThreshold(double)
  271.      *
  272.      * @return the parameter's value.
  273.      */
  274.     public double getRankingThreshold() {
  275.         return qrRankingThreshold;
  276.     }

  277.     /** {@inheritDoc} */
  278.     @Override
  279.     public Optimum optimize(final LeastSquaresProblem problem) {
  280.         // Pull in relevant data from the problem as locals.
  281.         final int nR = problem.getObservationSize(); // Number of observed data.
  282.         final int nC = problem.getParameterSize(); // Number of parameters.
  283.         // Counters.
  284.         final Incrementor iterationCounter = problem.getIterationCounter();
  285.         final Incrementor evaluationCounter = problem.getEvaluationCounter();
  286.         // Convergence criterion.
  287.         final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();

  288.         // arrays shared with the other private methods
  289.         final int solvedCols  = FastMath.min(nR, nC);
  290.         /* Parameters evolution direction associated with lmPar. */
  291.         double[] lmDir = new double[nC];
  292.         /* Levenberg-Marquardt parameter. */
  293.         double lmPar = 0;

  294.         // local point
  295.         double   delta   = 0;
  296.         double   xNorm   = 0;
  297.         double[] diag    = new double[nC];
  298.         double[] oldX    = new double[nC];
  299.         double[] oldRes  = new double[nR];
  300.         double[] qtf     = new double[nR];
  301.         double[] work1   = new double[nC];
  302.         double[] work2   = new double[nC];
  303.         double[] work3   = new double[nC];


  304.         // Evaluate the function at the starting point and calculate its norm.
  305.         evaluationCounter.increment();
  306.         //value will be reassigned in the loop
  307.         Evaluation current = problem.evaluate(problem.getStart());
  308.         double[] currentResiduals = current.getResiduals().toArray();
  309.         double currentCost = current.getCost();
  310.         double[] currentPoint = current.getPoint().toArray();

  311.         // Outer loop.
  312.         boolean firstIteration = true;
  313.         while (true) {
  314.             iterationCounter.increment();

  315.             final Evaluation previous = current;

  316.             // QR decomposition of the jacobian matrix
  317.             final InternalData internalData = qrDecomposition(current.getJacobian(), solvedCols);
  318.             final double[][] weightedJacobian = internalData.weightedJacobian;
  319.             final int[] permutation = internalData.permutation;
  320.             final double[] diagR = internalData.diagR;
  321.             final double[] jacNorm = internalData.jacNorm;

  322.             //residuals already have weights applied
  323.             double[] weightedResidual = currentResiduals;
  324.             System.arraycopy(weightedResidual, 0, qtf, 0, nR);

  325.             // compute Qt.res
  326.             qTy(qtf, internalData);

  327.             // now we don't need Q anymore,
  328.             // so let jacobian contain the R matrix with its diagonal elements
  329.             for (int k = 0; k < solvedCols; ++k) {
  330.                 int pk = permutation[k];
  331.                 weightedJacobian[k][pk] = diagR[pk];
  332.             }

  333.             if (firstIteration) {
  334.                 // scale the point according to the norms of the columns
  335.                 // of the initial jacobian
  336.                 xNorm = 0;
  337.                 for (int k = 0; k < nC; ++k) {
  338.                     double dk = jacNorm[k];
  339.                     if (dk == 0) {
  340.                         dk = 1.0;
  341.                     }
  342.                     double xk = dk * currentPoint[k];
  343.                     xNorm  += xk * xk;
  344.                     diag[k] = dk;
  345.                 }
  346.                 xNorm = FastMath.sqrt(xNorm);

  347.                 // initialize the step bound delta
  348.                 delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
  349.             }

  350.             // check orthogonality between function vector and jacobian columns
  351.             double maxCosine = 0;
  352.             if (currentCost != 0) {
  353.                 for (int j = 0; j < solvedCols; ++j) {
  354.                     int    pj = permutation[j];
  355.                     double s  = jacNorm[pj];
  356.                     if (s != 0) {
  357.                         double sum = 0;
  358.                         for (int i = 0; i <= j; ++i) {
  359.                             sum += weightedJacobian[i][pj] * qtf[i];
  360.                         }
  361.                         maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
  362.                     }
  363.                 }
  364.             }
  365.             if (maxCosine <= orthoTolerance) {
  366.                 // Convergence has been reached.
  367.                 return Optimum.of(
  368.                         current,
  369.                         evaluationCounter.getCount(),
  370.                         iterationCounter.getCount());
  371.             }

  372.             // rescale if necessary
  373.             for (int j = 0; j < nC; ++j) {
  374.                 diag[j] = FastMath.max(diag[j], jacNorm[j]);
  375.             }

  376.             // Inner loop.
  377.             for (double ratio = 0; ratio < 1.0e-4;) {

  378.                 // save the state
  379.                 for (int j = 0; j < solvedCols; ++j) {
  380.                     int pj = permutation[j];
  381.                     oldX[pj] = currentPoint[pj];
  382.                 }
  383.                 final double previousCost = currentCost;
  384.                 double[] tmpVec = weightedResidual;
  385.                 weightedResidual = oldRes;
  386.                 oldRes    = tmpVec;

  387.                 // determine the Levenberg-Marquardt parameter
  388.                 lmPar = determineLMParameter(qtf, delta, diag,
  389.                                      internalData, solvedCols,
  390.                                      work1, work2, work3, lmDir, lmPar);

  391.                 // compute the new point and the norm of the evolution direction
  392.                 double lmNorm = 0;
  393.                 for (int j = 0; j < solvedCols; ++j) {
  394.                     int pj = permutation[j];
  395.                     lmDir[pj] = -lmDir[pj];
  396.                     currentPoint[pj] = oldX[pj] + lmDir[pj];
  397.                     double s = diag[pj] * lmDir[pj];
  398.                     lmNorm  += s * s;
  399.                 }
  400.                 lmNorm = FastMath.sqrt(lmNorm);
  401.                 // on the first iteration, adjust the initial step bound.
  402.                 if (firstIteration) {
  403.                     delta = FastMath.min(delta, lmNorm);
  404.                 }

  405.                 // Evaluate the function at x + p and calculate its norm.
  406.                 evaluationCounter.increment();
  407.                 current = problem.evaluate(new ArrayRealVector(currentPoint));
  408.                 currentResiduals = current.getResiduals().toArray();
  409.                 currentCost = current.getCost();
  410.                 currentPoint = current.getPoint().toArray();

  411.                 // compute the scaled actual reduction
  412.                 double actRed = -1.0;
  413.                 if (0.1 * currentCost < previousCost) {
  414.                     double r = currentCost / previousCost;
  415.                     actRed = 1.0 - r * r;
  416.                 }

  417.                 // compute the scaled predicted reduction
  418.                 // and the scaled directional derivative
  419.                 for (int j = 0; j < solvedCols; ++j) {
  420.                     int pj = permutation[j];
  421.                     double dirJ = lmDir[pj];
  422.                     work1[j] = 0;
  423.                     for (int i = 0; i <= j; ++i) {
  424.                         work1[i] += weightedJacobian[i][pj] * dirJ;
  425.                     }
  426.                 }
  427.                 double coeff1 = 0;
  428.                 for (int j = 0; j < solvedCols; ++j) {
  429.                     coeff1 += work1[j] * work1[j];
  430.                 }
  431.                 double pc2 = previousCost * previousCost;
  432.                 coeff1 /= pc2;
  433.                 double coeff2 = lmPar * lmNorm * lmNorm / pc2;
  434.                 double preRed = coeff1 + 2 * coeff2;
  435.                 double dirDer = -(coeff1 + coeff2);

  436.                 // ratio of the actual to the predicted reduction
  437.                 ratio = (preRed == 0) ? 0 : (actRed / preRed);

  438.                 // update the step bound
  439.                 if (ratio <= 0.25) {
  440.                     double tmp =
  441.                         (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
  442.                     if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
  443.                         tmp = 0.1;
  444.                     }
  445.                     delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
  446.                     lmPar /= tmp;
  447.                 } else if ((lmPar == 0) || (ratio >= 0.75)) {
  448.                     delta = 2 * lmNorm;
  449.                     lmPar *= 0.5;
  450.                 }

  451.                 // test for successful iteration.
  452.                 if (ratio >= 1.0e-4) {
  453.                     // successful iteration, update the norm
  454.                     firstIteration = false;
  455.                     xNorm = 0;
  456.                     for (int k = 0; k < nC; ++k) {
  457.                         double xK = diag[k] * currentPoint[k];
  458.                         xNorm += xK * xK;
  459.                     }
  460.                     xNorm = FastMath.sqrt(xNorm);

  461.                     // tests for convergence.
  462.                     if (checker != null && checker.converged(iterationCounter.getCount(), previous, current)) {
  463.                         return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
  464.                     }
  465.                 } else {
  466.                     // failed iteration, reset the previous values
  467.                     currentCost = previousCost;
  468.                     for (int j = 0; j < solvedCols; ++j) {
  469.                         int pj = permutation[j];
  470.                         currentPoint[pj] = oldX[pj];
  471.                     }
  472.                     tmpVec    = weightedResidual;
  473.                     weightedResidual = oldRes;
  474.                     oldRes    = tmpVec;
  475.                     // Reset "current" to previous values.
  476.                     current = previous;
  477.                 }

  478.                 // Default convergence criteria.
  479.                 if ((FastMath.abs(actRed) <= costRelativeTolerance &&
  480.                      preRed <= costRelativeTolerance &&
  481.                      ratio <= 2.0) ||
  482.                     delta <= parRelativeTolerance * xNorm) {
  483.                     return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
  484.                 }

  485.                 // tests for termination and stringent tolerances
  486.                 if (FastMath.abs(actRed) <= TWO_EPS &&
  487.                     preRed <= TWO_EPS &&
  488.                     ratio <= 2.0) {
  489.                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
  490.                                                         costRelativeTolerance);
  491.                 } else if (delta <= TWO_EPS * xNorm) {
  492.                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
  493.                                                         parRelativeTolerance);
  494.                 } else if (maxCosine <= TWO_EPS) {
  495.                     throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
  496.                                                         orthoTolerance);
  497.                 }
  498.             }
  499.         }
  500.     }

  501.     /**
  502.      * Holds internal data.
  503.      * This structure was created so that all optimizer fields can be "final".
  504.      * Code should be further refactored in order to not pass around arguments
  505.      * that will be modified in-place (cf. "work" arrays).
  506.      */
  507.     private static class InternalData {
  508.         /** Weighted Jacobian. */
  509.         private final double[][] weightedJacobian;
  510.         /** Columns permutation array. */
  511.         private final int[] permutation;
  512.         /** Rank of the Jacobian matrix. */
  513.         private final int rank;
  514.         /** Diagonal elements of the R matrix in the QR decomposition. */
  515.         private final double[] diagR;
  516.         /** Norms of the columns of the jacobian matrix. */
  517.         private final double[] jacNorm;
  518.         /** Coefficients of the Householder transforms vectors. */
  519.         private final double[] beta;

  520.         /**
  521.          * <p>
  522.          * All arrays are stored by reference
  523.          * </p>
  524.          * @param weightedJacobian Weighted Jacobian.
  525.          * @param permutation Columns permutation array.
  526.          * @param rank Rank of the Jacobian matrix.
  527.          * @param diagR Diagonal elements of the R matrix in the QR decomposition.
  528.          * @param jacNorm Norms of the columns of the jacobian matrix.
  529.          * @param beta Coefficients of the Householder transforms vectors.
  530.          */
  531.         InternalData(double[][] weightedJacobian,
  532.                      int[] permutation,
  533.                      int rank,
  534.                      double[] diagR,
  535.                      double[] jacNorm,
  536.                      double[] beta) {
  537.             this.weightedJacobian = weightedJacobian; // NOPMD - storing array references is intentional and documented here
  538.             this.permutation = permutation;           // NOPMD - storing array references is intentional and documented here
  539.             this.rank = rank;
  540.             this.diagR = diagR;                       // NOPMD - storing array references is intentional and documented here
  541.             this.jacNorm = jacNorm;                   // NOPMD - storing array references is intentional and documented here
  542.             this.beta = beta;                         // NOPMD - storing array references is intentional and documented here
  543.         }
  544.     }

  545.     /**
  546.      * Determines the Levenberg-Marquardt parameter.
  547.      *
  548.      * <p>This implementation is a translation in Java of the MINPACK
  549.      * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
  550.      * routine.</p>
  551.      * <p>This method returns the updated LM parameter and stores the corresponding direction in the {@code lmDir} array.</p>
  552.      * <p>The authors of the original fortran function are:</p>
  553.      * <ul>
  554.      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
  555.      *   <li>Burton  S. Garbow</li>
  556.      *   <li>Kenneth E. Hillstrom</li>
  557.      *   <li>Jorge   J. More</li>
  558.      * </ul>
  559.      * <p>Luc Maisonobe did the Java translation.</p>
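     *
     * <p>As an added gloss (this paragraph is not part of the original MINPACK
     * documentation): with D = diag(diag) and delta the current trust-region radius,
     * the routine looks for the parameter par such that the step p solving</p>
     * <pre>
     * (J<sup>T</sup>.J + par.D<sup>2</sup>).p = -J<sup>T</sup>.r
     * </pre>
     * <p>satisfies ||D.p|| = delta to within 10% (the acceptance test used in the
     * iteration below), and returns par = 0 when the plain Gauss-Newton step is
     * already inside the trust region.</p>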
  560.      *
  561.      * @param qy Array containing qTy.
  562.      * @param delta Upper bound on the euclidean norm of diag * lmDir.
  563.      * @param diag Diagonal matrix.
  564.      * @param internalData Data (modified in-place in this method).
  565.      * @param solvedCols Number of solved columns.
  566.      * @param work1 work array
  567.      * @param work2 work array
  568.      * @param work3 work array
  569.      * @param lmDir the "returned" LM direction will be stored in this array.
  570.      * @param lmPar the value of the LM parameter from the previous iteration.
  571.      * @return the new LM parameter
  572.      */
  573.     private double determineLMParameter(double[] qy, double delta, double[] diag,
  574.                                       InternalData internalData, int solvedCols,
  575.                                       double[] work1, double[] work2, double[] work3,
  576.                                       double[] lmDir, double lmPar) {
  577.         final double[][] weightedJacobian = internalData.weightedJacobian;
  578.         final int[] permutation = internalData.permutation;
  579.         final int rank = internalData.rank;
  580.         final double[] diagR = internalData.diagR;

  581.         final int nC = weightedJacobian[0].length;

  582.         // compute and store in x the gauss-newton direction, if the
  583.         // jacobian is rank-deficient, obtain a least squares solution
  584.         for (int j = 0; j < rank; ++j) {
  585.             lmDir[permutation[j]] = qy[j];
  586.         }
  587.         for (int j = rank; j < nC; ++j) {
  588.             lmDir[permutation[j]] = 0;
  589.         }
  590.         for (int k = rank - 1; k >= 0; --k) {
  591.             int pk = permutation[k];
  592.             double ypk = lmDir[pk] / diagR[pk];
  593.             for (int i = 0; i < k; ++i) {
  594.                 lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
  595.             }
  596.             lmDir[pk] = ypk;
  597.         }

  598.         // evaluate the function at the origin, and test
  599.         // for acceptance of the Gauss-Newton direction
  600.         double dxNorm = 0;
  601.         for (int j = 0; j < solvedCols; ++j) {
  602.             int pj = permutation[j];
  603.             double s = diag[pj] * lmDir[pj];
  604.             work1[pj] = s;
  605.             dxNorm += s * s;
  606.         }
  607.         dxNorm = FastMath.sqrt(dxNorm);
  608.         double fp = dxNorm - delta;
  609.         if (fp <= 0.1 * delta) {
  610.             lmPar = 0;
  611.             return lmPar;
  612.         }

  613.         // if the jacobian is not rank deficient, the Newton step provides
  614.         // a lower bound, parl, for the zero of the function,
  615.         // otherwise set this bound to zero
  616.         double sum2;
  617.         double parl = 0;
  618.         if (rank == solvedCols) {
  619.             for (int j = 0; j < solvedCols; ++j) {
  620.                 int pj = permutation[j];
  621.                 work1[pj] *= diag[pj] / dxNorm;
  622.             }
  623.             sum2 = 0;
  624.             for (int j = 0; j < solvedCols; ++j) {
  625.                 int pj = permutation[j];
  626.                 double sum = 0;
  627.                 for (int i = 0; i < j; ++i) {
  628.                     sum += weightedJacobian[i][pj] * work1[permutation[i]];
  629.                 }
  630.                 double s = (work1[pj] - sum) / diagR[pj];
  631.                 work1[pj] = s;
  632.                 sum2 += s * s;
  633.             }
  634.             parl = fp / (delta * sum2);
  635.         }

  636.         // calculate an upper bound, paru, for the zero of the function
  637.         sum2 = 0;
  638.         for (int j = 0; j < solvedCols; ++j) {
  639.             int pj = permutation[j];
  640.             double sum = 0;
  641.             for (int i = 0; i <= j; ++i) {
  642.                 sum += weightedJacobian[i][pj] * qy[i];
  643.             }
  644.             sum /= diag[pj];
  645.             sum2 += sum * sum;
  646.         }
  647.         double gNorm = FastMath.sqrt(sum2);
  648.         double paru = gNorm / delta;
  649.         if (paru == 0) {
  650.             paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
  651.         }

  652.         // if the input par lies outside of the interval (parl,paru),
  653.         // set par to the closer endpoint
  654.         lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
  655.         if (lmPar == 0) {
  656.             lmPar = gNorm / dxNorm;
  657.         }

  658.         for (int countdown = 10; countdown >= 0; --countdown) {

  659.             // evaluate the function at the current value of lmPar
  660.             if (lmPar == 0) {
  661.                 lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
  662.             }
  663.             double sPar = FastMath.sqrt(lmPar);
  664.             for (int j = 0; j < solvedCols; ++j) {
  665.                 int pj = permutation[j];
  666.                 work1[pj] = sPar * diag[pj];
  667.             }
  668.             determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);

  669.             dxNorm = 0;
  670.             for (int j = 0; j < solvedCols; ++j) {
  671.                 int pj = permutation[j];
  672.                 double s = diag[pj] * lmDir[pj];
  673.                 work3[pj] = s;
  674.                 dxNorm += s * s;
  675.             }
  676.             dxNorm = FastMath.sqrt(dxNorm);
  677.             double previousFP = fp;
  678.             fp = dxNorm - delta;

  679.             // if the function is small enough, accept the current value
  680.             // of lmPar, also test for the exceptional cases where parl is zero
  681.             if (FastMath.abs(fp) <= 0.1 * delta ||
  682.                 (parl == 0 &&
  683.                  fp <= previousFP &&
  684.                  previousFP < 0)) {
  685.                 return lmPar;
  686.             }

  687.             // compute the Newton correction
  688.             for (int j = 0; j < solvedCols; ++j) {
  689.                 int pj = permutation[j];
  690.                 work1[pj] = work3[pj] * diag[pj] / dxNorm;
  691.             }
  692.             for (int j = 0; j < solvedCols; ++j) {
  693.                 int pj = permutation[j];
  694.                 work1[pj] /= work2[j];
  695.                 double tmp = work1[pj];
  696.                 for (int i = j + 1; i < solvedCols; ++i) {
  697.                     work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
  698.                 }
  699.             }
  700.             sum2 = 0;
  701.             for (int j = 0; j < solvedCols; ++j) {
  702.                 double s = work1[permutation[j]];
  703.                 sum2 += s * s;
  704.             }
  705.             double correction = fp / (delta * sum2);

  706.             // depending on the sign of the function, update parl or paru.
  707.             if (fp > 0) {
  708.                 parl = FastMath.max(parl, lmPar);
  709.             } else if (fp < 0) {
  710.                 paru = FastMath.min(paru, lmPar);
  711.             }

  712.             // compute an improved estimate for lmPar
  713.             lmPar = FastMath.max(parl, lmPar + correction);
  714.         }

  715.         return lmPar;
  716.     }

  717.     /**
  718.      * Solve a*x = b and d*x = 0 in the least squares sense.
  719.      * <p>This implementation is a translation in Java of the MINPACK
  720.      * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
  721.      * routine.</p>
  722.      * <p>This method updates the {@code lmDir} and {@code lmDiag} arrays in place.</p>
  723.      * <p>The authors of the original fortran function are:</p>
  724.      * <ul>
  725.      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
  726.      *   <li>Burton  S. Garbow</li>
  727.      *   <li>Kenneth E. Hillstrom</li>
  728.      *   <li>Jorge   J. More</li>
  729.      * </ul>
  730.      * <p>Luc Maisonobe did the Java translation.</p>
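     *
     * <p>Stated differently (an added gloss, not part of the original MINPACK
     * documentation): with a the upper triangular R factor stored in the weighted
     * Jacobian, b the {@code qy} vector and d the {@code diag} scaling, the routine
     * computes the x minimizing</p>
     * <pre>
     * ||a.x - b||<sup>2</sup> + ||d.x||<sup>2</sup>
     * </pre>
     * <p>using Givens rotations, so that the triangular structure of R is preserved.</p>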
  731.      *
  732.      * @param qy array containing qTy
  733.      * @param diag diagonal matrix
  734.      * @param lmDiag diagonal elements associated with lmDir
  735.      * @param internalData Data (modified in-place in this method).
  736.      * @param solvedCols Number of solved columns.
  737.      * @param work work array
  738.      * @param lmDir the "returned" LM direction is stored in this array
  739.      */
  740.     private void determineLMDirection(double[] qy, double[] diag,
  741.                                       double[] lmDiag,
  742.                                       InternalData internalData,
  743.                                       int solvedCols,
  744.                                       double[] work,
  745.                                       double[] lmDir) {
  746.         final int[] permutation = internalData.permutation;
  747.         final double[][] weightedJacobian = internalData.weightedJacobian;
  748.         final double[] diagR = internalData.diagR;

  749.         // copy R and Qty to preserve input and initialize s
  750.         //  in particular, save the diagonal elements of R in lmDir
  751.         for (int j = 0; j < solvedCols; ++j) {
  752.             int pj = permutation[j];
  753.             for (int i = j + 1; i < solvedCols; ++i) {
  754.                 weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
  755.             }
  756.             lmDir[j] = diagR[pj];
  757.             work[j]  = qy[j];
  758.         }

  759.         // eliminate the diagonal matrix d using a Givens rotation
  760.         for (int j = 0; j < solvedCols; ++j) {

  761.             // prepare the row of d to be eliminated, locating the
  762.             // diagonal element using p from the Q.R. factorization
  763.             int pj = permutation[j];
  764.             double dpj = diag[pj];
  765.             if (dpj != 0) {
  766.                 Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
  767.             }
  768.             lmDiag[j] = dpj;

  769.             //  the transformations to eliminate the row of d
  770.             // modify only a single element of Qty
  771.             // beyond the first n, which is initially zero.
  772.             double qtbpj = 0;
  773.             for (int k = j; k < solvedCols; ++k) {
  774.                 int pk = permutation[k];

  775.                 // determine a Givens rotation which eliminates the
  776.                 // appropriate element in the current row of d
  777.                 if (lmDiag[k] != 0) {

  778.                     final double sin;
  779.                     final double cos;
  780.                     double rkk = weightedJacobian[k][pk];
  781.                     if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
  782.                         final double cotan = rkk / lmDiag[k];
  783.                         sin   = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
  784.                         cos   = sin * cotan;
  785.                     } else {
  786.                         final double tan = lmDiag[k] / rkk;
  787.                         cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
  788.                         sin = cos * tan;
  789.                     }

  790.                     // compute the modified diagonal element of R and
  791.                     // the modified element of (Qty,0)
  792.                     weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
  793.                     final double temp = cos * work[k] + sin * qtbpj;
  794.                     qtbpj = -sin * work[k] + cos * qtbpj;
  795.                     work[k] = temp;

  796.                     // accumulate the transformation in the row of s
  797.                     for (int i = k + 1; i < solvedCols; ++i) {
  798.                         double rik = weightedJacobian[i][pk];
  799.                         final double temp2 = cos * rik + sin * lmDiag[i];
  800.                         lmDiag[i] = -sin * rik + cos * lmDiag[i];
  801.                         weightedJacobian[i][pk] = temp2;
  802.                     }
  803.                 }
  804.             }

  805.             // store the diagonal element of s and restore
  806.             // the corresponding diagonal element of R
  807.             lmDiag[j] = weightedJacobian[j][permutation[j]];
  808.             weightedJacobian[j][permutation[j]] = lmDir[j];
  809.         }

  810.         // solve the triangular system for z, if the system is
  811.         // singular, then obtain a least squares solution
  812.         int nSing = solvedCols;
  813.         for (int j = 0; j < solvedCols; ++j) {
  814.             if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
  815.                 nSing = j;
  816.             }
  817.             if (nSing < solvedCols) {
  818.                 work[j] = 0;
  819.             }
  820.         }
  821.         if (nSing > 0) {
  822.             for (int j = nSing - 1; j >= 0; --j) {
  823.                 int pj = permutation[j];
  824.                 double sum = 0;
  825.                 for (int i = j + 1; i < nSing; ++i) {
  826.                     sum += weightedJacobian[i][pj] * work[i];
  827.                 }
  828.                 work[j] = (work[j] - sum) / lmDiag[j];
  829.             }
  830.         }

  831.         // permute the components of z back to components of lmDir
  832.         for (int j = 0; j < lmDir.length; ++j) {
  833.             lmDir[permutation[j]] = work[j];
  834.         }
  835.     }

  836.     /**
  837.      * Decompose a matrix A as A.P = Q.R using Householder transforms.
  838.      * <p>As suggested in the P. Lascaux and R. Theodor book
  839.      * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
  840.      * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
  841.      * the Householder transforms with u<sub>k</sub> unit vectors such that:
  842.      * <pre>
  843.      * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
  844.      * </pre>
  845.      * we use v<sub>k</sub> non-unit vectors such that:
  846.      * <pre>
  847.      * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
  848.      * </pre>
  849.      * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
  850.      * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
  851.      * them from the v<sub>k</sub> vectors would be costly.</p>
  852.      * <p>This decomposition handles rank-deficient cases since the transformations
  853.      * are performed in order of non-increasing column norms thanks to column
  854.      * pivoting. The diagonal elements of the R matrix are therefore also in
  855.      * non-increasing absolute values order.</p>
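     *
     * <p>(Added note, not in the original documentation.) With this representation,
     * applying H<sub>k</sub> to a vector y only needs the stored v<sub>k</sub> and
     * beta<sub>k</sub>: y := y - beta<sub>k</sub>.(v<sub>k</sub><sup>t</sup>.y).v<sub>k</sub>,
     * which is exactly what the private {@code qTy} method does, one column at a time.</p>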
  856.      *
  857.      * @param jacobian Weighted Jacobian matrix at the current point.
  858.      * @param solvedCols Number of solved columns.
  859.      * @return data used in other methods of this class.
  860.      * @throws MathIllegalStateException if the decomposition cannot be performed.
  861.      */
  862.     private InternalData qrDecomposition(RealMatrix jacobian, int solvedCols)
  863.         throws MathIllegalStateException {
  864.         // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
  865.         // hence the multiplication by -1.
  866.         final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();

  867.         final int nR = weightedJacobian.length;
  868.         final int nC = weightedJacobian[0].length;

  869.         final int[] permutation = new int[nC];
  870.         final double[] diagR = new double[nC];
  871.         final double[] jacNorm = new double[nC];
  872.         final double[] beta = new double[nC];

  873.         // initializations
  874.         for (int k = 0; k < nC; ++k) {
  875.             permutation[k] = k;
  876.             double norm2 = 0;
  877.             for (double[] doubles : weightedJacobian) {
  878.                 double akk = doubles[k];
  879.                 norm2 += akk * akk;
  880.             }
  881.             jacNorm[k] = FastMath.sqrt(norm2);
  882.         }

  883.         // transform the matrix column after column
  884.         for (int k = 0; k < nC; ++k) {

  885.             // select the column with the greatest norm on active components
  886.             int nextColumn = -1;
  887.             double ak2 = Double.NEGATIVE_INFINITY;
  888.             for (int i = k; i < nC; ++i) {
  889.                 double norm2 = 0;
  890.                 for (int j = k; j < nR; ++j) {
  891.                     double aki = weightedJacobian[j][permutation[i]];
  892.                     norm2 += aki * aki;
  893.                 }
  894.                 if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
  895.                     throw new MathIllegalStateException(LocalizedOptimFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
  896.                                                         nR, nC);
  897.                 }
  898.                 if (norm2 > ak2) {
  899.                     nextColumn = i;
  900.                     ak2        = norm2;
  901.                 }
  902.             }
  903.             if (ak2 <= qrRankingThreshold) {
  904.                 return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
  905.             }
  906.             int pk = permutation[nextColumn];
  907.             permutation[nextColumn] = permutation[k];
  908.             permutation[k] = pk;

  909.             // choose alpha such that Hk.u = alpha ek
  910.             double akk = weightedJacobian[k][pk];
  911.             double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
  912.             double betak = 1.0 / (ak2 - akk * alpha);
  913.             beta[pk] = betak;

  914.             // transform the current column
  915.             diagR[pk] = alpha;
  916.             weightedJacobian[k][pk] -= alpha;

  917.             // transform the remaining columns
  918.             for (int dk = nC - 1 - k; dk > 0; --dk) {
  919.                 double gamma = 0;
  920.                 for (int j = k; j < nR; ++j) {
  921.                     gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
  922.                 }
  923.                 gamma *= betak;
  924.                 for (int j = k; j < nR; ++j) {
  925.                     weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
  926.                 }
  927.             }
  928.         }

  929.         return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
  930.     }

  931.     /**
  932.      * Compute the product Qt.y for some Q.R. decomposition.
  933.      *
  934.      * @param y vector to multiply (will be overwritten with the result)
  935.      * @param internalData Data.
  936.      */
  937.     private void qTy(double[] y,
  938.                      InternalData internalData) {
  939.         final double[][] weightedJacobian = internalData.weightedJacobian;
  940.         final int[] permutation = internalData.permutation;
  941.         final double[] beta = internalData.beta;

  942.         final int nR = weightedJacobian.length;
  943.         final int nC = weightedJacobian[0].length;

  944.         for (int k = 0; k < nC; ++k) {
  945.             int pk = permutation[k];
  946.             double gamma = 0;
  947.             for (int i = k; i < nR; ++i) {
  948.                 gamma += weightedJacobian[i][pk] * y[i];
  949.             }
  950.             gamma *= beta[pk];
  951.             for (int i = k; i < nR; ++i) {
  952.                 y[i] -= gamma * weightedJacobian[i][pk];
  953.             }
  954.         }
  955.     }
  956. }