/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This is not the original file distributed by the Apache Software Foundation
 * It has been modified by the Hipparchus project
 */
package org.hipparchus.optim.nonlinear.vector.leastsquares;

import java.util.Arrays;

import org.hipparchus.exception.MathIllegalStateException;
import org.hipparchus.linear.ArrayRealVector;
import org.hipparchus.linear.RealMatrix;
import org.hipparchus.optim.ConvergenceChecker;
import org.hipparchus.optim.LocalizedOptimFormats;
import org.hipparchus.optim.nonlinear.vector.leastsquares.LeastSquaresProblem.Evaluation;
import org.hipparchus.util.FastMath;
import org.hipparchus.util.Incrementor;
import org.hipparchus.util.Precision;


/**
 * This class solves a least-squares problem using the Levenberg-Marquardt
 * algorithm.
 *
 * <p>This implementation <em>should</em> work even for over-determined systems
 * (i.e. systems having more points than equations). Over-determined systems
 * are solved by ignoring the points which have the smallest impact according
 * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
 * are changed to implement this.</p>
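 *
 * <p>As an illustration only (this usage sketch is not part of the original
 * MINPACK documentation), a typical call sequence could look as follows; how
 * the {@link LeastSquaresProblem} instance is built (model function, target
 * values, start point, evaluation limits) is assumed to be handled elsewhere:</p>
 * <pre>{@code
 * // building the problem is assumed to be done elsewhere
 * LeastSquaresProblem problem = ...;
 *
 * // configure the optimizer through its fluent "with" methods
 * LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer()
 *         .withCostRelativeTolerance(1.0e-12)
 *         .withParameterRelativeTolerance(1.0e-12);
 *
 * // run the optimization and retrieve the estimated parameters and final cost
 * Optimum optimum = optimizer.optimize(problem);
 * double[] estimated = optimum.getPoint().toArray();
 * double cost = optimum.getCost();
 * }</pre>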
 *
 * <p>The resolution engine is a simple translation of the MINPACK <a
 * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
 * changes. The changes include the over-determined resolution, the use of
 * an inherited convergence checker and the Q.R. decomposition which has been
 * rewritten following the algorithm described in the
 * P. Lascaux and R. Theodor book <i>Analyse numérique matricielle
 * appliquée à l'art de l'ingénieur</i>, Masson 1986.</p>
 * <p>The authors of the original Fortran version are:</p>
 * <ul>
 * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
 * <li>Burton S. Garbow</li>
 * <li>Kenneth E. Hillstrom</li>
 * <li>Jorge J. More</li>
 * </ul>
 * <p>The redistribution policy for MINPACK is available <a
 * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
 * is reproduced below.</p>
 *
 * <blockquote>
 * <p>
 * Minpack Copyright Notice (1999) University of Chicago.
 * All rights reserved
 * </p>
 * <p>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:</p>
 * <ol>
 * <li>Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.</li>
 * <li>Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.</li>
 * <li>The end-user documentation included with the redistribution, if any,
 * must include the following acknowledgment:
 * <code>This product includes software developed by the University of
 * Chicago, as Operator of Argonne National Laboratory.</code>
 * Alternately, this acknowledgment may appear in the software itself,
 * if and wherever such third-party acknowledgments normally appear.</li>
 * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
 * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
 * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
 * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
 * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
 * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
 * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
 * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
 * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
 * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
 * BE CORRECTED.</strong></li>
 * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
 * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
 * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
 * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
 * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
 * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
 * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
 * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
 * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
 * POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
 * </ol>
 * </blockquote>
 *
 */
public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer {

    /** Twice the "epsilon machine". */
    private static final double TWO_EPS = 2 * Precision.EPSILON;

    /* configuration parameters */
    /** Positive input variable used in determining the initial step bound. */
    private final double initialStepBoundFactor;
    /** Desired relative error in the sum of squares. */
    private final double costRelativeTolerance;
    /** Desired relative error in the approximate solution parameters. */
    private final double parRelativeTolerance;
    /** Desired max cosine on the orthogonality between the function vector
     * and the columns of the jacobian. */
    private final double orthoTolerance;
    /** Threshold for QR ranking. */
    private final double qrRankingThreshold;

    /** Default constructor.
     * <p>
     * The default values for the algorithm settings are:
     * <ul>
     * <li>Initial step bound factor: 100</li>
     * <li>Cost relative tolerance: 1e-10</li>
     * <li>Parameters relative tolerance: 1e-10</li>
     * <li>Orthogonality tolerance: 1e-10</li>
     * <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
     * </ul>
     */
    public LevenbergMarquardtOptimizer() {
        this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
    }

    /**
     * Construct an instance with all parameters specified.
     *
     * @param initialStepBoundFactor initial step bound factor
     * @param costRelativeTolerance cost relative tolerance
     * @param parRelativeTolerance parameters relative tolerance
     * @param orthoTolerance orthogonality tolerance
     * @param qrRankingThreshold threshold in the QR decomposition. Columns whose
     * squared norm is smaller than or equal to this threshold are considered to
     * be all zeros.
     */
    public LevenbergMarquardtOptimizer(
            final double initialStepBoundFactor,
            final double costRelativeTolerance,
            final double parRelativeTolerance,
            final double orthoTolerance,
            final double qrRankingThreshold) {
        this.initialStepBoundFactor = initialStepBoundFactor;
        this.costRelativeTolerance = costRelativeTolerance;
        this.parRelativeTolerance = parRelativeTolerance;
        this.orthoTolerance = orthoTolerance;
        this.qrRankingThreshold = qrRankingThreshold;
    }

    /** Build new instance with initial step bound factor.
     * @param newInitialStepBoundFactor Positive input variable used in
     * determining the initial step bound. This bound is set to the
     * product of initialStepBoundFactor and the Euclidean norm of
     * {@code diag * x} if non-zero, or else to {@code newInitialStepBoundFactor}
     * itself. In most cases the factor should lie in the interval
     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withInitialStepBoundFactor(double newInitialStepBoundFactor) {
        return new LevenbergMarquardtOptimizer(
                newInitialStepBoundFactor,
                costRelativeTolerance,
                parRelativeTolerance,
                orthoTolerance,
                qrRankingThreshold);
    }

    /** Build new instance with cost relative tolerance.
     * @param newCostRelativeTolerance Desired relative error in the sum of squares.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withCostRelativeTolerance(double newCostRelativeTolerance) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                newCostRelativeTolerance,
                parRelativeTolerance,
                orthoTolerance,
                qrRankingThreshold);
    }

    /** Build new instance with parameter relative tolerance.
     * @param newParRelativeTolerance Desired relative error in the approximate solution
     * parameters.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withParameterRelativeTolerance(double newParRelativeTolerance) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                costRelativeTolerance,
                newParRelativeTolerance,
                orthoTolerance,
                qrRankingThreshold);
    }

    /** Build new instance with ortho tolerance.
     * @param newOrthoTolerance Desired max cosine on the orthogonality between
     * the function vector and the columns of the Jacobian.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withOrthoTolerance(double newOrthoTolerance) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                costRelativeTolerance,
                parRelativeTolerance,
                newOrthoTolerance,
                qrRankingThreshold);
    }

    /** Build new instance with ranking threshold.
     * @param newQRRankingThreshold Desired threshold for QR ranking.
     * If the squared norm of a column vector is smaller than or equal to this
     * threshold during QR decomposition, it is considered to be a zero vector
     * and hence the rank of the matrix is reduced.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withRankingThreshold(double newQRRankingThreshold) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                costRelativeTolerance,
                parRelativeTolerance,
                orthoTolerance,
                newQRRankingThreshold);
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withInitialStepBoundFactor(double)
     *
     * @return the parameter's value.
     */
    public double getInitialStepBoundFactor() {
        return initialStepBoundFactor;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withCostRelativeTolerance(double)
     *
     * @return the parameter's value.
     */
    public double getCostRelativeTolerance() {
        return costRelativeTolerance;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withParameterRelativeTolerance(double)
     *
     * @return the parameter's value.
     */
    public double getParameterRelativeTolerance() {
        return parRelativeTolerance;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withOrthoTolerance(double)
     *
     * @return the parameter's value.
     */
    public double getOrthoTolerance() {
        return orthoTolerance;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withRankingThreshold(double)
     *
     * @return the parameter's value.
     */
    public double getRankingThreshold() {
        return qrRankingThreshold;
    }

    /** {@inheritDoc} */
    @Override
    public Optimum optimize(final LeastSquaresProblem problem) {
        // Pull in relevant data from the problem as locals.
        final int nR = problem.getObservationSize(); // Number of observed data.
        final int nC = problem.getParameterSize(); // Number of parameters.
        // Counters.
        final Incrementor iterationCounter = problem.getIterationCounter();
        final Incrementor evaluationCounter = problem.getEvaluationCounter();
        // Convergence criterion.
        final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();

        // arrays shared with the other private methods
        final int solvedCols = FastMath.min(nR, nC);
        /* Parameters evolution direction associated with lmPar. */
        double[] lmDir = new double[nC];
        /* Levenberg-Marquardt parameter. */
        double lmPar = 0;

        // local point
        double delta = 0;
        double xNorm = 0;
        double[] diag = new double[nC];
        double[] oldX = new double[nC];
        double[] oldRes = new double[nR];
        double[] qtf = new double[nR];
        double[] work1 = new double[nC];
        double[] work2 = new double[nC];
        double[] work3 = new double[nC];


        // Evaluate the function at the starting point and calculate its norm.
        evaluationCounter.increment();
        // value will be reassigned in the loop
        Evaluation current = problem.evaluate(problem.getStart());
        double[] currentResiduals = current.getResiduals().toArray();
        double currentCost = current.getCost();
        double[] currentPoint = current.getPoint().toArray();

        // Outer loop.
        boolean firstIteration = true;
        while (true) {
            iterationCounter.increment();

            final Evaluation previous = current;

            // QR decomposition of the jacobian matrix
            final InternalData internalData = qrDecomposition(current.getJacobian(), solvedCols);
            final double[][] weightedJacobian = internalData.weightedJacobian;
            final int[] permutation = internalData.permutation;
            final double[] diagR = internalData.diagR;
            final double[] jacNorm = internalData.jacNorm;

            // residuals already have weights applied
            double[] weightedResidual = currentResiduals;
            System.arraycopy(weightedResidual, 0, qtf, 0, nR);

            // compute Qt.res
            qTy(qtf, internalData);

            // now we don't need Q anymore,
            // so let jacobian contain the R matrix with its diagonal elements
            for (int k = 0; k < solvedCols; ++k) {
                int pk = permutation[k];
                weightedJacobian[k][pk] = diagR[pk];
            }

            if (firstIteration) {
                // scale the point according to the norms of the columns
                // of the initial jacobian
                xNorm = 0;
                for (int k = 0; k < nC; ++k) {
                    double dk = jacNorm[k];
                    if (dk == 0) {
                        dk = 1.0;
                    }
                    double xk = dk * currentPoint[k];
                    xNorm += xk * xk;
                    diag[k] = dk;
                }
                xNorm = FastMath.sqrt(xNorm);

                // initialize the step bound delta
                delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
            }

            // check orthogonality between function vector and jacobian columns
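            // For each column of the Jacobian, the loop below evaluates the cosine of
            // the angle between that column and the residuals vector, using the R
            // factor and Qt.res (which yield the same inner products, up to the sign
            // convention of the weighted Jacobian). A small maximum cosine means the
            // gradient Jt.r is negligible relative to the column norms and the cost,
            // i.e. the current point is close to a stationary point.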
            double maxCosine = 0;
            if (currentCost != 0) {
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    double s = jacNorm[pj];
                    if (s != 0) {
                        double sum = 0;
                        for (int i = 0; i <= j; ++i) {
                            sum += weightedJacobian[i][pj] * qtf[i];
                        }
                        maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
                    }
                }
            }
            if (maxCosine <= orthoTolerance) {
                // Convergence has been reached.
                return Optimum.of(
                        current,
                        evaluationCounter.getCount(),
                        iterationCounter.getCount());
            }

            // rescale if necessary
            for (int j = 0; j < nC; ++j) {
                diag[j] = FastMath.max(diag[j], jacNorm[j]);
            }

            // Inner loop.
            for (double ratio = 0; ratio < 1.0e-4;) {

                // save the state
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    oldX[pj] = currentPoint[pj];
                }
                final double previousCost = currentCost;
                double[] tmpVec = weightedResidual;
                weightedResidual = oldRes;
                oldRes = tmpVec;

                // determine the Levenberg-Marquardt parameter
                lmPar = determineLMParameter(qtf, delta, diag,
                                             internalData, solvedCols,
                                             work1, work2, work3, lmDir, lmPar);

                // compute the new point and the norm of the evolution direction
                double lmNorm = 0;
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    lmDir[pj] = -lmDir[pj];
                    currentPoint[pj] = oldX[pj] + lmDir[pj];
                    double s = diag[pj] * lmDir[pj];
                    lmNorm += s * s;
                }
                lmNorm = FastMath.sqrt(lmNorm);
                // on the first iteration, adjust the initial step bound.
                if (firstIteration) {
                    delta = FastMath.min(delta, lmNorm);
                }

                // Evaluate the function at x + p and calculate its norm.
                evaluationCounter.increment();
                current = problem.evaluate(new ArrayRealVector(currentPoint));
                currentResiduals = current.getResiduals().toArray();
                currentCost = current.getCost();
                currentPoint = current.getPoint().toArray();

                // compute the scaled actual reduction
                double actRed = -1.0;
                if (0.1 * currentCost < previousCost) {
                    double r = currentCost / previousCost;
                    actRed = 1.0 - r * r;
                }

                // compute the scaled predicted reduction
                // and the scaled directional derivative
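                // The block below implements the usual MINPACK estimates:
                // work1 accumulates R.p (same Euclidean norm as J.p), so
                //   preRed = (|J.p|^2 + 2 * lmPar * |D.p|^2) / previousCost^2
                //   dirDer = -(|J.p|^2 + lmPar * |D.p|^2) / previousCost^2
                // where D is the diagonal scaling matrix held in the diag array.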
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    double dirJ = lmDir[pj];
                    work1[j] = 0;
                    for (int i = 0; i <= j; ++i) {
                        work1[i] += weightedJacobian[i][pj] * dirJ;
                    }
                }
                double coeff1 = 0;
                for (int j = 0; j < solvedCols; ++j) {
                    coeff1 += work1[j] * work1[j];
                }
                double pc2 = previousCost * previousCost;
                coeff1 /= pc2;
                double coeff2 = lmPar * lmNorm * lmNorm / pc2;
                double preRed = coeff1 + 2 * coeff2;
                double dirDer = -(coeff1 + coeff2);

                // ratio of the actual to the predicted reduction
                ratio = (preRed == 0) ? 0 : (actRed / preRed);

                // update the step bound
                if (ratio <= 0.25) {
                    double tmp =
                        (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
                    if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
                        tmp = 0.1;
                    }
                    delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
                    lmPar /= tmp;
                } else if ((lmPar == 0) || (ratio >= 0.75)) {
                    delta = 2 * lmNorm;
                    lmPar *= 0.5;
                }

                // test for successful iteration.
                if (ratio >= 1.0e-4) {
                    // successful iteration, update the norm
                    firstIteration = false;
                    xNorm = 0;
                    for (int k = 0; k < nC; ++k) {
                        double xK = diag[k] * currentPoint[k];
                        xNorm += xK * xK;
                    }
                    xNorm = FastMath.sqrt(xNorm);

                    // tests for convergence.
                    if (checker != null && checker.converged(iterationCounter.getCount(), previous, current)) {
                        return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
                    }
                } else {
                    // failed iteration, reset the previous values
                    currentCost = previousCost;
                    for (int j = 0; j < solvedCols; ++j) {
                        int pj = permutation[j];
                        currentPoint[pj] = oldX[pj];
                    }
                    tmpVec = weightedResidual;
                    weightedResidual = oldRes;
                    oldRes = tmpVec;
                    // Reset "current" to previous values.
                    current = previous;
                }

                // Default convergence criteria.
                if ((FastMath.abs(actRed) <= costRelativeTolerance &&
                     preRed <= costRelativeTolerance &&
                     ratio <= 2.0) ||
                    delta <= parRelativeTolerance * xNorm) {
                    return Optimum.of(current, evaluationCounter.getCount(), iterationCounter.getCount());
                }

                // tests for termination and stringent tolerances
                if (FastMath.abs(actRed) <= TWO_EPS &&
                    preRed <= TWO_EPS &&
                    ratio <= 2.0) {
                    throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
                                                        costRelativeTolerance);
                } else if (delta <= TWO_EPS * xNorm) {
                    throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
                                                        parRelativeTolerance);
                } else if (maxCosine <= TWO_EPS) {
                    throw new MathIllegalStateException(LocalizedOptimFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
                                                        orthoTolerance);
                }
            }
        }
    }

    /**
     * Holds internal data.
     * This structure was created so that all optimizer fields can be "final".
     * Code should be further refactored in order to not pass around arguments
     * that will be modified in-place (cf. "work" arrays).
     */
    private static class InternalData {
        /** Weighted Jacobian. */
        private final double[][] weightedJacobian;
        /** Columns permutation array. */
        private final int[] permutation;
        /** Rank of the Jacobian matrix. */
        private final int rank;
        /** Diagonal elements of the R matrix in the QR decomposition. */
        private final double[] diagR;
        /** Norms of the columns of the jacobian matrix. */
        private final double[] jacNorm;
        /** Coefficients of the Householder transforms vectors. */
        private final double[] beta;

        /**
         * <p>
         * All arrays are stored by reference
         * </p>
         * @param weightedJacobian Weighted Jacobian.
         * @param permutation Columns permutation array.
         * @param rank Rank of the Jacobian matrix.
         * @param diagR Diagonal elements of the R matrix in the QR decomposition.
         * @param jacNorm Norms of the columns of the jacobian matrix.
         * @param beta Coefficients of the Householder transforms vectors.
         */
        InternalData(double[][] weightedJacobian,
                     int[] permutation,
                     int rank,
                     double[] diagR,
                     double[] jacNorm,
                     double[] beta) {
            this.weightedJacobian = weightedJacobian; // NOPMD - storing array references is intentional and documented here
            this.permutation = permutation; // NOPMD - storing array references is intentional and documented here
            this.rank = rank;
            this.diagR = diagR; // NOPMD - storing array references is intentional and documented here
            this.jacNorm = jacNorm; // NOPMD - storing array references is intentional and documented here
            this.beta = beta; // NOPMD - storing array references is intentional and documented here
        }
    }

    /**
     * Determines the Levenberg-Marquardt parameter.
     *
     * <p>This implementation is a translation in Java of the MINPACK
     * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
     * routine.</p>
     * <p>This method computes the Levenberg-Marquardt parameter and the
     * corresponding direction: the direction is stored in {@code lmDir} and the
     * updated parameter is returned.</p>
     * <p>The authors of the original Fortran function are:</p>
     * <ul>
     * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
     * <li>Burton S. Garbow</li>
     * <li>Kenneth E. Hillstrom</li>
     * <li>Jorge J. More</li>
     * </ul>
     * <p>Luc Maisonobe did the Java translation.</p>
     *
     * @param qy Array containing qTy.
     * @param delta Upper bound on the Euclidean norm of diag * lmDir.
     * @param diag Diagonal matrix.
     * @param internalData Data (modified in-place in this method).
     * @param solvedCols Number of solved columns.
     * @param work1 work array
     * @param work2 work array
     * @param work3 work array
     * @param lmDir the "returned" LM direction will be stored in this array.
     * @param lmPar the value of the LM parameter from the previous iteration.
     * @return the new LM parameter
     */
    private double determineLMParameter(double[] qy, double delta, double[] diag,
                                        InternalData internalData, int solvedCols,
                                        double[] work1, double[] work2, double[] work3,
                                        double[] lmDir, double lmPar) {
        final double[][] weightedJacobian = internalData.weightedJacobian;
        final int[] permutation = internalData.permutation;
        final int rank = internalData.rank;
        final double[] diagR = internalData.diagR;

        final int nC = weightedJacobian[0].length;

        // compute and store in lmDir the Gauss-Newton direction; if the
        // jacobian is rank-deficient, obtain a least squares solution
        for (int j = 0; j < rank; ++j) {
            lmDir[permutation[j]] = qy[j];
        }
        for (int j = rank; j < nC; ++j) {
            lmDir[permutation[j]] = 0;
        }
        for (int k = rank - 1; k >= 0; --k) {
            int pk = permutation[k];
            double ypk = lmDir[pk] / diagR[pk];
            for (int i = 0; i < k; ++i) {
                lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
            }
            lmDir[pk] = ypk;
        }

        // evaluate the function at the origin, and test
        // for acceptance of the Gauss-Newton direction
        double dxNorm = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double s = diag[pj] * lmDir[pj];
            work1[pj] = s;
            dxNorm += s * s;
        }
        dxNorm = FastMath.sqrt(dxNorm);
        double fp = dxNorm - delta;
        if (fp <= 0.1 * delta) {
            lmPar = 0;
            return lmPar;
        }

        // if the jacobian is not rank deficient, the Newton step provides
        // a lower bound, parl, for the zero of the function,
        // otherwise set this bound to zero
        double sum2;
        double parl = 0;
        if (rank == solvedCols) {
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] *= diag[pj] / dxNorm;
            }
            sum2 = 0;
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                double sum = 0;
                for (int i = 0; i < j; ++i) {
                    sum += weightedJacobian[i][pj] * work1[permutation[i]];
                }
                double s = (work1[pj] - sum) / diagR[pj];
                work1[pj] = s;
                sum2 += s * s;
            }
            parl = fp / (delta * sum2);
        }

        // calculate an upper bound, paru, for the zero of the function
        sum2 = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double sum = 0;
            for (int i = 0; i <= j; ++i) {
                sum += weightedJacobian[i][pj] * qy[i];
            }
            sum /= diag[pj];
            sum2 += sum * sum;
        }
        double gNorm = FastMath.sqrt(sum2);
        double paru = gNorm / delta;
        if (paru == 0) {
            paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
        }

        // if the input par lies outside of the interval (parl,paru),
        // set par to the closer endpoint
        lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
        if (lmPar == 0) {
            lmPar = gNorm / dxNorm;
        }

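        // The loop below is a safeguarded iteration on lmPar (as in the original
        // MINPACK lmpar routine): lmPar is refined while being kept inside the
        // bracket [parl, paru] until the scaled step satisfies
        // |diag.lmDir| - delta <= 0.1 * delta, with the number of iterations
        // bounded by the countdown counter.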
        for (int countdown = 10; countdown >= 0; --countdown) {

            // evaluate the function at the current value of lmPar
            if (lmPar == 0) {
                lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
            }
            double sPar = FastMath.sqrt(lmPar);
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] = sPar * diag[pj];
            }
            determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);

            dxNorm = 0;
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                double s = diag[pj] * lmDir[pj];
                work3[pj] = s;
                dxNorm += s * s;
            }
            dxNorm = FastMath.sqrt(dxNorm);
            double previousFP = fp;
            fp = dxNorm - delta;

            // if the function is small enough, accept the current value
            // of lmPar, also test for the exceptional cases where parl is zero
            if (FastMath.abs(fp) <= 0.1 * delta ||
                (parl == 0 &&
                 fp <= previousFP &&
                 previousFP < 0)) {
                return lmPar;
            }

            // compute the Newton correction
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] = work3[pj] * diag[pj] / dxNorm;
            }
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] /= work2[j];
                double tmp = work1[pj];
                for (int i = j + 1; i < solvedCols; ++i) {
                    work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
                }
            }
            sum2 = 0;
            for (int j = 0; j < solvedCols; ++j) {
                double s = work1[permutation[j]];
                sum2 += s * s;
            }
            double correction = fp / (delta * sum2);

            // depending on the sign of the function, update parl or paru.
            if (fp > 0) {
                parl = FastMath.max(parl, lmPar);
            } else if (fp < 0) {
                paru = FastMath.min(paru, lmPar);
            }

            // compute an improved estimate for lmPar
            lmPar = FastMath.max(parl, lmPar + correction);
        }

        return lmPar;
    }

    /**
     * Solve a*x = b and d*x = 0 in the least squares sense.
     * <p>This implementation is a translation in Java of the MINPACK
     * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
     * routine.</p>
     * <p>This method stores the solution in {@code lmDir} and the corresponding
     * diagonal elements in {@code lmDiag}.</p>
     * <p>The authors of the original Fortran function are:</p>
     * <ul>
     * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
     * <li>Burton S. Garbow</li>
     * <li>Kenneth E. Hillstrom</li>
     * <li>Jorge J. More</li>
     * </ul>
     * <p>Luc Maisonobe did the Java translation.</p>
     *
     * @param qy array containing qTy
     * @param diag diagonal matrix
     * @param lmDiag diagonal elements associated with lmDir
     * @param internalData Data (modified in-place in this method).
     * @param solvedCols Number of solved columns.
     * @param work work array
     * @param lmDir the "returned" LM direction is stored in this array
     */
    private void determineLMDirection(double[] qy, double[] diag,
                                      double[] lmDiag,
                                      InternalData internalData,
                                      int solvedCols,
                                      double[] work,
                                      double[] lmDir) {
        final int[] permutation = internalData.permutation;
        final double[][] weightedJacobian = internalData.weightedJacobian;
        final double[] diagR = internalData.diagR;

        // copy R and Qty to preserve input and initialize s
        // in particular, save the diagonal elements of R in lmDir
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            for (int i = j + 1; i < solvedCols; ++i) {
                weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
            }
            lmDir[j] = diagR[pj];
            work[j] = qy[j];
        }

        // eliminate the diagonal matrix d using a Givens rotation
        for (int j = 0; j < solvedCols; ++j) {

            // prepare the row of d to be eliminated, locating the
            // diagonal element using p from the Q.R. factorization
            int pj = permutation[j];
            double dpj = diag[pj];
            if (dpj != 0) {
                Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
            }
            lmDiag[j] = dpj;

            // the transformations to eliminate the row of d
            // modify only a single element of Qty
            // beyond the first n, which is initially zero.
            double qtbpj = 0;
            for (int k = j; k < solvedCols; ++k) {
                int pk = permutation[k];

                // determine a Givens rotation which eliminates the
                // appropriate element in the current row of d
                if (lmDiag[k] != 0) {

                    final double sin;
                    final double cos;
                    double rkk = weightedJacobian[k][pk];
                    if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
                        final double cotan = rkk / lmDiag[k];
                        sin = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
                        cos = sin * cotan;
                    } else {
                        final double tan = lmDiag[k] / rkk;
                        cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
                        sin = cos * tan;
                    }

                    // compute the modified diagonal element of R and
                    // the modified element of (Qty,0)
                    weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
                    final double temp = cos * work[k] + sin * qtbpj;
                    qtbpj = -sin * work[k] + cos * qtbpj;
                    work[k] = temp;

                    // accumulate the transformation in the row of s
                    for (int i = k + 1; i < solvedCols; ++i) {
                        double rik = weightedJacobian[i][pk];
                        final double temp2 = cos * rik + sin * lmDiag[i];
                        lmDiag[i] = -sin * rik + cos * lmDiag[i];
                        weightedJacobian[i][pk] = temp2;
                    }
                }
            }

            // store the diagonal element of s and restore
            // the corresponding diagonal element of R
            lmDiag[j] = weightedJacobian[j][permutation[j]];
            weightedJacobian[j][permutation[j]] = lmDir[j];
        }

        // solve the triangular system for z, if the system is
        // singular, then obtain a least squares solution
        int nSing = solvedCols;
        for (int j = 0; j < solvedCols; ++j) {
            if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
                nSing = j;
            }
            if (nSing < solvedCols) {
                work[j] = 0;
            }
        }
        if (nSing > 0) {
            for (int j = nSing - 1; j >= 0; --j) {
                int pj = permutation[j];
                double sum = 0;
                for (int i = j + 1; i < nSing; ++i) {
                    sum += weightedJacobian[i][pj] * work[i];
                }
                work[j] = (work[j] - sum) / lmDiag[j];
            }
        }

        // permute the components of z back to components of lmDir
        for (int j = 0; j < lmDir.length; ++j) {
            lmDir[permutation[j]] = work[j];
        }
    }

    /**
     * Decompose a matrix A as A.P = Q.R using Householder transforms.
     * <p>As suggested in the P. Lascaux and R. Theodor book
     * <i>Analyse numérique matricielle appliquée à
     * l'art de l'ingénieur</i> (Masson, 1986), instead of representing
     * the Householder transforms with u<sub>k</sub> unit vectors such that:
     * <pre>
     * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
     * </pre>
     * we use v<sub>k</sub> non-unit vectors such that:
     * <pre>
     * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
     * </pre>
     * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
     * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
     * them from the v<sub>k</sub> vectors would be costly.</p>
     * <p>This decomposition handles rank deficient cases since the transformations
     * are performed in non-increasing column norms order thanks to columns
     * pivoting. The diagonal elements of the R matrix are therefore also in
     * non-increasing absolute values order.</p>
     *
     * @param jacobian Weighted Jacobian matrix at the current point.
     * @param solvedCols Number of solved columns.
     * @return data used in other methods of this class.
     * @throws MathIllegalStateException if the decomposition cannot be performed.
     */
    private InternalData qrDecomposition(RealMatrix jacobian, int solvedCols)
        throws MathIllegalStateException {
        // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
        // hence the multiplication by -1.
        final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();

        final int nR = weightedJacobian.length;
        final int nC = weightedJacobian[0].length;

        final int[] permutation = new int[nC];
        final double[] diagR = new double[nC];
        final double[] jacNorm = new double[nC];
        final double[] beta = new double[nC];

        // initializations
        for (int k = 0; k < nC; ++k) {
            permutation[k] = k;
            double norm2 = 0;
            for (double[] doubles : weightedJacobian) {
                double akk = doubles[k];
                norm2 += akk * akk;
            }
            jacNorm[k] = FastMath.sqrt(norm2);
        }

        // transform the matrix column after column
        for (int k = 0; k < nC; ++k) {

            // select the column with the greatest norm on active components
            int nextColumn = -1;
            double ak2 = Double.NEGATIVE_INFINITY;
            for (int i = k; i < nC; ++i) {
                double norm2 = 0;
                for (int j = k; j < nR; ++j) {
                    double aki = weightedJacobian[j][permutation[i]];
                    norm2 += aki * aki;
                }
                if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
                    throw new MathIllegalStateException(LocalizedOptimFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
                                                        nR, nC);
                }
                if (norm2 > ak2) {
                    nextColumn = i;
                    ak2 = norm2;
                }
            }
            if (ak2 <= qrRankingThreshold) {
                return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
            }
            int pk = permutation[nextColumn];
            permutation[nextColumn] = permutation[k];
            permutation[k] = pk;

            // choose alpha such that Hk.u = alpha ek
            double akk = weightedJacobian[k][pk];
            double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
            double betak = 1.0 / (ak2 - akk * alpha);
            beta[pk] = betak;

            // transform the current column
            diagR[pk] = alpha;
            weightedJacobian[k][pk] -= alpha;
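            // after the subtraction above, rows k to nR-1 of column pk hold the
            // Householder vector vk = ak - alpha * ek; together with beta[pk] it is
            // reused below to update the remaining columns, and later in qTy()
            // to apply Qt to a vector.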

            // transform the remaining columns
            for (int dk = nC - 1 - k; dk > 0; --dk) {
                double gamma = 0;
                for (int j = k; j < nR; ++j) {
                    gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
                }
                gamma *= betak;
                for (int j = k; j < nR; ++j) {
                    weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
                }
            }
        }

        return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
    }

    /**
     * Compute the product Qt.y for some Q.R. decomposition.
     *
     * @param y vector to multiply (will be overwritten with the result)
     * @param internalData Data.
     */
    private void qTy(double[] y,
                     InternalData internalData) {
        final double[][] weightedJacobian = internalData.weightedJacobian;
        final int[] permutation = internalData.permutation;
        final double[] beta = internalData.beta;

        final int nR = weightedJacobian.length;
        final int nC = weightedJacobian[0].length;

        for (int k = 0; k < nC; ++k) {
            int pk = permutation[k];
            double gamma = 0;
            for (int i = k; i < nR; ++i) {
                gamma += weightedJacobian[i][pk] * y[i];
            }
            gamma *= beta[pk];
            for (int i = k; i < nR; ++i) {
                y[i] -= gamma * weightedJacobian[i][pk];
            }
        }
    }
}