/*
 * Licensed to the Hipparchus project under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The Hipparchus project licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.hipparchus.ode.nonstiff;

import org.hipparchus.exception.MathIllegalArgumentException;
import org.hipparchus.linear.Array2DRowRealMatrix;
import org.hipparchus.linear.RealMatrix;
import org.hipparchus.ode.EquationsMapper;
import org.hipparchus.ode.ODEStateAndDerivative;
import org.hipparchus.util.FastMath;


/**
 * This class implements explicit Adams-Bashforth integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Bashforth methods (in fact due to Adams alone) are explicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses adaptive stepsize to implement error control, whereas
 * classical implementations use a fixed step size. The value of the state vector
 * at step n+1 is a simple combination of the value at step n and of the
 * derivatives at steps n, n-1, n-2 ... Depending on the number k of previous
 * steps one wants to use for computing the next value, different formulas
 * are available:</p>
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n</sub></li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (3y'<sub>n</sub>-y'<sub>n-1</sub>)/2</li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (23y'<sub>n</sub>-16y'<sub>n-1</sub>+5y'<sub>n-2</sub>)/12</li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (55y'<sub>n</sub>-59y'<sub>n-1</sub>+37y'<sub>n-2</sub>-9y'<sub>n-3</sub>)/24</li>
 *   <li>...</li>
 * </ul>
 *
 * <p>A k-steps Adams-Bashforth method is of order k.</p>
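 *
 * <p>As a hedged illustration only (this class itself uses the adaptive Nordsieck form described
 * below), a minimal fixed-step sketch of the k = 2 formula could look like the following, where
 * {@code ode}, {@code y}, {@code yPrev}, {@code t0}, {@code h} and {@code tEnd} are assumed
 * user-provided names and the derivative one step before t0 comes from some starter scheme:</p>
 * <pre>
 *   // illustrative fixed-step Adams-Bashforth sketch for k = 2 (not the code of this class)
 *   double[] fPrev = ode.computeDerivatives(t0 - h, yPrev); // y'_{n-1}, from a starter step
 *   double[] fCurr = ode.computeDerivatives(t0, y);         // y'_n
 *   for (double t = t0; t + h &lt;= tEnd; t += h) {
 *       for (int i = 0; i &lt; y.length; ++i) {
 *           // y_{n+1} = y_n + h (3 y'_n - y'_{n-1}) / 2
 *           y[i] += h * (3 * fCurr[i] - fPrev[i]) / 2;
 *       }
 *       fPrev = fCurr;
 *       fCurr = ode.computeDerivatives(t + h, y);
 *   }
 * </pre>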
 *
 * <p>There must be sufficient time for the {@link #setStarterIntegrator(org.hipparchus.ode.ODEIntegrator)
 * starter integrator} to take several steps between the last reset event and the end
 * of integration, otherwise an exception may be thrown during integration. The user can
 * adjust the end date of integration, or the step size of the starter integrator, to
 * ensure a sufficient number of steps can be completed before the end of integration.
 * </p>
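 *
 * <p>A typical use of this class, sketched here with arbitrary placeholder values for the step
 * bounds and tolerances and an assumed user-provided {@code ode} and initial state {@code y0},
 * could be:</p>
 * <pre>
 *   // 4-steps scheme, steps between 1.0e-6 and 100.0, scalar tolerances of 1.0e-10
 *   AdamsBashforthIntegrator integrator =
 *       new AdamsBashforthIntegrator(4, 1.0e-6, 100.0, 1.0e-10, 1.0e-10);
 *   // integrate the user-provided OrdinaryDifferentialEquation from t = 0 to t = 10
 *   ODEStateAndDerivative finalState =
 *       integrator.integrate(ode, new ODEState(0.0, y0), 10.0);
 * </pre>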
 *
 * <p><strong>Implementation details</strong></p>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * \[
 *   \left\{\begin{align}
 *   s_1(n) &= h y'_n \text{ for first derivative}\\
 *   s_2(n) &= \frac{h^2}{2} y_n'' \text{ for second derivative}\\
 *   s_3(n) &= \frac{h^3}{6} y_n''' \text{ for third derivative}\\
 *   &\cdots\\
 *   s_k(n) &= \frac{h^k}{k!} y_n^{(k)} \text{ for } k^\mathrm{th} \text{ derivative}
 *   \end{align}\right.
 * \]</p>
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * \[
 *   q_n = [ s_1(n-1) s_1(n-2) \ldots s_1(n-(k-1)) ]^T
 * \]
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Bashforth methods can be written:</p>
 * \[
 *   \left\{\begin{align}
 *   k = 1: & y_{n+1} = y_n + s_1(n) \\
 *   k = 2: & y_{n+1} = y_n + \frac{3}{2} s_1(n) + [ \frac{-1}{2} ] q_n \\
 *   k = 3: & y_{n+1} = y_n + \frac{23}{12} s_1(n) + [ \frac{-16}{12} \frac{5}{12} ] q_n \\
 *   k = 4: & y_{n+1} = y_n + \frac{55}{24} s_1(n) + [ \frac{-59}{24} \frac{37}{24} \frac{-9}{24} ] q_n \\
 *   & \cdots
 *   \end{align}\right.
 * \]
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n) and q<sub>n</sub>), our implementation uses the Nordsieck vector with
 * higher-degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * \[
 *   r_n = [ s_2(n), s_3(n) \ldots s_k(n) ]^T
 * \]
 * (here again we omit the k index in the notation for clarity)
 * </p>
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * \[
 *   s_1(n-i) = s_1(n) + \sum_{j\gt 0} (j+1) (-i)^j s_{j+1}(n)
 * \]
 * The previous formula can be used with several values for i to compute the transform between
 * classical representation and Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * \[
 *   q_n = s_1(n) u + P r_n
 * \]
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)×(k-1) matrix built
 * with the \((j+1) (-i)^j\) terms with i being the row number starting from 1 and j being
 * the column number starting from 1:
 * \[
 *   P=\begin{bmatrix}
 *     -2 &  3 &   -4 &    5 & \ldots \\
 *     -4 & 12 &  -32 &   80 & \ldots \\
 *     -6 & 27 & -108 &  405 & \ldots \\
 *     -8 & 48 & -256 & 1280 & \ldots \\
 *        &    & \ldots\\
 *   \end{bmatrix}
 * \]
 * </p>
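 *
 * <p>The entries of P follow directly from the \((j+1) (-i)^j\) definition above. As a small
 * illustration (a stand-alone sketch, not the code actually used internally), the
 * (k-1)×(k-1) matrix can be filled for a given k as:</p>
 * <pre>
 *   // illustrative construction of the P matrix for a k-steps method
 *   final int n = k - 1;
 *   final double[][] p = new double[n][n];
 *   for (int i = 1; i &lt;= n; ++i) {     // row index, starting from 1
 *       for (int j = 1; j &lt;= n; ++j) { // column index, starting from 1
 *           p[i - 1][j - 1] = (j + 1) * FastMath.pow(-i, j);
 *       }
 *   }
 * </pre>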
 *
 * <p>Using the Nordsieck vector has several advantages:</p>
 * <ul>
 *   <li>it greatly simplifies step interpolation as the interpolator mainly applies
 *   Taylor series formulas,</li>
 *   <li>it simplifies step changes that occur when discrete events that truncate
 *   the step are triggered,</li>
 *   <li>it allows the methods to be extended in order to support adaptive stepsize.</li>
 * </ul>
 *
 * <p>The Nordsieck vector at step n+1 is computed from the Nordsieck vector at step n as follows:
 * <ul>
 *   <li>y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 *   <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 *   <li>r<sub>n+1</sub> = (s<sub>1</sub>(n) - s<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * <p>where A is a row-shifting matrix (the lower left part is an identity matrix):</p>
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *    A = [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre>
 *
 * <p>The P<sup>-1</sup> u vector and the P<sup>-1</sup> A P matrix do not depend on the state;
 * they only depend on k and are therefore precomputed once and for all.</p>
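 *
 * <p>For a one-dimensional state, one step of the scheme above can be sketched as follows
 * (an illustration with assumed names {@code y}, {@code scaled} = s<sub>1</sub>(n),
 * {@code rn} = r<sub>n</sub>, {@code pInvU} = P<sup>-1</sup> u and {@code pInvAP} = P<sup>-1</sup> A P;
 * the actual implementation works on vector states and also handles step size changes):</p>
 * <pre>
 *   // y_{n+1} = y_n + s_1(n) + u^T r_n
 *   double sumR = 0;
 *   for (int j = 0; j &lt; rn.length; ++j) {
 *       sumR += rn[j];
 *   }
 *   final double yNext = y + scaled + sumR;
 *
 *   // s_1(n+1) = h f(t_{n+1}, y_{n+1})
 *   final double scaledNext = h * ode.computeDerivatives(tNext, new double[] { yNext })[0];
 *
 *   // r_{n+1} = (s_1(n) - s_1(n+1)) P^{-1} u + P^{-1} A P r_n
 *   final double[] rNext = new double[rn.length];
 *   for (int i = 0; i &lt; rn.length; ++i) {
 *       rNext[i] = (scaled - scaledNext) * pInvU[i];
 *       for (int j = 0; j &lt; rn.length; ++j) {
 *           rNext[i] += pInvAP[i][j] * rn[j];
 *       }
 *   }
 * </pre>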
 *
 */
public class AdamsBashforthIntegrator extends AdamsIntegrator {

    /** Name of integration scheme. */
    public static final String METHOD_NAME = "Adams-Bashforth";

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception MathIllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double scalAbsoluteTolerance,
                                    final double scalRelativeTolerance)
        throws MathIllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double[] vecAbsoluteTolerance,
                                    final double[] vecRelativeTolerance)
        throws IllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }

    /** {@inheritDoc} */
    @Override
    protected double errorEstimation(final double[] previousState, final double predictedTime,
                                     final double[] predictedState,
                                     final double[] predictedScaled,
                                     final RealMatrix predictedNordsieck) {

        final StepsizeHelper helper = getStepSizeHelper();
        double error = 0;
        for (int i = 0; i < helper.getMainSetDimension(); ++i) {
            final double tol = helper.getTolerance(i, FastMath.abs(predictedState[i]));

            // apply Taylor formula from high order to low order,
            // for the sake of numerical accuracy
            double variation = 0;
            int sign = predictedNordsieck.getRowDimension() % 2 == 0 ? -1 : 1;
            for (int k = predictedNordsieck.getRowDimension() - 1; k >= 0; --k) {
                variation += sign * predictedNordsieck.getEntry(k, i);
                sign       = -sign;
            }
            variation -= predictedScaled[i];

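            // the ratio below is the residual of the backward Taylor expansion from the
            // predicted point to the step start (it would vanish if the expansion were
            // exact), scaled by the tolerance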
            final double ratio  = (predictedState[i] - previousState[i] + variation) / tol;
            error += ratio * ratio;

        }

        return FastMath.sqrt(error / helper.getMainSetDimension());

    }

    /** {@inheritDoc} */
    @Override
    protected AdamsStateInterpolator finalizeStep(final double stepSize, final double[] predictedState,
                                                  final double[] predictedScaled, final Array2DRowRealMatrix predictedNordsieck,
                                                  final boolean isForward,
                                                  final ODEStateAndDerivative globalPreviousState,
                                                  final ODEStateAndDerivative globalCurrentState,
                                                  final EquationsMapper equationsMapper) {
        return new AdamsStateInterpolator(getStepSize(), globalCurrentState,
                                          predictedScaled, predictedNordsieck, isForward,
                                          getStepStart(), globalCurrentState, equationsMapper);
    }

}