/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This is not the original file distributed by the Apache Software Foundation
 * It has been modified by the Hipparchus project
 */
package org.hipparchus.stat.regression;

import org.hipparchus.linear.Array2DRowRealMatrix;
import org.hipparchus.linear.LUDecomposition;
import org.hipparchus.linear.RealMatrix;
import org.hipparchus.linear.RealVector;

/**
 * The GLS implementation of multiple linear regression.
 *
 * GLS assumes a general covariance matrix Omega of the error terms
 * <pre>
 * u ~ N(0, Omega)
 * </pre>
 *
 * The GLS estimator is
 * <pre>
 * b = (X' Omega^-1 X)^-1 X' Omega^-1 y
 * </pre>
 * whose variance is
 * <pre>
 * Var(b) = (X' Omega^-1 X)^-1
 * </pre>
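 *
 * <p>A minimal usage sketch (the sample data and the diagonal covariance matrix
 * below are illustrative only):</p>
 * <pre>{@code
 * GLSMultipleLinearRegression regression = new GLSMultipleLinearRegression();
 * double[]   y     = { 2.0,  4.0,  7.0,  8.0, 11.0 };        // illustrative observations
 * double[][] x     = { {1.0}, {2.0}, {3.0}, {4.0}, {5.0} };  // one regressor; an intercept is added by default
 * double[][] omega = { { 1.0, 0.0, 0.0, 0.0, 0.0 },          // illustrative error covariance matrix
 *                      { 0.0, 2.0, 0.0, 0.0, 0.0 },
 *                      { 0.0, 0.0, 3.0, 0.0, 0.0 },
 *                      { 0.0, 0.0, 0.0, 4.0, 0.0 },
 *                      { 0.0, 0.0, 0.0, 0.0, 5.0 } };
 * regression.newSampleData(y, x, omega);
 * double[] beta = regression.estimateRegressionParameters(); // { intercept, slope }
 * }</pre>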
 */
public class GLSMultipleLinearRegression extends AbstractMultipleLinearRegression {

    /** Covariance matrix. */
    private RealMatrix Omega;

    /** Inverse of covariance matrix. */
    private RealMatrix OmegaInverse;

    /** Empty constructor.
     * <p>
     * This constructor is not strictly necessary, but it prevents spurious
     * javadoc warnings with JDK 18 and later.
     * </p>
     * @since 3.0
     */
    public GLSMultipleLinearRegression() { // NOPMD - unnecessary constructor added intentionally to make javadoc happy
        // nothing to do
    }

    /** Replace sample data, overriding any previous sample.
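     * <p>For instance, when the errors are uncorrelated but have known, unequal
     * variances, the covariance argument is simply a diagonal matrix of those
     * variances (the values below are illustrative only):</p>
     * <pre>{@code
     * double[][] covariance = { { 0.5, 0.0, 0.0 },   // illustrative error variances
     *                           { 0.0, 1.0, 0.0 },
     *                           { 0.0, 0.0, 2.0 } };
     * }</pre>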
     * @param y y values of the sample
     * @param x x values of the sample
     * @param covariance array representing the covariance matrix
     */
    public void newSampleData(double[] y, double[][] x, double[][] covariance) {
        validateSampleData(x, y);
        newYSampleData(y);
        newXSampleData(x);
        validateCovarianceData(x, covariance);
        newCovarianceData(covariance);
    }

    /**
     * Add the covariance data.
     *
     * @param omega the [n,n] array representing the covariance
     */
    protected void newCovarianceData(double[][] omega) {
        this.Omega = new Array2DRowRealMatrix(omega);
        this.OmegaInverse = null;
    }

    /**
     * Get the inverse of the covariance.
     * <p>The inverse of the covariance matrix is lazily evaluated and cached.</p>
     * @return inverse of the covariance
     */
    protected RealMatrix getOmegaInverse() {
        if (OmegaInverse == null) {
            OmegaInverse = new LUDecomposition(Omega).getSolver().getInverse();
        }
        return OmegaInverse;
    }

    /**
     * Calculates beta by GLS.
     * <pre>
     * b = (X' Omega^-1 X)^-1 X' Omega^-1 y
     * </pre>
     * @return beta
     */
    @Override
    protected RealVector calculateBeta() {
        RealMatrix OI = getOmegaInverse();
        RealMatrix XT = getX().transpose();
        RealMatrix XTOIX = XT.multiply(OI).multiply(getX());
        RealMatrix inverse = new LUDecomposition(XTOIX).getSolver().getInverse();
        return inverse.multiply(XT).multiply(OI).operate(getY());
    }

    /**
     * Calculates the variance of beta.
     * <pre>
     * Var(b) = (X' Omega^-1 X)^-1
     * </pre>
     * @return the beta variance matrix
     */
    @Override
    protected RealMatrix calculateBetaVariance() {
        RealMatrix OI = getOmegaInverse();
        RealMatrix XTOIX = getX().transposeMultiply(OI).multiply(getX());
        return new LUDecomposition(XTOIX).getSolver().getInverse();
    }

    /**
     * Calculates the estimated variance of the error term using the formula
     * <pre>
     * Var(u) = Tr(u' Omega^-1 u)/(n-k)
     * </pre>
     * where n and k are the row and column dimensions of the design
     * matrix X.
     *
     * @return error variance
     */
    @Override
    protected double calculateErrorVariance() {
        RealVector residuals = calculateResiduals();
        double t = residuals.dotProduct(getOmegaInverse().operate(residuals));
        return t / (getX().getRowDimension() - getX().getColumnDimension());
    }

}