LinearPredictor.cc
Go to the documentation of this file.
1 
10 #include "LinearPredictor.h"
11 
12 #include <cmath>
13 #include <vector>
14 
15 #ifndef __python_test__
16 #include <utl/AugerException.h>
17 #endif
18 
19 #include <Eigen/Dense>
20 using namespace Eigen;
21 
22 #ifndef __python_test__
23 using namespace utl;
24 #endif
25 using namespace std;
26 
/// Construct a multi-channel linear predictor.
///
/// @param numCoeffPerChannel  number of prediction coefficients per channel
/// @param delayLine           gap (in samples) between the coefficient window
///                            and the predicted sample
/// @param numChannels         number of input channels (must be >= 1)
/// @param optimization        0 = plain covariance accumulation in train();
///                            >0 = exploit the block-Toeplitz structure
///
/// @throws InvalidConfigurationException if numChannels is zero
///         (prints and exit(1)s instead when built with __python_test__).
///
/// NOTE(review): the member-init-list order below must match the declaration
/// order in LinearPredictor.h (C++ initializes in declaration order) — confirm
/// against the header, which is not visible here.
LinearPredictor::LinearPredictor(unsigned int numCoeffPerChannel, unsigned int delayLine, unsigned int numChannels, unsigned int optimization)
  :
  numCoeffPerChannel(numCoeffPerChannel),
  numChannels(numChannels),
  delayLine(delayLine),
  //channels(numChannels), // unused. LN.
  // Covariance matrix dimension: one block row per coefficient plus one for
  // the predicted sample, each block of size numChannels.
  dimCovMatr((numCoeffPerChannel + 1)*numChannels),
  dimPredictorCoefficients(numCoeffPerChannel*numChannels),
  // Index offset from the start of the sampling window to the predicted sample.
  totalPredictorOffset(numCoeffPerChannel + delayLine),
  optimization(optimization),
  covMatr(dimCovMatr, dimCovMatr),
  predictorCoefficients()
{
  if (!numChannels) {
#ifndef __python_test__
    throw InvalidConfigurationException("Need at least one channel to work on.");
#else
    cout << "Need at least one channel to work on." << endl;
    exit(1);
#endif
  };
  // train() accumulates into covMatr, so it must start from zero.
  covMatr.setZero();
}
51 
52  void LinearPredictor::train(std::vector< std::vector<double> > const &channels, double fudgeFactor) {
53  if (channels.size() != numChannels) {
54  #ifndef __python_test__
55  throw InvalidConfigurationException("The number of provided (active) channels is not equal to the number of configured channels.");
56  #else
57  cout << "The number of provided (active) channels is not equal to the number of configured channels." << endl;
58  exit(1);
59  #endif
60  };
61  unsigned int channelLength = channels[0].size();
62  unsigned int maxTraceStartIndex = channelLength - totalPredictorOffset;
63  VectorXd samplingVector(dimCovMatr);
64 
65  // covMatr contains all relevant (co)variances.
66  // The complete matrix looks like:
67  // X ... X Y ... Y
68  // . . . Y . Y
69  // . . . Y . Y
70  // . . . Y . Y
71  // X ... X Y ... Y
72  // Z ... Z A ... A
73  // . . . . . .
74  // . . . . . .
75  // . . . . . .
76  // Z ... Z A ... A
77  // XX is a block-Toeplitz matrix
78  // Where X is a square matrix with as dimension the total number of coefficients dimX
79  #define dimX dimPredictorCoefficients
80  // Where A is a square matrix with as dimension the total number of channels dimA
81  #define dimA numChannels
82  // X and Y are used but Z and A are not used in the rest of this calculation
83 
84  for (unsigned int traceStartIndex = 0; traceStartIndex < maxTraceStartIndex; ++traceStartIndex) {
85  // fill the first part of the sampling vector before the delay line into the sampling vector
86  unsigned int samplingIndex = 0;
87  unsigned int traceIndex = 0;
88  for (
89  traceIndex = traceStartIndex;
90  traceIndex < traceStartIndex + numCoeffPerChannel;
91  ++traceIndex
92  ) {
93  for (unsigned int channelNum = 0; channelNum < numChannels; ++channelNum) {
94  samplingVector(samplingIndex++) = channels[channelNum][traceIndex];
95  }
96  }
97  // fill the last samples after the delay line into the sampling vector
98  traceIndex += delayLine;
99  for (unsigned int channelNum = 0; channelNum < numChannels; ++channelNum, ++samplingIndex) {
100  samplingVector(samplingIndex) = channels[channelNum][traceIndex];
101  }
102  if (optimization == 0) {
103  covMatr += samplingVector * samplingVector.transpose();
104  } else {
105  // Optimize exploiting the block-band-diagonal structure of the matrix
106  #define S samplingVector
107  covMatr.block(0, 0, dimA, dimX) += S.block(0, 0, dimA, 1) * S.block(0, 0, dimX, 1).transpose();
108  covMatr.block(dimA, 0, dimX, dimA) += S.block(dimA, 0, dimX, 1) * S.block(0, 0, dimA, 1).transpose();
109  covMatr.block(dimX, dimA, dimA, dimX) += S.block(dimX, 0, dimA, 1) * S.block(dimA, 0, dimX, 1).transpose();
110  covMatr.block(0, dimX, dimX, dimA) += S.block(0, 0, dimX, 1) * S.block(dimX, 0, dimA, 1).transpose();
111  #undef S
112  }
113  }
114  if (optimization > 0) {
115  for (unsigned int i = 1; i < numCoeffPerChannel; ++i) {
116  for (unsigned int j = 1; j < numCoeffPerChannel; ++j) {
117  if (i > j) {
118  covMatr.block(i*dimA, j*dimA, dimA, dimA) = covMatr.block((i-j)*dimA, 0, dimA, dimA);
119  } else {
120  covMatr.block(i*dimA, j*dimA, dimA, dimA) = covMatr.block(0, (j-i)*dimA, dimA, dimA);
121  }
122  }
123  }
124  }
125  // We take the sub-matrix X from the covariance matrix
126  MatrixXd subMatrixX = covMatr.block(0, 0, dimX, dimX);
127  // We multiply the block diagonal with the fudge factor
128  for (unsigned int i = 0; i < numCoeffPerChannel; ++i) {
129  for (unsigned int a = 0; a < numChannels; ++a) {
130  for (unsigned int b = 0; b < numChannels; ++b) {
131  subMatrixX(i*numChannels+a, i*numChannels+b) *= (1.0+fudgeFactor);
132  }
133  }
134  }
135  // // This is the old rather inefficient way of finding the solution to the igenvalue problem
136  // // We calculate the inverse of the sub-matrix X
137  // MatrixXd inverseSubMatrixX(subMatrixX.inverse());
138  // // We take the sub-matrix Y from the covariance matrix
139  // MatrixXd subMatrixY = covMatr.block(0, dimX, dimX, dimA);
140  // // The coefficients are the inner product of these two
141  // predictorCoefficients = inverseSubMatrixX * subMatrixY;
142  // The matrix is positive semi-definite so we choose LDLT.
143  MatrixXd subMatrixY = covMatr.block(0, dimX, dimX, dimA);
144  predictorCoefficients = subMatrixX.ldlt().solve(subMatrixY);
145  }
146 
147  std::vector< std::vector<double> > LinearPredictor::predict(std::vector< std::vector<double> > const &channels) const {
148  if (channels.size() != numChannels) {
149  #ifndef __python_test__
150  throw InvalidConfigurationException("The number of provided (active) channels is not equal to the number of configured channels.");
151  #else
152  cout << "The number of provided (active) channels is not equal to the number of configured channels." << endl;
153  exit(1);
154  #endif
155  };
156  unsigned int channelLength = channels[0].size();
157  unsigned int maxTraceStartIndex = channelLength - totalPredictorOffset;
158  VectorXd samplingVector(dimCovMatr);
159 
160  std::vector< std::vector<double> > predicted(numChannels, vector<double>(channelLength, 0.0));
161 
162  for (unsigned int traceStartIndex = 0; traceStartIndex < maxTraceStartIndex; ++traceStartIndex) {
163  unsigned int traceIndexPredict = traceStartIndex + totalPredictorOffset;
164  for (unsigned int channelNum1 = 0; channelNum1 < numChannels; ++channelNum1) {
165  unsigned int samplingIndex = 0;
166  for (
167  unsigned int traceIndex = traceStartIndex;
168  traceIndex < traceStartIndex + numCoeffPerChannel;
169  ++traceIndex
170  ) {
171  for (unsigned int channelNum2 = 0; channelNum2 < numChannels; ++channelNum2) {
172  predicted[channelNum1][traceIndexPredict] += predictorCoefficients(samplingIndex++, channelNum1) * channels[channelNum2][traceIndex];
173  }
174  }
175  }
176  }
177  return predicted;
178  }
179 }
std::vector< std::vector< double > > predict(std::vector< std::vector< double > > const &channels) const
Base class for exceptions arising because configuration data are not valid.
#define dimX
int exit
Definition: dump1090.h:237
#define dimA
#define S
void train(std::vector< std::vector< double > > const &channels, double fudgeFactor)

, generated on Tue Sep 26 2023.