GestureRecognitionToolkit  Version: 0.2.5
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source, c++ machine learning library for real-time gesture recognition.
DTW.cpp
1 /*
2 GRT MIT License
3 Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>
4 
5 Permission is hereby granted, free of charge, to any person obtaining a copy of this software
6 and associated documentation files (the "Software"), to deal in the Software without restriction,
7 including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
9 subject to the following conditions:
10 
11 The above copyright notice and this permission notice shall be included in all copies or substantial
12 portions of the Software.
13 
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
15 LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
16 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
17 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
18 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 */
20 
21 #define GRT_DLL_EXPORTS
22 #include "DTW.h"
23 
24 GRT_BEGIN_NAMESPACE
25 
//Define the string that will be used to identify the object
const std::string DTW::id = "DTW";
//Return the static class identifier used by the module registry
std::string DTW::getId() { return DTW::id; }

//Register the DTW module with the Classifier base class so it can be created by name
RegisterClassifierModule< DTW > DTW::registerModule( DTW::getId() );
32 
33 DTW::DTW(bool useScaling,bool useNullRejection,Float nullRejectionCoeff,UINT rejectionMode,bool constrainWarpingPath,Float radius,bool offsetUsingFirstSample,bool useSmoothing,UINT smoothingFactor,Float nullRejectionLikelihoodThreshold) : Classifier( DTW::getId() )
34 {
35 
36  this->useScaling=useScaling;
37  this->useNullRejection = useNullRejection;
38  this->nullRejectionCoeff = nullRejectionCoeff;
39  this->nullRejectionLikelihoodThreshold = nullRejectionLikelihoodThreshold;
40  this->rejectionMode = rejectionMode;
41  this->constrainWarpingPath = constrainWarpingPath;
42  this->radius = radius;
43  this->offsetUsingFirstSample = offsetUsingFirstSample;
44  this->useSmoothing = useSmoothing;
45  this->smoothingFactor = smoothingFactor;
46 
47  supportsNullRejection = true;
48  trained=false;
49  useZNormalisation=false;
50  constrainZNorm=false;
51  trimTrainingData = false;
52 
53  zNormConstrainThreshold=0.2;
54  trimThreshold = 0.1;
55  maximumTrimPercentage = 90;
56 
57  numTemplates=0;
58  distanceMethod=EUCLIDEAN_DIST;
59 
60  averageTemplateLength =0;
61 
62  classifierMode = TIMESERIES_CLASSIFIER_MODE;
63 }
64 
//Copy constructor: delegates the member-wise deep copy to the assignment operator
DTW::DTW(const DTW &rhs) : Classifier( DTW::getId() )
{
    *this = rhs;
}
69 
//Destructor: all members clean themselves up, nothing to release manually
DTW::~DTW(void){
}
72 
73 DTW& DTW::operator=(const DTW &rhs){
74 
75  if( this != &rhs ){
76 
77  this->templatesBuffer = rhs.templatesBuffer;
78  this->distanceMatrices = rhs.distanceMatrices;
79  this->warpPaths = rhs.warpPaths;
80  this->continuousInputDataBuffer = rhs.continuousInputDataBuffer;
81  this->numTemplates = rhs.numTemplates;
82  this->useSmoothing = rhs.useSmoothing;
83  this->useZNormalisation = rhs.useZNormalisation;
84  this->constrainZNorm = rhs.constrainZNorm;
85  this->constrainWarpingPath = rhs.constrainWarpingPath;
86  this->trimTrainingData = rhs.trimTrainingData;
87  this->zNormConstrainThreshold = rhs.zNormConstrainThreshold;
88  this->radius = rhs.radius;
89  this->offsetUsingFirstSample = rhs.offsetUsingFirstSample;
90  this->trimThreshold = rhs.trimThreshold;
91  this->maximumTrimPercentage = rhs.maximumTrimPercentage;
92  this->smoothingFactor = rhs.smoothingFactor;
93  this->distanceMethod = rhs.distanceMethod;
94  this->rejectionMode = rhs.rejectionMode;
95  this->nullRejectionLikelihoodThreshold = rhs.nullRejectionLikelihoodThreshold;
96  this->averageTemplateLength = rhs.averageTemplateLength;
97 
98  //Copy the classifier variables
99  copyBaseVariables( (Classifier*)&rhs );
100  }
101 
102  return *this;
103 }
104 
105 bool DTW::deepCopyFrom(const Classifier *classifier){
106 
107  if( classifier == NULL ) return false;
108 
109  if( this->getClassifierType() == classifier->getClassifierType() ){
110 
111  DTW *ptr = (DTW*)classifier;
112  this->templatesBuffer = ptr->templatesBuffer;
113  this->distanceMatrices = ptr->distanceMatrices;
114  this->warpPaths = ptr->warpPaths;
115  this->continuousInputDataBuffer = ptr->continuousInputDataBuffer;
116  this->numTemplates = ptr->numTemplates;
117  this->useSmoothing = ptr->useSmoothing;
118  this->useZNormalisation = ptr->useZNormalisation;
119  this->constrainZNorm = ptr->constrainZNorm;
120  this->constrainWarpingPath = ptr->constrainWarpingPath;
121  this->trimTrainingData = ptr->trimTrainingData;
122  this->zNormConstrainThreshold = ptr->zNormConstrainThreshold;
123  this->radius = ptr->radius;
124  this->offsetUsingFirstSample = ptr->offsetUsingFirstSample;
125  this->trimThreshold = ptr->trimThreshold;
126  this->maximumTrimPercentage = ptr->maximumTrimPercentage;
127  this->smoothingFactor = ptr->smoothingFactor;
128  this->distanceMethod = ptr->distanceMethod;
129  this->rejectionMode = ptr->rejectionMode;
130  this->nullRejectionLikelihoodThreshold = ptr->nullRejectionLikelihoodThreshold;
131  this->averageTemplateLength = ptr->averageTemplateLength;
132 
133  //Copy the classifier variables
134  return copyBaseVariables( classifier );
135  }
136 
137  return false;
138 }
139 
142 
143  UINT bestIndex = 0;
144 
145  //Cleanup Memory
146  templatesBuffer.clear();
147  classLabels.clear();
148  trained = false;
149  continuousInputDataBuffer.clear();
150 
151  if( trimTrainingData ){
152  TimeSeriesClassificationSampleTrimmer timeSeriesTrimmer(trimThreshold,maximumTrimPercentage);
154  tempData.setNumDimensions( data.getNumDimensions() );
155 
156  for(UINT i=0; i<data.getNumSamples(); i++){
157  if( timeSeriesTrimmer.trimTimeSeries( data[i] ) ){
158  tempData.addSample(data[i].getClassLabel(), data[i].getData());
159  }else{
160  trainingLog << "Removing training sample " << i << " from the dataset as it could not be trimmed!" << std::endl;
161  }
162  }
163  //Overwrite the original training data with the trimmed dataset
164  data = tempData;
165  }
166 
167  if( data.getNumSamples() == 0 ){
168  errorLog << __GRT_LOG__ << " Can't train model as there are no samples in training data!" << std::endl;
169  return false;
170  }
171 
172  //Assign
173  numClasses = data.getNumClasses();
174  numTemplates = data.getNumClasses();
175  numInputDimensions = data.getNumDimensions();
176  templatesBuffer.resize( numClasses );
177  classLabels.resize( numClasses );
178  nullRejectionThresholds.resize( numClasses );
179  averageTemplateLength = 0;
180 
181  //Need to copy the labelled training data incase we need to scale it or znorm it
182  TimeSeriesClassificationData trainingData( data );
183 
184  //Perform any scaling or normalisation
185  ranges = trainingData.getRanges();
186  if( useScaling ) scaleData( trainingData );
187  if( useZNormalisation ) znormData( trainingData );
188 
189  //For each class, run a one-to-one DTW and find the template the best describes the data
190  for(UINT k=0; k<numTemplates; k++){
191  //Get the class label for the cth class
192  UINT classLabel = trainingData.getClassTracker()[k].classLabel;
193  TimeSeriesClassificationData classData = trainingData.getClassData( classLabel );
194  UINT numExamples = classData.getNumSamples();
195  bestIndex = 0;
196 
197  //Set the class label of this template
198  templatesBuffer[k].classLabel = classLabel;
199 
200  //Set the kth class label
201  classLabels[k] = classLabel;
202 
203  trainingLog << "Training Template: " << k << " Class: " << classLabel << std::endl;
204 
205  //Check to make sure we actually have some training examples
206  if( numExamples < 1 ){
207  errorLog << __GRT_LOG__ << " Can not train model: Num of Example is < 1! Class: " << classLabel << ". Turn off null rejection if you want to use DTW with only 1 training sample per class." << std::endl;
208  return false;
209  }
210 
211  if( numExamples == 1 && useNullRejection ){
212  errorLog << __GRT_LOG__ << " Can not train model as there is only 1 example in class: " << classLabel << ". Turn off null rejection if you want to use DTW with only 1 training sample per class." << std::endl;
213  return false;
214  }
215 
216  if( numExamples == 1 ){//If we have just one training example then we have to use it as the template
217  bestIndex = 0;
218  nullRejectionThresholds[k] = 0.0;//TODO-We need a better way of calculating this!
219  }else{
220  //Search for the best training example for this class
221  if( !train_NDDTW(classData,templatesBuffer[k],bestIndex) ){
222  errorLog << __GRT_LOG__ << " Failed to train template for class with label: " << classLabel << std::endl;
223  return false;
224  }
225  }
226 
227  //Add the template with the best index to the buffer
228  int trainingMethod = 0;
229  if(useSmoothing) trainingMethod = 1;
230 
231  switch (trainingMethod) {
232  case(0)://Standard Training
233  templatesBuffer[k].timeSeries = classData[bestIndex].getData();
234  break;
235  case(1)://Training using Smoothing
236  //Smooth the data, reducing its size by a factor set by smoothFactor
237  smoothData(classData[ bestIndex ].getData(),smoothingFactor,templatesBuffer[k].timeSeries);
238  break;
239  default:
240  errorLog << __GRT_LOG__ << " Can not train model: Unknown training method " << std::endl;
241  return false;
242  break;
243  }
244 
245  if( offsetUsingFirstSample ){
246  offsetTimeseries( templatesBuffer[k].timeSeries );
247  }
248 
249  //Add the average length of the training examples for this template to the overall averageTemplateLength
250  averageTemplateLength += templatesBuffer[k].averageTemplateLength;
251  }
252 
253  //Flag that the models have been trained
254  trained = true;
255  converged = true;
256  averageTemplateLength = averageTemplateLength/numTemplates;
257 
258  //Recompute the null rejection thresholds
260 
261  //Resize the prediction results to make sure it is setup for realtime prediction
262  continuousInputDataBuffer.clear();
263  continuousInputDataBuffer.resize(averageTemplateLength,VectorFloat(numInputDimensions,0));
264  classLikelihoods.resize(numTemplates,DEFAULT_NULL_LIKELIHOOD_VALUE);
265  classDistances.resize(numTemplates,0);
266  predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
267  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
268 
269  //Training complete
270  return trained;
271 }
272 
/**
 Trains one DTW template for a single class by running an all-pairs DTW over
 the class examples and picking the example with the smallest average distance
 to all the others. Also estimates the mean/sigma of the distances from that
 best example, used later for the null-rejection thresholds.
 @param trainingData the examples for one class
 @param dtwTemplate the template to fill in (classLabel is assumed already set by the caller)
 @param bestIndex set to the index of the winning example
 @return true on success
*/
bool DTW::train_NDDTW(TimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex){

    UINT numExamples = trainingData.getNumSamples();
    VectorFloat results(numExamples,0.0);               //Average distance of example m to all other examples
    MatrixFloat distanceResults(numExamples,numExamples); //Pairwise DTW distances
    dtwTemplate.averageTemplateLength = 0;

    for(UINT m=0; m<numExamples; m++){

        MatrixFloat templateA; //The m'th template
        MatrixFloat templateB; //The n'th template
        dtwTemplate.averageTemplateLength += trainingData[m].getLength();

        //Smooth the data if required
        if( useSmoothing ) smoothData(trainingData[m].getData(),smoothingFactor,templateA);
        else templateA = trainingData[m].getData();

        if( offsetUsingFirstSample ){
            offsetTimeseries(templateA);
        }

        for(UINT n=0; n<numExamples; n++){
            if(m!=n){
                //Smooth the data if required
                if( useSmoothing ) smoothData(trainingData[n].getData(),smoothingFactor,templateB);
                else templateB = trainingData[n].getData();

                if( offsetUsingFirstSample ){
                    offsetTimeseries(templateB);
                }

                //Compute the distance between the two time series
                MatrixFloat distanceMatrix(templateA.getNumRows(),templateB.getNumRows());
                Vector< IndexDist > warpPath;
                Float dist = computeDistance(templateA,templateB,distanceMatrix,warpPath);

                trainingLog << "Template: " << m << " Timeseries: " << n << " Dist: " << dist << std::endl;

                //Update the results values
                distanceResults[m][n] = dist;
                results[m] += dist;
            }else distanceResults[m][n] = 0; //The distance is zero because the two timeseries are the same
        }
    }

    //NOTE(review): caller guarantees numExamples >= 2 here, so this never divides by zero — confirm if call sites change
    for(UINT m=0; m<numExamples; m++) results[m]/=(numExamples-1);
    //Find the best average result, this is the result with the minimum value
    bestIndex = 0;
    Float bestAverage = results[0];
    for(UINT m=1; m<numExamples; m++){
        if( results[m] < bestAverage ){
            bestAverage = results[m];
            bestIndex = m;
        }
    }

    if( numExamples > 2 ){
        //Work out the threshold value for the best template: mean distance plus spread around it
        dtwTemplate.trainingMu = results[bestIndex];
        dtwTemplate.trainingSigma = 0.0;

        for(UINT n=0; n<numExamples; n++){
            if(n!=bestIndex){
                dtwTemplate.trainingSigma += SQR( distanceResults[ bestIndex ][n] - dtwTemplate.trainingMu );
            }
        }
        //numExamples-2: the best example itself is excluded, and one dof is used by the mean
        dtwTemplate.trainingSigma = sqrt( dtwTemplate.trainingSigma / Float(numExamples-2) );
    }else{
        warningLog << __GRT_LOG__ << " There are not enough examples to compute the trainingMu and trainingSigma for the template for class " << dtwTemplate.classLabel << std::endl;
        dtwTemplate.trainingMu = 0.0;
        dtwTemplate.trainingSigma = 0.0;
    }

    //Set the average length of the training examples
    dtwTemplate.averageTemplateLength = (UINT) (dtwTemplate.averageTemplateLength/Float(numExamples));

    trainingLog << "AverageTemplateLength: " << dtwTemplate.averageTemplateLength << std::endl;

    //Flag that the training was successfull
    return true;
}
354 
355 
/**
 Predicts the class of a complete input time series by computing the DTW
 distance to every trained template and choosing the closest one, with
 optional null rejection.
 @param inputTimeSeries the time series to classify; columns must match numInputDimensions
 @return true if a prediction was made (the result may still be the null class), false on error
*/
bool DTW::predict_(MatrixFloat &inputTimeSeries){

    if( !trained ){
        errorLog << __GRT_LOG__ << " The DTW templates have not been trained!" << std::endl;
        return false;
    }

    //Make sure the result buffers match the number of templates
    if( classLikelihoods.size() != numTemplates ) classLikelihoods.resize(numTemplates);
    if( classDistances.size() != numTemplates ) classDistances.resize(numTemplates);

    //Reset the prediction state
    predictedClassLabel = 0;
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    for(UINT k=0; k<classLikelihoods.size(); k++){
        classLikelihoods[k] = 0;
        classDistances[k] = DEFAULT_NULL_LIKELIHOOD_VALUE;
    }

    if( numInputDimensions != inputTimeSeries.getNumCols() ){
        errorLog << __GRT_LOG__ << " The number of features in the model (" << numInputDimensions << ") do not match that of the input time series (" << inputTimeSeries.getNumCols() << ")" << std::endl;
        return false;
    }

    //Perform any preprocessing if requried
    //timeSeriesPtr always points at the most recently processed version of the input
    MatrixFloat *timeSeriesPtr = &inputTimeSeries;
    MatrixFloat processedTimeSeries;
    MatrixFloat tempMatrix;
    if(useScaling){
        scaleData(*timeSeriesPtr,processedTimeSeries);
        timeSeriesPtr = &processedTimeSeries;
    }

    //Normalize the data if needed
    if( useZNormalisation ){
        znormData(*timeSeriesPtr,processedTimeSeries);
        timeSeriesPtr = &processedTimeSeries;
    }

    //Smooth the data if required
    if( useSmoothing ){
        smoothData(*timeSeriesPtr,smoothingFactor,tempMatrix);
        timeSeriesPtr = &tempMatrix;
    }

    //Offset the timeseries if required
    if( offsetUsingFirstSample ){
        offsetTimeseries( *timeSeriesPtr );
    }

    //Make the prediction by finding the closest template
    Float sum = 0;
    if( distanceMatrices.size() != numTemplates ) distanceMatrices.resize( numTemplates );
    if( warpPaths.size() != numTemplates ) warpPaths.resize( numTemplates );

    //Test the timeSeries against all the templates in the timeSeries buffer
    for(UINT k=0; k<numTemplates; k++){
        //Perform DTW
        classDistances[k] = computeDistance(templatesBuffer[k].timeSeries,*timeSeriesPtr,distanceMatrices[k],warpPaths[k]);

        //Likelihood is the inverse distance, clamped to avoid division by (near) zero
        if(classDistances[k] > 1e-8)
        {
            classLikelihoods[k] = 1.0 / classDistances[k];
        }
        else
        {
            classLikelihoods[k] = 1e8;
        }

        sum += classLikelihoods[k];
    }

    //See which gave the min distance
    UINT closestTemplateIndex = 0;
    bestDistance = classDistances[0];
    for(UINT k=1; k<numTemplates; k++){
        if( classDistances[k] < bestDistance ){
            bestDistance = classDistances[k];
            closestTemplateIndex = k;
        }
    }

    //Normalize the class likelihoods and check which class has the maximum likelihood
    UINT maxLikelihoodIndex = 0;
    maxLikelihood = 0;
    if( sum > 0 ){
        for(UINT k=0; k<numTemplates; k++){
            classLikelihoods[k] /= sum;
            if( classLikelihoods[k] > maxLikelihood ){
                maxLikelihood = classLikelihoods[k];
                maxLikelihoodIndex = k;
            }
        }
    }

    if( useNullRejection ){

        //Apply the configured null-rejection strategy; failing rejection yields the null class label
        switch( rejectionMode ){
            case TEMPLATE_THRESHOLDS:
                if( bestDistance <= nullRejectionThresholds[ closestTemplateIndex ] ) predictedClassLabel = templatesBuffer[ closestTemplateIndex ].classLabel;
                else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
                break;
            case CLASS_LIKELIHOODS:
                if( maxLikelihood >= nullRejectionLikelihoodThreshold) predictedClassLabel = templatesBuffer[ maxLikelihoodIndex ].classLabel;
                else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
                break;
            case THRESHOLDS_AND_LIKELIHOODS:
                if( bestDistance <= nullRejectionThresholds[ closestTemplateIndex ] && maxLikelihood >= nullRejectionLikelihoodThreshold)
                    predictedClassLabel = templatesBuffer[ closestTemplateIndex ].classLabel;
                else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
                break;
            default:
                errorLog << __GRT_LOG__ << " Unknown RejectionMode!" << std::endl;
                return false;
                break;
        }

    }else predictedClassLabel = templatesBuffer[ closestTemplateIndex ].classLabel;

    return true;
}
475 
/**
 Continuous (sample-by-sample) prediction: pushes one feature vector into a
 circular buffer and, once the buffer holds at least averageTemplateLength
 samples, classifies the buffered window as a time series.
 @param inputVector one sample; size must match numInputDimensions
 @return true on success (including the warm-up phase before the buffer is full), false on error
*/
bool DTW::predict_( VectorFloat &inputVector ){

    if( !trained ){
        errorLog << __GRT_LOG__ << " The model has not been trained!" << std::endl;
        return false;
    }
    predictedClassLabel = 0;
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    std::fill(classLikelihoods.begin(),classLikelihoods.end(),DEFAULT_NULL_LIKELIHOOD_VALUE);
    std::fill(classDistances.begin(),classDistances.end(),0);

    if( numInputDimensions != inputVector.getSize() ){
        errorLog << __GRT_LOG__ << " The number of features in the model " << numInputDimensions << " does not match that of the input Vector " << inputVector.size() << std::endl;
        return false;
    }

    //Add the new input to the circular buffer
    continuousInputDataBuffer.push_back( inputVector );

    if( continuousInputDataBuffer.getNumValuesInBuffer() < averageTemplateLength ){
        //We haven't got enough samples yet so can't do the prediction
        return true;
    }

    //Copy the buffered window into a temporary matrix (rows = samples, cols = dimensions)
    const UINT M = continuousInputDataBuffer.getSize();
    const UINT N = numInputDimensions;
    MatrixFloat predictionTimeSeries(M,N);
    for(UINT i=0; i<M; i++){
        for(UINT j=0; j<N; j++){
            predictionTimeSeries[i][j] = continuousInputDataBuffer[i][j];
        }
    }

    //Run the prediction on the buffered time series
    return predict( predictionTimeSeries );

}
514 
515 bool DTW::reset(){
516  continuousInputDataBuffer.clear();
517  if( trained ){
518  continuousInputDataBuffer.resize(averageTemplateLength,VectorFloat(numInputDimensions,0));
520  }
521  return true;
522 }
523 
524 bool DTW::clear(){
525 
526  //Clear the Classifier variables
528 
529  //Clear the DTW model
530  templatesBuffer.clear();
531  distanceMatrices.clear();
532  warpPaths.clear();
533  continuousInputDataBuffer.clear();
534 
535  return true;
536 }
537 
539 
540  if(!trained) return false;
541 
542  //Copy the null rejection thresholds into one buffer so they can easily be accessed from the base class
543  nullRejectionThresholds.resize(numTemplates);
544 
545  for(UINT k=0; k<numTemplates; k++){
546  //The threshold is set as the mean distance plus gamma standard deviations
547  nullRejectionThresholds[k] = templatesBuffer[k].trainingMu + (templatesBuffer[k].trainingSigma * nullRejectionCoeff);
548  }
549 
550  return true;
551 }
552 
554 
555  if( newTemplates.size() == templatesBuffer.size() ){
556  templatesBuffer = newTemplates;
557  //Make sure the class labels have not changed
558  classLabels.resize( templatesBuffer.size() );
559  for(UINT i=0; i<templatesBuffer.size(); i++){
560  classLabels[i] = templatesBuffer[i].classLabel;
561  }
562  return true;
563  }
564  return false;
565 }
566 
568 
569 Float DTW::computeDistance(MatrixFloat &timeSeriesA,MatrixFloat &timeSeriesB,MatrixFloat &distanceMatrix,Vector< IndexDist > &warpPath){
570 
571  const int M = timeSeriesA.getNumRows();
572  const int N = timeSeriesB.getNumRows();
573  const int C = timeSeriesA.getNumCols();
574  int i,j,k,index = 0;
575  Float totalDist,v,normFactor = 0.;
576 
577  warpPath.clear();
578  if( int(distanceMatrix.getNumRows()) != M || int(distanceMatrix.getNumCols()) != N ){
579  distanceMatrix.resize(M, N);
580  }
581 
582  switch (distanceMethod) {
583  case (ABSOLUTE_DIST):
584  for(i=0; i<M; i++){
585  for(j=0; j<N; j++){
586  distanceMatrix[i][j] = 0.0;
587  for(k=0; k< C; k++){
588  distanceMatrix[i][j] += fabs(timeSeriesA[i][k]-timeSeriesB[j][k]);
589  }
590  }
591  }
592  break;
593  case (EUCLIDEAN_DIST):
594  //Calculate Euclidean Distance for all possible values
595  for(i=0; i<M; i++){
596  for(j=0; j<N; j++){
597  distanceMatrix[i][j] = 0.0;
598  for(k=0; k< C; k++){
599  distanceMatrix[i][j] += SQR( timeSeriesA[i][k]-timeSeriesB[j][k] );
600  }
601  distanceMatrix[i][j] = sqrt( distanceMatrix[i][j] );
602  }
603  }
604  break;
605  case (NORM_ABSOLUTE_DIST):
606  for(i=0; i<M; i++){
607  for(j=0; j<N; j++){
608  distanceMatrix[i][j] = 0.0;
609  for(k=0; k< C; k++){
610  distanceMatrix[i][j] += fabs(timeSeriesA[i][k]-timeSeriesB[j][k]);
611  }
612  distanceMatrix[i][j]/=N;
613  }
614  }
615  break;
616  default:
617  errorLog<< __GRT_LOG__ << " Unknown distance method: "<<distanceMethod<< std::endl;
618  return -1;
619  break;
620  }
621 
622  //Run the recursive search function to build the cost matrix
623  Float distance = sqrt( d(M-1,N-1,distanceMatrix,M,N) );
624 
625  if( grt_isinf(distance) || grt_isnan(distance) ){
626  warningLog << __GRT_LOG__ << " Distance Matrix Values are INF!" << std::endl;
627  return INFINITY;
628  }
629 
630  //cout << "DIST: " << distance << std::endl;
631 
632  //The distMatrix values are negative so make them positive
633  for(i=0; i<M; i++){
634  for(j=0; j<N; j++){
635  distanceMatrix[i][j] = fabs( distanceMatrix[i][j] );
636  }
637  }
638 
639  //Now Create the Warp Path through the cost matrix, starting at the end
640  i=M-1;
641  j=N-1;
642  totalDist = distanceMatrix[i][j];
643  warpPath.push_back( IndexDist(i,j,distanceMatrix[i][j]) );
644 
645  //Use dynamic programming to navigate through the cost matrix until [0][0] has been reached
646  normFactor = 1;
647  while( true ) {
648  if( i==0 && j==0 ) break;
649  if( i==0 ){ j--; }
650  else{
651  if( j==0 ) i--;
652  else{
653  //Find the minimum cell to move to
655  index = 0;
656  if( distanceMatrix[i-1][j] < v ){ v = distanceMatrix[i-1][j]; index = 1; }
657  if( distanceMatrix[i][j-1] < v ){ v = distanceMatrix[i][j-1]; index = 2; }
658  if( distanceMatrix[i-1][j-1] <= v ){ index = 3; }
659  switch(index){
660  case(1):
661  i--;
662  break;
663  case(2):
664  j--;
665  break;
666  case(3):
667  i--;
668  j--;
669  break;
670  default:
671  warningLog << __GRT_LOG__ << " Could not compute a warping path for the input matrix! Dist: " << distanceMatrix[i-1][j] << " i: " << i << " j: " << j << std::endl;
672  return INFINITY;
673  break;
674  }
675  }
676  }
677  normFactor++;
678  totalDist += distanceMatrix[i][j];
679  warpPath.push_back( IndexDist(i,j,distanceMatrix[i][j]) );
680  }
681 
682  return totalDist/normFactor;
683 }
684 
/**
 Recursively computes the accumulated DTW cost of reaching cell [m][n] from
 [0][0], memoising results in-place inside distanceMatrix: a visited cell's
 accumulated cost is stored as its own negative, and NAN marks cells pruned
 by the warping-window constraint.
 @param m,n the cell to evaluate
 @param distanceMatrix pairwise-cost matrix, mutated in place as described above
 @param M,N matrix dimensions
 @return the accumulated cost to reach [m][n], or NAN if the cell is unreachable
*/
Float DTW::d(int m,int n,MatrixFloat &distanceMatrix,const int M,const int N){

    Float dist = 0;
    //The following is based on Matlab code by Eamonn Keogh and Michael Pazzani

    //If this cell is NAN then it has already been flagged as unreachable
    if( grt_isnan( distanceMatrix[m][n] ) ){
        return NAN;
    }

    if( constrainWarpingPath ){
        Float r = ceil( grt_min(M,N)*radius );
        //Test to see if the current cell is outside of the warping window
        if( fabs( n-((N-1)/((M-1)/Float(m))) ) > r ){
            //Flag the whole excluded region as unreachable so it is never re-examined
            if( n-((N-1)/((M-1)/Float(m))) > 0 ){
                for(int i=0; i<m; i++){
                    for(int j=n; j<N; j++){
                        distanceMatrix[i][j] = NAN;
                    }
                }
            }else{
                for(int i=m; i<M; i++){
                    for(int j=0; j<n; j++){
                        distanceMatrix[i][j] = NAN;
                    }
                }
            }
            return NAN;
        }
    }

    //If this cell contains a negative value then it has already been searched
    //The cost is therefore the absolute value of the negative value so return it
    if( distanceMatrix[m][n] < 0 ){
        dist = fabs( distanceMatrix[m][n] );
        return dist;
    }

    //Case 1: A warping path has reached the end
    //Return the contribution of distance
    //Negate the value, to record the fact that this cell has been visited
    //End of recursion
    if( m == 0 && n == 0 ){
        dist = distanceMatrix[0][0];
        distanceMatrix[0][0] = -distanceMatrix[0][0];
        return dist;
    }

    //Case 2: we are somewhere in the top row of the matrix
    //Only need to consider moving left
    if( m == 0 ){
        Float contribDist = d(m,n-1,distanceMatrix,M,N);

        dist = distanceMatrix[m][n] + contribDist;

        distanceMatrix[m][n] = -dist;
        return dist;
    }else{
        //Case 3: we are somewhere in the left column of the matrix
        //Only need to consider moving down
        if ( n == 0) {
            Float contribDist = d(m-1,n,distanceMatrix,M,N);

            dist = distanceMatrix[m][n] + contribDist;

            distanceMatrix[m][n] = -dist;
            return dist;
        }else{
            //Case 4: We are somewhere away from the edges so consider moving in the three main directions
            Float contribDist1 = d(m-1,n-1,distanceMatrix,M,N);
            Float contribDist2 = d(m-1,n,distanceMatrix,M,N);
            Float contribDist3 = d(m,n-1,distanceMatrix,M,N);
            Float minValue = grt_numeric_limits< Float >::max();
            int index = 0;
            //NaN contributions fail every comparison, so index stays 0 when all three neighbours are unreachable
            if( contribDist1 < minValue ){ minValue = contribDist1; index = 1; }
            if( contribDist2 < minValue ){ minValue = contribDist2; index = 2; }
            if( contribDist3 < minValue ){ minValue = contribDist3; index = 3; }

            //NOTE(review): all three cases add minValue identically; the switch only documents which neighbour won.
            //When index==0 (all neighbours NaN) dist stays 0 — confirm this fall-through is intentional.
            switch ( index ) {
                case 1:
                    dist = distanceMatrix[m][n] + minValue;
                    break;
                case 2:
                    dist = distanceMatrix[m][n] + minValue;
                    break;
                case 3:
                    dist = distanceMatrix[m][n] + minValue;
                    break;

                default:
                    break;
            }

            distanceMatrix[m][n] = -dist; //Negate the value to record that it has been visited
            return dist;
        }
    }

    //This should not happen!
    return dist;
}
786 
787 inline Float DTW::MIN_(Float a,Float b, Float c){
788  Float v = a;
789  if(b<v) v = b;
790  if(c<v) v = c;
791  return v;
792 }
793 
794 
796 
797 void DTW::scaleData(TimeSeriesClassificationData &trainingData){
798 
799  //Scale the data using the min and max values
800  for(UINT i=0; i<trainingData.getNumSamples(); i++){
801  scaleData( trainingData[i].getData(), trainingData[i].getData() );
802  }
803 
804 }
805 
806 void DTW::scaleData(MatrixFloat &data,MatrixFloat &scaledData){
807 
808  const UINT R = data.getNumRows();
809  const UINT C = data.getNumCols();
810 
811  if( scaledData.getNumRows() != R || scaledData.getNumCols() != C ){
812  scaledData.resize(R, C);
813  }
814 
815  //Scale the data using the min and max values
816  for(UINT i=0; i<R; i++)
817  for(UINT j=0; j<C; j++)
818  scaledData[i][j] = grt_scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,0.0,1.0);
819 
820 }
821 
822 void DTW::znormData(TimeSeriesClassificationData &trainingData){
823 
824  for(UINT i=0; i<trainingData.getNumSamples(); i++){
825  znormData( trainingData[i].getData(), trainingData[i].getData() );
826  }
827 
828 }
829 
830 void DTW::znormData(MatrixFloat &data,MatrixFloat &normData){
831 
832  const UINT R = data.getNumRows();
833  const UINT C = data.getNumCols();
834 
835  if( normData.getNumRows() != R || normData.getNumCols() != C ){
836  normData.resize(R,C);
837  }
838 
839  for(UINT j=0; j<C; j++){
840  Float mean = 0.0;
841  Float stdDev = 0.0;
842 
843  //Calculate Mean
844  for(UINT i=0; i<R; i++) mean += data[i][j];
845  mean /= Float(R);
846 
847  //Calculate Std Dev
848  for(UINT i=0; i<R; i++)
849  stdDev += grt_sqr(data[i][j]-mean);
850  stdDev = grt_sqrt( stdDev / (R - 1.0) );
851 
852  if(constrainZNorm && stdDev < 0.01){
853  //Normalize the data to 0 mean
854  for(UINT i=0; i<R; i++)
855  normData[i][j] = (data[i][j] - mean);
856  }else{
857  //Normalize the data to 0 mean and standard deviation of 1
858  for(UINT i=0; i<R; i++)
859  normData[i][j] = (data[i][j] - mean) / stdDev;
860  }
861  }
862 }
863 
864 void DTW::smoothData(VectorFloat &data,UINT smoothFactor,VectorFloat &resultsData){
865 
866  const UINT M = (UINT)data.size();
867  const UINT N = (UINT) floor(Float(M)/Float(smoothFactor));
868  resultsData.resize(N,0);
869  for(UINT i=0; i<N; i++) resultsData[i]=0.0;
870 
871  if(smoothFactor==1 || M<smoothFactor){
872  resultsData = data;
873  return;
874  }
875 
876  for(UINT i=0; i<N; i++){
877  Float mean = 0.0;
878  UINT index = i*smoothFactor;
879  for(UINT x=0; x<smoothFactor; x++){
880  mean += data[index+x];
881  }
882  resultsData[i] = mean/smoothFactor;
883  }
884  //Add on the data that does not fit into the window
885  if(M%smoothFactor!=0.0){
886  Float mean = 0.0;
887  for(UINT i=N*smoothFactor; i<M; i++) mean += data[i];
888  mean/=M-(N*smoothFactor);
889  //Add one to the end of the Vector
890  VectorFloat tempVector(N+1);
891  for(UINT i=0; i<N; i++) tempVector[i] = resultsData[i];
892  tempVector[N] = mean;
893  resultsData = tempVector;
894  }
895 
896 }
897 
898 void DTW::smoothData(MatrixFloat &data,UINT smoothFactor,MatrixFloat &resultsData){
899 
900  const UINT M = data.getNumRows();
901  const UINT C = data.getNumCols();
902  const UINT N = (UINT) floor(Float(M)/Float(smoothFactor));
903  resultsData.resize(N,C);
904 
905  if(smoothFactor==1 || M<smoothFactor){
906  resultsData = data;
907  return;
908  }
909 
910  for(UINT i=0; i<N; i++){
911  for(UINT j=0; j<C; j++){
912  Float mean = 0.0;
913  int index = i*smoothFactor;
914  for(UINT x=0; x<smoothFactor; x++){
915  mean += data[index+x][j];
916  }
917  resultsData[i][j] = mean/smoothFactor;
918  }
919  }
920 
921  //Add on the data that does not fit into the window
922  if(M%smoothFactor!=0.0){
923  VectorFloat mean(C,0.0);
924  for(UINT j=0; j<C; j++){
925  for(UINT i=N*smoothFactor; i<M; i++) mean[j] += data[i][j];
926  mean[j]/=M-(N*smoothFactor);
927  }
928 
929  //Add one row to the end of the Matrix
930  MatrixFloat tempMatrix(N+1,C);
931 
932  for(UINT i=0; i<N; i++)
933  for(UINT j=0; j<C; j++)
934  tempMatrix[i][j] = resultsData[i][j];
935 
936  for(UINT j=0; j<C; j++) tempMatrix[N][j] = mean[j];
937  resultsData = tempMatrix;
938  }
939 
940 }
941 
943 
944 bool DTW::save( std::fstream &file ) const{
945 
946  if(!file.is_open()){
947  errorLog << __GRT_LOG__ << " Could not open file to save data" << std::endl;
948  return false;
949  }
950 
951  file << "GRT_DTW_Model_File_V2.0" << std::endl;
952 
953  //Write the classifier settings to the file
955  errorLog << __GRT_LOG__ << " Failed to save classifier base settings to file!" << std::endl;
956  return false;
957  }
958 
959  file << "DistanceMethod: ";
960  switch(distanceMethod){
961  case(ABSOLUTE_DIST):
962  file <<ABSOLUTE_DIST<< std::endl;
963  break;
964  case(EUCLIDEAN_DIST):
965  file <<EUCLIDEAN_DIST<< std::endl;
966  break;
967  default:
968  file <<ABSOLUTE_DIST<< std::endl;
969  break;
970  }
971  file << "UseSmoothing: "<<useSmoothing<< std::endl;
972  file << "SmoothingFactor: "<<smoothingFactor<< std::endl;
973  file << "UseZNormalisation: "<<useZNormalisation<< std::endl;
974  file << "OffsetUsingFirstSample: " << offsetUsingFirstSample << std::endl;
975  file << "ConstrainWarpingPath: " << constrainWarpingPath << std::endl;
976  file << "Radius: " << radius << std::endl;
977  file << "RejectionMode: " << rejectionMode<< std::endl;
978 
979  if( trained ){
980  file << "NumberOfTemplates: " << numTemplates << std::endl;
981  file << "OverallAverageTemplateLength: " << averageTemplateLength << std::endl;
982  //Save each template
983  for(UINT i=0; i<numTemplates; i++){
984  file << "***************TEMPLATE***************" << std::endl;
985  file << "Template: " << i+1 << std::endl;
986  file << "ClassLabel: " << templatesBuffer[i].classLabel << std::endl;
987  file << "TimeSeriesLength: " << templatesBuffer[i].timeSeries.getNumRows() << std::endl;
988  file << "TemplateThreshold: " << nullRejectionThresholds[i] << std::endl;
989  file << "TrainingMu: " << templatesBuffer[i].trainingMu << std::endl;
990  file << "TrainingSigma: " << templatesBuffer[i].trainingSigma << std::endl;
991  file << "AverageTemplateLength: " << templatesBuffer[i].averageTemplateLength << std::endl;
992  file << "TimeSeries: " << std::endl;
993  for(UINT k=0; k<templatesBuffer[i].timeSeries.getNumRows(); k++){
994  for(UINT j=0; j<templatesBuffer[i].timeSeries.getNumCols(); j++){
995  file << templatesBuffer[i].timeSeries[k][j] << "\t";
996  }
997  file << std::endl;
998  }
999  }
1000  }
1001 
1002  return true;
1003 }
1004 
1005 bool DTW::load( std::fstream &file ){
1006 
1007  std::string word;
1008  UINT timeSeriesLength;
1009  UINT ts;
1010 
1011  if(!file.is_open())
1012  {
1013  errorLog << __GRT_LOG__ << " Failed to open file!" << std::endl;
1014  return false;
1015  }
1016 
1017  file >> word;
1018 
1019  //Check to see if we should load a legacy file
1020  if( word == "GRT_DTW_Model_File_V1.0" ){
1021  return loadLegacyModelFromFile( file );
1022  }
1023 
1024  //Check to make sure this is a file with the DTW File Format
1025  if(word != "GRT_DTW_Model_File_V2.0"){
1026  errorLog << __GRT_LOG__ << " Unknown file header!" << std::endl;
1027  return false;
1028  }
1029 
1030  //Load the base settings from the file
1032  errorLog << __GRT_LOG__ << " Failed to load base settings from file!" << std::endl;
1033  return false;
1034  }
1035 
1036  //Check and load the Distance Method
1037  file >> word;
1038  if(word != "DistanceMethod:"){
1039  errorLog << __GRT_LOG__ << " Failed to find DistanceMethod!" << std::endl;
1040  return false;
1041  }
1042  file >> distanceMethod;
1043 
1044  //Check and load if Smoothing is used
1045  file >> word;
1046  if(word != "UseSmoothing:"){
1047  errorLog << __GRT_LOG__ << " Failed to find UseSmoothing!" << std::endl;
1048  return false;
1049  }
1050  file >> useSmoothing;
1051 
1052  //Check and load what the smoothing factor is
1053  file >> word;
1054  if(word != "SmoothingFactor:"){
1055  errorLog << __GRT_LOG__ << " Failed to find SmoothingFactor!" << std::endl;
1056  return false;
1057  }
1058  file >> smoothingFactor;
1059 
1060  //Check and load if ZNormalization is used
1061  file >> word;
1062  if(word != "UseZNormalisation:"){
1063  errorLog << __GRT_LOG__ << " Failed to find UseZNormalisation!" << std::endl;
1064  return false;
1065  }
1066  file >> useZNormalisation;
1067 
1068  //Check and load if OffsetUsingFirstSample is used
1069  file >> word;
1070  if(word != "OffsetUsingFirstSample:"){
1071  errorLog << __GRT_LOG__ << " Failed to find OffsetUsingFirstSample!" << std::endl;
1072  return false;
1073  }
1074  file >> offsetUsingFirstSample;
1075 
1076  //Check and load if ConstrainWarpingPath is used
1077  file >> word;
1078  if(word != "ConstrainWarpingPath:"){
1079  errorLog << __GRT_LOG__ << " Failed to find ConstrainWarpingPath!" << std::endl;
1080  return false;
1081  }
1082  file >> constrainWarpingPath;
1083 
1084  //Check and load if ZNormalization is used
1085  file >> word;
1086  if(word != "Radius:"){
1087  errorLog << __GRT_LOG__ << " Failed to find Radius!" << std::endl;
1088  return false;
1089  }
1090  file >> radius;
1091 
1092  //Check and load if Scaling is used
1093  file >> word;
1094  if(word != "RejectionMode:"){
1095  errorLog << __GRT_LOG__ << " Failed to find RejectionMode!" << std::endl;
1096  return false;
1097  }
1098  file >> rejectionMode;
1099 
1100  if( trained ){
1101 
1102  //Check and load the Number of Templates
1103  file >> word;
1104  if(word != "NumberOfTemplates:"){
1105  errorLog << __GRT_LOG__ << " Failed to find NumberOfTemplates!" << std::endl;
1106  return false;
1107  }
1108  file >> numTemplates;
1109 
1110  //Check and load the overall average template length
1111  file >> word;
1112  if(word != "OverallAverageTemplateLength:"){
1113  errorLog << __GRT_LOG__ << " Failed to find OverallAverageTemplateLength!" << std::endl;
1114  return false;
1115  }
1116  file >> averageTemplateLength;
1117 
1118  //Clean and reset the memory
1119  templatesBuffer.resize(numTemplates);
1120  classLabels.resize(numTemplates);
1121  nullRejectionThresholds.resize(numTemplates);
1122 
1123  //Load each template
1124  for(UINT i=0; i<numTemplates; i++){
1125  //Check we have the correct template
1126  file >> word;
1127  if( word != "***************TEMPLATE***************" ){
1128  clear();
1129  errorLog << __GRT_LOG__ << " Failed to find template header!" << std::endl;
1130  return false;
1131  }
1132 
1133  //Load the template number
1134  file >> word;
1135  if(word != "Template:"){
1136  clear();
1137  errorLog << __GRT_LOG__ << " ailed to find Template Number!" << std::endl;
1138  return false;
1139  }
1140 
1141  //Check the template number
1142  file >> ts;
1143  if(ts!=i+1){
1144  clear();
1145  errorLog << __GRT_LOG__ << " Invalid Template Number: " << ts << std::endl;
1146  return false;
1147  }
1148 
1149  //Get the class label of this template
1150  file >> word;
1151  if(word != "ClassLabel:"){
1152  clear();
1153  errorLog << __GRT_LOG__ << " Failed to find ClassLabel!" << std::endl;
1154  return false;
1155  }
1156  file >> templatesBuffer[i].classLabel;
1157  classLabels[i] = templatesBuffer[i].classLabel;
1158 
1159  //Get the time series length
1160  file >> word;
1161  if(word != "TimeSeriesLength:"){
1162  clear();
1163  errorLog << __GRT_LOG__ << " Failed to find TimeSeriesLength!" << std::endl;
1164  return false;
1165  }
1166  file >> timeSeriesLength;
1167 
1168  //Resize the buffers
1169  templatesBuffer[i].timeSeries.resize(timeSeriesLength,numInputDimensions);
1170 
1171  //Get the template threshold
1172  file >> word;
1173  if(word != "TemplateThreshold:"){
1174  clear();
1175  errorLog << __GRT_LOG__ << " Failed to find TemplateThreshold!" << std::endl;
1176  return false;
1177  }
1178  file >> nullRejectionThresholds[i];
1179 
1180  //Get the mu values
1181  file >> word;
1182  if(word != "TrainingMu:"){
1183  clear();
1184  errorLog << __GRT_LOG__ << " Failed to find TrainingMu!" << std::endl;
1185  return false;
1186  }
1187  file >> templatesBuffer[i].trainingMu;
1188 
1189  //Get the sigma values
1190  file >> word;
1191  if(word != "TrainingSigma:"){
1192  clear();
1193  errorLog << __GRT_LOG__ << " Failed to find TrainingSigma!" << std::endl;
1194  return false;
1195  }
1196  file >> templatesBuffer[i].trainingSigma;
1197 
1198  //Get the AverageTemplateLength value
1199  file >> word;
1200  if(word != "AverageTemplateLength:"){
1201  clear();
1202  errorLog << __GRT_LOG__ << " Failed to find AverageTemplateLength!" << std::endl;
1203  return false;
1204  }
1205  file >> templatesBuffer[i].averageTemplateLength;
1206 
1207  //Get the data
1208  file >> word;
1209  if(word != "TimeSeries:"){
1210  clear();
1211  errorLog << __GRT_LOG__ << " Failed to find template timeseries!" << std::endl;
1212  return false;
1213  }
1214  for(UINT k=0; k<timeSeriesLength; k++)
1215  for(UINT j=0; j<numInputDimensions; j++)
1216  file >> templatesBuffer[i].timeSeries[k][j];
1217  }
1218 
1219  //Resize the prediction results to make sure it is setup for realtime prediction
1220  continuousInputDataBuffer.clear();
1221  continuousInputDataBuffer.resize(averageTemplateLength,VectorFloat(numInputDimensions,0));
1222  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
1223  bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
1224  classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
1225  classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
1226  }
1227 
1228  return true;
1229 }
1230 bool DTW::setRejectionMode(UINT rejectionMode){
1231  if( rejectionMode == TEMPLATE_THRESHOLDS || rejectionMode == CLASS_LIKELIHOODS || rejectionMode == THRESHOLDS_AND_LIKELIHOODS ){
1232  this->rejectionMode = rejectionMode;
1233  return true;
1234  }
1235  return false;
1236 }
1237 
1238 bool DTW::setNullRejectionThreshold(Float nullRejectionLikelihoodThreshold)
1239 {
1240  this->nullRejectionLikelihoodThreshold = nullRejectionLikelihoodThreshold;
1241  return true;
1242 }
1243 
1244 bool DTW::setOffsetTimeseriesUsingFirstSample(bool offsetUsingFirstSample){
1245  this->offsetUsingFirstSample = offsetUsingFirstSample;
1246  return true;
1247 }
1248 
1249 bool DTW::setContrainWarpingPath(bool constrain){
1250  this->constrainWarpingPath = constrain;
1251  return true;
1252 }
1253 
1254 bool DTW::setWarpingRadius(Float radius){
1255  this->radius = radius;
1256  return true;
1257 }
1258 
1259 bool DTW::enableZNormalization(bool useZNormalisation,bool constrainZNorm){
1260  this->useZNormalisation = useZNormalisation;
1261  this->constrainZNorm = constrainZNorm;
1262  return true;
1263 }
1264 
1265 bool DTW::enableTrimTrainingData(bool trimTrainingData,Float trimThreshold,Float maximumTrimPercentage){
1266 
1267  if( trimThreshold < 0 || trimThreshold > 1 ){
1268  warningLog << __GRT_LOG__ << " Failed to set trimTrainingData. The trimThreshold must be in the range of [0 1]" << std::endl;
1269  return false;
1270  }
1271  if( maximumTrimPercentage < 0 || maximumTrimPercentage > 100 ){
1272  warningLog << __GRT_LOG__ << " Failed to set trimTrainingData. The maximumTrimPercentage must be a valid percentage in the range of [0 100]" << std::endl;
1273  return false;
1274  }
1275 
1276  this->trimTrainingData = trimTrainingData;
1277  this->trimThreshold = trimThreshold;
1278  this->maximumTrimPercentage = maximumTrimPercentage;
1279  return true;
1280 }
1281 
1282 void DTW::offsetTimeseries(MatrixFloat &timeseries){
1283  VectorFloat firstRow = timeseries.getRow(0);
1284  for(UINT i=0; i<timeseries.getNumRows(); i++){
1285  for(UINT j=0; j<timeseries.getNumCols(); j++){
1286  timeseries[i][j] -= firstRow[j];
1287  }
1288  }
1289 }
1290 
1291 bool DTW::loadLegacyModelFromFile( std::fstream &file ){
1292 
1293  std::string word;
1294  UINT timeSeriesLength;
1295  UINT ts;
1296 
1297  //Check and load the Number of Dimensions
1298  file >> word;
1299  if(word != "NumberOfDimensions:"){
1300  errorLog << __GRT_LOG__ << " Failed to find NumberOfDimensions!" << std::endl;
1301  return false;
1302  }
1303  file >> numInputDimensions;
1304 
1305  //Check and load the Number of Classes
1306  file >> word;
1307  if(word != "NumberOfClasses:"){
1308  errorLog << __GRT_LOG__ << " Failed to find NumberOfClasses!" << std::endl;
1309  return false;
1310  }
1311  file >> numClasses;
1312 
1313  //Check and load the Number of Templates
1314  file >> word;
1315  if(word != "NumberOfTemplates:"){
1316  errorLog << __GRT_LOG__ << " Failed to find NumberOfTemplates!" << std::endl;
1317  return false;
1318  }
1319  file >> numTemplates;
1320 
1321  //Check and load the Distance Method
1322  file >> word;
1323  if(word != "DistanceMethod:"){
1324  errorLog << __GRT_LOG__ << " Failed to find DistanceMethod!" << std::endl;
1325  return false;
1326  }
1327  file >> distanceMethod;
1328 
1329  //Check and load if UseNullRejection is used
1330  file >> word;
1331  if(word != "UseNullRejection:"){
1332  errorLog << __GRT_LOG__ << " Failed to find UseNullRejection!" << std::endl;
1333  return false;
1334  }
1335  file >> useNullRejection;
1336 
1337  //Check and load if Smoothing is used
1338  file >> word;
1339  if(word != "UseSmoothing:"){
1340  errorLog << __GRT_LOG__ << " Failed to find UseSmoothing!" << std::endl;
1341  return false;
1342  }
1343  file >> useSmoothing;
1344 
1345  //Check and load what the smoothing factor is
1346  file >> word;
1347  if(word != "SmoothingFactor:"){
1348  errorLog << __GRT_LOG__ << " Failed to find SmoothingFactor!" << std::endl;
1349  return false;
1350  }
1351  file >> smoothingFactor;
1352 
1353  //Check and load if Scaling is used
1354  file >> word;
1355  if(word != "UseScaling:"){
1356  errorLog << __GRT_LOG__ << " Failed to find UseScaling!" << std::endl;
1357  return false;
1358  }
1359  file >> useScaling;
1360 
1361  //Check and load if ZNormalization is used
1362  file >> word;
1363  if(word != "UseZNormalisation:"){
1364  errorLog << __GRT_LOG__ << " Failed to find UseZNormalisation!" << std::endl;
1365  return false;
1366  }
1367  file >> useZNormalisation;
1368 
1369  //Check and load if OffsetUsingFirstSample is used
1370  file >> word;
1371  if(word != "OffsetUsingFirstSample:"){
1372  errorLog << __GRT_LOG__ << " Failed to find OffsetUsingFirstSample!" << std::endl;
1373  return false;
1374  }
1375  file >> offsetUsingFirstSample;
1376 
1377  //Check and load if ConstrainWarpingPath is used
1378  file >> word;
1379  if(word != "ConstrainWarpingPath:"){
1380  errorLog << __GRT_LOG__ << " Failed to find ConstrainWarpingPath!" << std::endl;
1381  return false;
1382  }
1383  file >> constrainWarpingPath;
1384 
1385  //Check and load if ZNormalization is used
1386  file >> word;
1387  if(word != "Radius:"){
1388  errorLog << __GRT_LOG__ << " Failed to find Radius!" << std::endl;
1389  return false;
1390  }
1391  file >> radius;
1392 
1393  //Check and load if Scaling is used
1394  file >> word;
1395  if(word != "RejectionMode:"){
1396  errorLog << __GRT_LOG__ << " Failed to find RejectionMode!" << std::endl;
1397  return false;
1398  }
1399  file >> rejectionMode;
1400 
1401  //Check and load gamma
1402  file >> word;
1403  if(word != "NullRejectionCoeff:"){
1404  errorLog << __GRT_LOG__ << " Failed to find NullRejectionCoeff!" << std::endl;
1405  return false;
1406  }
1407  file >> nullRejectionCoeff;
1408 
1409  //Check and load the overall average template length
1410  file >> word;
1411  if(word != "OverallAverageTemplateLength:"){
1412  errorLog << __GRT_LOG__ << " Failed to find OverallAverageTemplateLength!" << std::endl;
1413  return false;
1414  }
1415  file >> averageTemplateLength;
1416 
1417  //Clean and reset the memory
1418  templatesBuffer.resize(numTemplates);
1419  classLabels.resize(numTemplates);
1420  nullRejectionThresholds.resize(numTemplates);
1421 
1422  //Load each template
1423  for(UINT i=0; i<numTemplates; i++){
1424  //Check we have the correct template
1425  file >> word;
1426  while(word != "Template:"){
1427  file >> word;
1428  }
1429  file >> ts;
1430 
1431  //Check the template number
1432  if(ts!=i+1){
1433  numTemplates=0;
1434  trained = false;
1435  errorLog << __GRT_LOG__ << " Failed to find Invalid Template Number!" << std::endl;
1436  return false;
1437  }
1438 
1439  //Get the class label of this template
1440  file >> word;
1441  if(word != "ClassLabel:"){
1442  numTemplates=0;
1443  trained = false;
1444  errorLog << __GRT_LOG__ << " Failed to find ClassLabel!" << std::endl;
1445  return false;
1446  }
1447  file >> templatesBuffer[i].classLabel;
1448  classLabels[i] = templatesBuffer[i].classLabel;
1449 
1450  //Get the time series length
1451  file >> word;
1452  if(word != "TimeSeriesLength:"){
1453  numTemplates=0;
1454  trained = false;
1455  errorLog << __GRT_LOG__ << " Failed to find TimeSeriesLength!" << std::endl;
1456  return false;
1457  }
1458  file >> timeSeriesLength;
1459 
1460  //Resize the buffers
1461  templatesBuffer[i].timeSeries.resize(timeSeriesLength,numInputDimensions);
1462 
1463  //Get the template threshold
1464  file >> word;
1465  if(word != "TemplateThreshold:"){
1466  numTemplates=0;
1467  trained = false;
1468  errorLog << __GRT_LOG__ << " Failed to find TemplateThreshold!" << std::endl;
1469  return false;
1470  }
1471  file >> nullRejectionThresholds[i];
1472 
1473  //Get the mu values
1474  file >> word;
1475  if(word != "TrainingMu:"){
1476  numTemplates=0;
1477  trained = false;
1478  errorLog << __GRT_LOG__ << " Failed to find TrainingMu!" << std::endl;
1479  return false;
1480  }
1481  file >> templatesBuffer[i].trainingMu;
1482 
1483  //Get the sigma values
1484  file >> word;
1485  if(word != "TrainingSigma:"){
1486  numTemplates=0;
1487  trained = false;
1488  errorLog << __GRT_LOG__ << " Failed to find TrainingSigma!" << std::endl;
1489  return false;
1490  }
1491  file >> templatesBuffer[i].trainingSigma;
1492 
1493  //Get the AverageTemplateLength value
1494  file >> word;
1495  if(word != "AverageTemplateLength:"){
1496  numTemplates=0;
1497  trained = false;
1498  errorLog << __GRT_LOG__ << " Failed to find AverageTemplateLength!" << std::endl;
1499  return false;
1500  }
1501  file >> templatesBuffer[i].averageTemplateLength;
1502 
1503  //Get the data
1504  file >> word;
1505  if(word != "TimeSeries:"){
1506  numTemplates=0;
1507  trained = false;
1508  errorLog << __GRT_LOG__ << " Failed to find template timeseries!" << std::endl;
1509  return false;
1510  }
1511  for(UINT k=0; k<timeSeriesLength; k++)
1512  for(UINT j=0; j<numInputDimensions; j++)
1513  file >> templatesBuffer[i].timeSeries[k][j];
1514 
1515  //Check for the footer
1516  file >> word;
1517  if(word != "***************************"){
1518  numTemplates=0;
1519  numClasses = 0;
1520  numInputDimensions=0;
1521  trained = false;
1522  errorLog << __GRT_LOG__ << " Failed to find template footer!" << std::endl;
1523  return false;
1524  }
1525  }
1526 
1527  //Resize the prediction results to make sure it is setup for realtime prediction
1528  continuousInputDataBuffer.clear();
1529  continuousInputDataBuffer.resize(averageTemplateLength,VectorFloat(numInputDimensions,0));
1530  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
1531  bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
1532  classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
1533  classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
1534 
1535  trained = true;
1536 
1537  return true;
1538 }
1539 
1540 GRT_END_NAMESPACE
virtual bool predict_(VectorFloat &inputVector)
Definition: DTW.cpp:476
bool saveBaseSettingsToFile(std::fstream &file) const
Definition: Classifier.cpp:274
bool push_back(const T &value)
virtual bool predict(VectorFloat inputVector)
Definition: MLBase.cpp:135
#define DEFAULT_NULL_LIKELIHOOD_VALUE
Definition: Classifier.h:33
virtual bool save(std::fstream &file) const
Definition: DTW.cpp:944
std::string getClassifierType() const
Definition: Classifier.cpp:175
bool setRejectionMode(UINT rejectionMode)
Definition: DTW.cpp:1230
bool setNumDimensions(const UINT numDimensions)
virtual bool train_(TimeSeriesClassificationData &trainingData)
Definition: DTW.cpp:141
virtual bool resize(const unsigned int size)
Definition: Vector.h:133
bool enableTrimTrainingData(bool trimTrainingData, Float trimThreshold, Float maximumTrimPercentage)
Definition: DTW.cpp:1265
UINT getSize() const
Definition: Vector.h:201
bool setContrainWarpingPath(bool constrain)
Definition: DTW.cpp:1249
bool enableZNormalization(bool useZNormalization, bool constrainZNorm=true)
Definition: DTW.cpp:1259
bool setWarpingRadius(Float radius)
Definition: DTW.cpp:1254
DTW & operator=(const DTW &rhs)
Definition: DTW.cpp:73
bool setModels(Vector< DTWTemplate > newTemplates)
Definition: DTW.cpp:553
Vector< ClassTracker > getClassTracker() const
static std::string getId()
Definition: DTW.cpp:28
Definition: DTW.h:91
unsigned int getNumValuesInBuffer() const
bool copyBaseVariables(const Classifier *classifier)
Definition: Classifier.cpp:101
bool loadBaseSettingsFromFile(std::fstream &file)
Definition: Classifier.cpp:321
bool setNullRejectionThreshold(Float nullRejectionLikelihoodThreshold)
Definition: DTW.cpp:1238
unsigned int getNumRows() const
Definition: Matrix.h:574
virtual ~DTW(void)
Definition: DTW.cpp:70
unsigned int getNumCols() const
Definition: Matrix.h:581
bool addSample(const UINT classLabel, const MatrixFloat &trainingSample)
virtual bool recomputeNullRejectionThresholds()
Definition: DTW.cpp:538
VectorFloat getRow(const unsigned int r) const
Definition: MatrixFloat.h:107
TimeSeriesClassificationData getClassData(const UINT classLabel) const
DTW(bool useScaling=false, bool useNullRejection=false, Float nullRejectionCoeff=3.0, UINT rejectionMode=DTW::TEMPLATE_THRESHOLDS, bool dtwConstrain=true, Float radius=0.2, bool offsetUsingFirstSample=false, bool useSmoothing=false, UINT smoothingFactor=5, Float nullRejectionLikelihoodThreshold=0.99)
Definition: DTW.cpp:33
virtual bool deepCopyFrom(const Classifier *classifier)
Definition: DTW.cpp:105
virtual bool reset()
Definition: DTW.cpp:515
virtual bool resize(const unsigned int r, const unsigned int c)
Definition: Matrix.h:245
bool trimTimeSeries(TimeSeriesClassificationSample &timeSeries)
Definition: DTW.h:34
Definition: Vector.h:41
virtual bool clear()
Definition: DTW.cpp:524
bool setOffsetTimeseriesUsingFirstSample(bool offsetUsingFirstSample)
Definition: DTW.cpp:1244
virtual bool clear()
Definition: Classifier.cpp:151
virtual bool load(std::fstream &file)
Definition: DTW.cpp:1005
This is the main base class that all GRT Classification algorithms should inherit from...
Definition: Classifier.h:41
unsigned int getSize() const
bool resize(const unsigned int newBufferSize)