GestureRecognitionToolkit  Version: 0.2.0
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source C++ machine learning library for real-time gesture recognition.
ContinuousHiddenMarkovModel.cpp
/*
GRT MIT License
Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#define GRT_DLL_EXPORTS
#include "ContinuousHiddenMarkovModel.h"
#include "HMMEnums.h"

GRT_BEGIN_NAMESPACE

//Init the model with the given downsample factor, delta, and sigma settings
ContinuousHiddenMarkovModel::ContinuousHiddenMarkovModel(const UINT downsampleFactor,const UINT delta,const bool autoEstimateSigma,const Float sigma){

    clear();
    this->downsampleFactor = downsampleFactor;
    this->delta = delta;
    this->autoEstimateSigma = autoEstimateSigma;
    this->sigma = sigma;
    modelType = HMMModelTypes::HMM_LEFTRIGHT;
    cThreshold = 0;
    useScaling = false;

    debugLog.setProceedingText("[DEBUG ContinuousHiddenMarkovModel]");
    errorLog.setProceedingText("[ERROR ContinuousHiddenMarkovModel]");
    warningLog.setProceedingText("[WARNING ContinuousHiddenMarkovModel]");
    trainingLog.setProceedingText("[TRAINING ContinuousHiddenMarkovModel]");
}

ContinuousHiddenMarkovModel::ContinuousHiddenMarkovModel(const ContinuousHiddenMarkovModel &rhs){

    this->downsampleFactor = rhs.downsampleFactor;
    this->numStates = rhs.numStates;
    this->classLabel = rhs.classLabel;
    this->timeseriesLength = rhs.timeseriesLength;
    this->sigma = rhs.sigma;
    this->autoEstimateSigma = rhs.autoEstimateSigma;
    this->sigmaStates = rhs.sigmaStates;
    this->a = rhs.a;
    this->b = rhs.b;
    this->pi = rhs.pi;
    this->alpha = rhs.alpha;
    this->c = rhs.c;
    this->observationSequence = rhs.observationSequence;
    this->obsSequence = rhs.obsSequence;
    this->estimatedStates = rhs.estimatedStates;
    this->modelType = rhs.modelType;
    this->delta = rhs.delta;
    this->loglikelihood = rhs.loglikelihood;
    this->cThreshold = rhs.cThreshold;

    const MLBase *basePointer = &rhs;
    this->copyMLBaseVariables( basePointer );

    debugLog.setProceedingText("[DEBUG ContinuousHiddenMarkovModel]");
    errorLog.setProceedingText("[ERROR ContinuousHiddenMarkovModel]");
    warningLog.setProceedingText("[WARNING ContinuousHiddenMarkovModel]");
    trainingLog.setProceedingText("[TRAINING ContinuousHiddenMarkovModel]");
}

//Default destructor
ContinuousHiddenMarkovModel::~ContinuousHiddenMarkovModel(){

}

ContinuousHiddenMarkovModel& ContinuousHiddenMarkovModel::operator=(const ContinuousHiddenMarkovModel &rhs){

    if( this != &rhs ){
        this->downsampleFactor = rhs.downsampleFactor;
        this->numStates = rhs.numStates;
        this->classLabel = rhs.classLabel;
        this->timeseriesLength = rhs.timeseriesLength;
        this->sigma = rhs.sigma;
        this->autoEstimateSigma = rhs.autoEstimateSigma;
        this->sigmaStates = rhs.sigmaStates;
        this->a = rhs.a;
        this->b = rhs.b;
        this->pi = rhs.pi;
        this->alpha = rhs.alpha;
        this->c = rhs.c;
        this->observationSequence = rhs.observationSequence;
        this->obsSequence = rhs.obsSequence;
        this->estimatedStates = rhs.estimatedStates;
        this->modelType = rhs.modelType;
        this->delta = rhs.delta;
        this->loglikelihood = rhs.loglikelihood;
        this->cThreshold = rhs.cThreshold;

        const MLBase *basePointer = &rhs;
        this->copyMLBaseVariables( basePointer );
    }

    return *this;
}

bool ContinuousHiddenMarkovModel::predict_( VectorFloat &x ){

    if( !trained ){
        errorLog << "predict_(VectorFloat &x) - The model is not trained!" << std::endl;
        return false;
    }

    if( x.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &x) - The input vector size (" << x.getSize() << ") does not match the number of input dimensions (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    //Add the new sample to the circular buffer
    observationSequence.push_back( x );

    //Convert the circular buffer to MatrixFloat
    for(unsigned int i=0; i<observationSequence.getSize(); i++){
        for(unsigned int j=0; j<numInputDimensions; j++){
            obsSequence[i][j] = observationSequence[i][j];
        }
    }

    return predict_( obsSequence );
}

bool ContinuousHiddenMarkovModel::predict_( MatrixFloat &timeseries ){

    if( !trained ){
        errorLog << "predict_( MatrixFloat &timeseries ) - The model is not trained!" << std::endl;
        return false;
    }

    if( timeseries.getNumCols() != numInputDimensions ){
        errorLog << "predict_( MatrixFloat &timeseries ) - The matrix column size (" << timeseries.getNumCols() << ") does not match the number of input dimensions (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    unsigned int t,i,j,k,index = 0;
    Float maxAlpha = 0;
    Float norm = 0;

    //Downsample the observation timeseries using the same downsample factor as the training data
    const unsigned int timeseriesLength = (int)timeseries.getNumRows();
    const unsigned int T = (int)floor( timeseriesLength / Float(downsampleFactor) );
    MatrixFloat obs(T,numInputDimensions);
    for(j=0; j<numInputDimensions; j++){
        index = 0;
        for(i=0; i<T; i++){
            norm = 0;
            obs[i][j] = 0;
            for(k=0; k<downsampleFactor; k++){
                if( index < timeseriesLength ){
                    obs[i][j] += timeseries[index++][j];
                    norm += 1;
                }
            }
            if( norm > 1 )
                obs[i][j] /= norm;
        }
    }

    //Resize alpha, c, and the estimated states vector as needed
    if( alpha.getNumRows() != T || alpha.getNumCols() != numStates ) alpha.resize(T,numStates);
    if( (unsigned int)c.size() != T ) c.resize(T);
    if( (unsigned int)estimatedStates.size() != T ) estimatedStates.resize(T);

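    //Run the scaled forward algorithm: alpha[t][j] = ( sum_i alpha[t-1][i]*a[i][j] ) * b_j(obs_t),
    //where b_j(obs_t) is the Gaussian emission likelihood returned by gauss(...). Each time step is
    //normalized by the scaling coefficient c[t], so the log likelihood can be recovered at the end
    //as the negated sum of log(c[t]).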
    //Step 1: Init at t=0
    t = 0;
    c[t] = 0;
    maxAlpha = 0;
    for(i=0; i<numStates; i++){
        alpha[t][i] = pi[i]*gauss(b,obs,sigmaStates,i,t,numInputDimensions);
        c[t] += alpha[t][i];

        //Keep track of the best state at time t
        if( alpha[t][i] > maxAlpha ){
            maxAlpha = alpha[t][i];
            estimatedStates[t] = i;
        }
    }

    //Set the initial scaling coefficient
    c[t] = 1.0/c[t];

    //Scale alpha
    for(i=0; i<numStates; i++) alpha[t][i] *= c[t];

    //Step 2: Induction
    for(t=1; t<T; t++){
        c[t] = 0.0;
        maxAlpha = 0;
        for(j=0; j<numStates; j++){
            alpha[t][j] = 0.0;
            for(i=0; i<numStates; i++){
                alpha[t][j] += alpha[t-1][i] * a[i][j];
            }
            alpha[t][j] *= gauss(b,obs,sigmaStates,j,t,numInputDimensions);
            c[t] += alpha[t][j];

            //Keep track of the best state at time t
            if( alpha[t][j] > maxAlpha ){
                maxAlpha = alpha[t][j];
                estimatedStates[t] = j;
            }
        }

        //Set the scaling coefficient
        c[t] = 1.0/c[t];

        //Scale alpha
        for(j=0; j<numStates; j++) alpha[t][j] *= c[t];
    }

    //Termination
    loglikelihood = 0.0;
    for(t=0; t<T; t++) loglikelihood += log( c[t] );
    loglikelihood = -loglikelihood; //Negating the sum of the log scaling coefficients gives the log likelihood

    //Set the phase as the last estimated state, this will give a phase between [0 1]
    phase = (estimatedStates[T-1]+1.0)/Float(numStates);

    return true;
}

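//Training note: this model is built directly from a single template timeseries rather than by
//Baum-Welch re-estimation. The downsampled training sample becomes the emission matrix b (one row
//per state), the transition matrix a is constrained by the model type (ergodic, or left-right with
//a maximum forward jump of delta states), and sigma controls the width of each state's Gaussian emission.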
bool ContinuousHiddenMarkovModel::train_(TimeSeriesClassificationSample &trainingData){

    //Clear any previous models
    clear();

    //The number of states is simply set as the number of downsampled samples in the training sample
    timeseriesLength = trainingData.getLength();
    numStates = (unsigned int)floor((double)(timeseriesLength/downsampleFactor));
    numInputDimensions = trainingData.getNumDimensions();
    classLabel = trainingData.getClassLabel();

    //a is simply initialized so that every transition probability is 1/numStates
    a.resize(numStates, numStates);
    for(unsigned int i=0; i<numStates; i++){
        for(unsigned int j=0; j<numStates; j++){
            a[i][j] = 1.0/numStates;
        }
    }

    //b is simply set as the downsampled training sample
    b.resize(numStates, numInputDimensions);

    unsigned int index = 0;
    Float norm = 0;
    for(unsigned int j=0; j<numInputDimensions; j++){
        index = 0;
        for(unsigned int i=0; i<numStates; i++){
            norm = 0;
            b[i][j] = 0;
            for(unsigned int k=0; k<downsampleFactor; k++){
                if( index < trainingData.getLength() ){
                    b[i][j] += trainingData[index++][j];
                    norm += 1;
                }
            }
            if( norm > 1 )
                b[i][j] /= norm;
        }
    }

    //Estimate pi
    pi.resize(numStates);

    switch( modelType ){
        case(HMM_ERGODIC):
            for(UINT i=0; i<numStates; i++){
                pi[i] = 1.0/numStates;
            }
            break;
        case(HMM_LEFTRIGHT):
            //Set the state transition constraints
            for(UINT i=0; i<numStates; i++){
                norm = 0;
                for(UINT j=0; j<numStates; j++){
                    if((j<i) || (j>i+delta)) a[i][j] = 0.0;
                    norm += a[i][j];
                }
                if( norm > 0 ){
                    for(UINT j=0; j<numStates; j++){
                        a[i][j] /= norm;
                    }
                }
            }

            //Set pi to start in state 0
            for(UINT i=0; i<numStates; i++){
                pi[i] = i==0 ? 1 : 0;
            }
            break;
        default:
            throw("HMM_ERROR: Unknown model type!");
            return false;
            break;
    }

    //Setup sigma for each state
    sigmaStates.resize( numStates, numInputDimensions );

    if( autoEstimateSigma ){

        //Estimate the standard deviation for each dimension, for each state
        MatrixFloat meanResults( numStates, numInputDimensions );
        for(unsigned int j=0; j<numInputDimensions; j++){

            //Estimate the mean for each state
            index = 0;
            for(unsigned int i=0; i<numStates; i++){
                norm = 0;
                meanResults[i][j] = 0;
                for(unsigned int k=0; k<downsampleFactor; k++){
                    if( index < trainingData.getLength() ){
                        meanResults[i][j] += trainingData[index++][j];
                        norm += 1;
                    }
                }
                if( norm > 1 ){
                    meanResults[i][j] /= norm;
                }
            }

            //Loop back over the data again and estimate the stddev for each state
            index = 0;
            for(unsigned int i=0; i<numStates; i++){
                norm = 0;
                sigmaStates[i][j] = 0;
                for(unsigned int k=0; k<downsampleFactor; k++){
                    if( index < trainingData.getLength() ){
                        sigmaStates[i][j] += SQR( trainingData[index++][j]-meanResults[i][j] );
                        norm += 1;
                    }
                }
                if( norm > 1 ){
                    sigmaStates[i][j] = sqrt( 1.0/norm * sigmaStates[i][j] );
                }

                //Make sure the estimated sigma is not smaller than the user-supplied minimum sigma
                if( sigmaStates[i][j] < sigma ){
                    sigmaStates[i][j] = sigma;
                }
            }
        }

    }else{
        sigmaStates.setAllValues(sigma);
    }

    //Setup the observation buffer for prediction
    observationSequence.resize( timeseriesLength, VectorFloat(numInputDimensions,0) );
    obsSequence.resize(timeseriesLength,numInputDimensions);
    estimatedStates.resize( numStates );

    //Finally, flag that the model was trained
    trained = true;

    return true;
}

bool ContinuousHiddenMarkovModel::reset(){

    //Reset the base class
    MLBase::reset();

    if( trained ){
        //Flush the observation buffer by pushing zero vectors through the circular buffer
        for(unsigned int i=0; i<observationSequence.getSize(); i++){
            observationSequence.push_back( VectorFloat(numInputDimensions,0) );
        }
    }

    return true;
}

bool ContinuousHiddenMarkovModel::clear(){

    //Clear the base class
    MLBase::clear();

    numStates = 0;
    loglikelihood = 0;
    timeseriesLength = 0;
    a.clear();
    b.clear();
    pi.clear();
    alpha.clear();
    c.clear();
    observationSequence.clear();
    obsSequence.clear();
    estimatedStates.clear();
    sigmaStates.clear();

    return true;
}

bool ContinuousHiddenMarkovModel::print() const{

    if( trained ){
        trainingLog << "A: " << std::endl;
        for(UINT i=0; i<a.getNumRows(); i++){
            for(UINT j=0; j<a.getNumCols(); j++){
                trainingLog << a[i][j] << "\t";
            }
            trainingLog << std::endl;
        }

        trainingLog << "B: " << std::endl;
        for(UINT i=0; i<b.getNumRows(); i++){
            for(UINT j=0; j<b.getNumCols(); j++){
                trainingLog << b[i][j] << "\t";
            }
            trainingLog << std::endl;
        }

        trainingLog << "Pi: ";
        for(size_t i=0; i<pi.size(); i++){
            trainingLog << pi[i] << "\t";
        }
        trainingLog << std::endl;

        trainingLog << "SigmaStates: ";
        for(UINT i=0; i<sigmaStates.getNumRows(); i++){
            for(UINT j=0; j<sigmaStates.getNumCols(); j++){
                trainingLog << sigmaStates[i][j] << "\t";
            }
            trainingLog << std::endl;
        }
        trainingLog << std::endl;

        //Check that each row of the transition matrix sums to 1
        Float sum = 0.0;
        for(UINT i=0; i<a.getNumRows(); i++){
            sum = 0.0;
            for(UINT j=0; j<a.getNumCols(); j++) sum += a[i][j];
            if( sum <= 0.99 || sum >= 1.01 ) warningLog << "WARNING: A Row " << i << " Sum: " << sum << std::endl;
        }
    }

    return true;
}

bool ContinuousHiddenMarkovModel::setDownsampleFactor(const UINT downsampleFactor){
    if( downsampleFactor > 0 ){
        clear();
        this->downsampleFactor = downsampleFactor;
        return true;
    }
    warningLog << "setDownsampleFactor(const UINT downsampleFactor) - Failed to set downsample factor, it must be greater than zero!" << std::endl;
    return false;
}

bool ContinuousHiddenMarkovModel::setModelType(const UINT modelType){
    if( modelType == HMM_ERGODIC || modelType == HMM_LEFTRIGHT ){
        clear();
        this->modelType = modelType;
        return true;
    }
    warningLog << "setModelType(const UINT modelType) - Failed to set model type, unknown type!" << std::endl;
    return false;
}

bool ContinuousHiddenMarkovModel::setDelta(const UINT delta){
    if( delta > 0 ){
        clear();
        this->delta = delta;
        return true;
    }
    warningLog << "setDelta(const UINT delta) - Failed to set delta, it must be greater than zero!" << std::endl;
    return false;
}

bool ContinuousHiddenMarkovModel::setSigma(const Float sigma){
    if( sigma > 0 ){
        this->sigma = sigma;

        if( !autoEstimateSigma && trained ){
            sigmaStates.setAllValues(sigma);
        }
        return true;
    }
    warningLog << "setSigma(const Float sigma) - Failed to set sigma, it must be greater than zero!" << std::endl;
    return false;
}

bool ContinuousHiddenMarkovModel::setAutoEstimateSigma(const bool autoEstimateSigma){

    clear();

    this->autoEstimateSigma = autoEstimateSigma;

    return true;
}

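//gauss(...) returns the likelihood of observation row j of y under the Gaussian emission model of
//state i: a product of independent univariate Gaussians, one per input dimension, with means taken
//from row i of x (the emission matrix b) and standard deviations from row i of sigma (sigmaStates).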
Float ContinuousHiddenMarkovModel::gauss( const MatrixFloat &x, const MatrixFloat &y, const MatrixFloat &sigma, const unsigned int i,const unsigned int j,const unsigned int N ){
    Float z = 1;
    for(unsigned int n=0; n<N; n++){
        z *= (1.0/( sigma[i][n] * SQRT_TWO_PI )) * exp( - SQR(x[i][n]-y[j][n])/(2.0*SQR(sigma[i][n])) );
    }
    return z;
}

bool ContinuousHiddenMarkovModel::save( std::fstream &file ) const{

    if( !file.is_open() ){
        errorLog << "save( fstream &file ) - File is not open!" << std::endl;
        return false;
    }

    //Write the header info
    file << "CONTINUOUS_HMM_MODEL_FILE_V1.0\n";

    //Write the base settings to the file
    if( !MLBase::saveBaseSettingsToFile(file) ){
        errorLog << "save(fstream &file) - Failed to save classifier base settings to file!" << std::endl;
        return false;
    }

    file << "DownsampleFactor: " << downsampleFactor << std::endl;
    file << "NumStates: " << numStates << std::endl;
    file << "ClassLabel: " << classLabel << std::endl;
    file << "TimeseriesLength: " << timeseriesLength << std::endl;
    file << "Sigma: " << sigma << std::endl;
    file << "AutoEstimateSigma: " << autoEstimateSigma << std::endl;
    file << "ModelType: " << modelType << std::endl;
    file << "Delta: " << delta << std::endl;
    file << "Threshold: " << cThreshold << std::endl;

    if( trained ){
        file << "A:\n";
        for(UINT i=0; i<numStates; i++){
            for(UINT j=0; j<numStates; j++){
                file << a[i][j];
                if( j+1 < numStates ) file << "\t";
            }
            file << std::endl;
        }

        file << "B:\n";
        for(UINT i=0; i<numStates; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                file << b[i][j];
                if( j+1 < numInputDimensions ) file << "\t";
            }
            file << std::endl;
        }

        file << "Pi: ";
        for(UINT i=0; i<numStates; i++){
            file << pi[i];
            if( i+1 < numStates ) file << "\t";
        }
        file << std::endl;

        file << "SigmaStates: ";
        for(UINT i=0; i<numStates; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                file << sigmaStates[i][j];
                if( j+1 < numInputDimensions ) file << "\t";
            }
            file << std::endl;
        }
        file << std::endl;
    }

    return true;
}

bool ContinuousHiddenMarkovModel::load( std::fstream &file ){

    clear();

    if( !file.is_open() ){
        errorLog << "load( fstream &file ) - File is not open!" << std::endl;
        return false;
    }

    std::string word;

    file >> word;

    //Find the file type header
    if(word != "CONTINUOUS_HMM_MODEL_FILE_V1.0"){
        errorLog << "load( fstream &file ) - Could not find Model File Header!" << std::endl;
        return false;
    }

    //Load the base settings from the file
    if( !MLBase::loadBaseSettingsFromFile(file) ){
        errorLog << "load(string filename) - Failed to load base settings from file!" << std::endl;
        return false;
    }

    file >> word;
    if(word != "DownsampleFactor:"){
        errorLog << "load( fstream &file ) - Could not find the DownsampleFactor header." << std::endl;
        return false;
    }
    file >> downsampleFactor;

    file >> word;
    if(word != "NumStates:"){
        errorLog << "load( fstream &file ) - Could not find the NumStates header." << std::endl;
        return false;
    }
    file >> numStates;

    file >> word;
    if(word != "ClassLabel:"){
        errorLog << "load( fstream &file ) - Could not find the ClassLabel header." << std::endl;
        return false;
    }
    file >> classLabel;

    file >> word;
    if(word != "TimeseriesLength:"){
        errorLog << "load( fstream &file ) - Could not find the TimeseriesLength header." << std::endl;
        return false;
    }
    file >> timeseriesLength;

    file >> word;
    if(word != "Sigma:"){
        errorLog << "load( fstream &file ) - Could not find the Sigma header." << std::endl;
        return false;
    }
    file >> sigma;

    file >> word;
    if(word != "AutoEstimateSigma:"){
        errorLog << "load( fstream &file ) - Could not find the AutoEstimateSigma header." << std::endl;
        return false;
    }
    file >> autoEstimateSigma;

    file >> word;
    if(word != "ModelType:"){
        errorLog << "load( fstream &file ) - Could not find the ModelType header." << std::endl;
        return false;
    }
    file >> modelType;

    file >> word;
    if(word != "Delta:"){
        errorLog << "load( fstream &file ) - Could not find the Delta header." << std::endl;
        return false;
    }
    file >> delta;

    file >> word;
    if(word != "Threshold:"){
        errorLog << "load( fstream &file ) - Could not find the Threshold header." << std::endl;
        return false;
    }
    file >> cThreshold;

    if( trained ){
        a.resize(numStates,numStates);
        b.resize(numStates,numInputDimensions);
        pi.resize(numStates);
        sigmaStates.resize(numStates,numInputDimensions);

        //Load the A, B and Pi matrices
        file >> word;
        if(word != "A:"){
            errorLog << "load( fstream &file ) - Could not find the A matrix header." << std::endl;
            return false;
        }

        //Load A
        for(UINT i=0; i<numStates; i++){
            for(UINT j=0; j<numStates; j++){
                file >> a[i][j];
            }
        }

        file >> word;
        if(word != "B:"){
            errorLog << "load( fstream &file ) - Could not find the B matrix header." << std::endl;
            return false;
        }

        //Load B
        for(UINT i=0; i<numStates; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                file >> b[i][j];
            }
        }

        file >> word;
        if(word != "Pi:"){
            errorLog << "load( fstream &file ) - Could not find the Pi header." << std::endl;
            return false;
        }

        //Load Pi
        for(UINT i=0; i<numStates; i++){
            file >> pi[i];
        }

        file >> word;
        if(word != "SigmaStates:"){
            errorLog << "load( fstream &file ) - Could not find the SigmaStates header." << std::endl;
            return false;
        }

        //Load sigmaStates
        for(UINT i=0; i<numStates; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                file >> sigmaStates[i][j];
            }
        }

        //Setup the observation buffer for prediction
        observationSequence.resize( timeseriesLength, VectorFloat(numInputDimensions,0) );
        obsSequence.resize(timeseriesLength,numInputDimensions);
        estimatedStates.resize( numStates );
    }

    return true;
}

GRT_END_NAMESPACE
Symbol reference (from the generated documentation):

void clear() - Definition: Matrix.h:522
bool saveBaseSettingsToFile(std::fstream &file) const - Definition: MLBase.cpp:375
bool push_back(const T &value)
Vector< UINT > estimatedStates - The estimated states for prediction.
virtual bool reset() - Definition: MLBase.cpp:125
Float cThreshold - The classification threshold for this model.
This class acts as the main interface for using a Hidden Markov Model.
MatrixFloat a - The transition probability matrix.
virtual bool resize(const unsigned int size) - Definition: Vector.h:133
Float loglikelihood - The log likelihood of an observation sequence given the model, calculated by the forward method...
UINT getSize() const - Definition: Vector.h:191
VectorFloat pi - The state start probability vector.
UINT modelType - The model type (LEFTRIGHT, or ERGODIC)
bool setAllValues(const T &value) - Definition: Matrix.h:336
UINT delta - The number of states a model can move to in a LEFTRIGHT model.
CircularBuffer< VectorFloat > observationSequence - A buffer to store data for realtime prediction.
bool copyMLBaseVariables(const MLBase *mlBase) - Definition: MLBase.cpp:50
This class implements a continuous Hidden Markov Model.
UINT classLabel - The class label associated with this model.
unsigned int getNumRows() const - Definition: Matrix.h:542
unsigned int getNumCols() const - Definition: Matrix.h:549
MatrixFloat sigmaStates - The sigma value for each state.
bool loadBaseSettingsFromFile(std::fstream &file) - Definition: MLBase.cpp:398
virtual bool clear() - Definition: MLBase.cpp:127
virtual bool load(std::fstream &file)
virtual bool predict_(VectorFloat &x)
virtual bool resize(const unsigned int r, const unsigned int c) - Definition: Matrix.h:232
virtual bool save(std::fstream &file) const
MatrixFloat b - The emission probability matrix.
UINT timeseriesLength - The length of the training timeseries.
unsigned int getSize() const
bool setModelType(const UINT modelType)
bool resize(const unsigned int newBufferSize)
UINT numStates - The number of states for this model.
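For orientation, a minimal usage sketch follows. It is not part of the listing above: it assumes the GRT umbrella header (GRT/GRT.h) is on the include path and that a TimeSeriesClassificationSample can be constructed from a class label and a MatrixFloat (both types are part of GRT, but this exact constructor is an assumption); the ContinuousHiddenMarkovModel calls themselves (constructor arguments, train_, predict_, print) follow the signatures shown in the source above.

// Minimal usage sketch (see the assumptions noted above)
#include <GRT/GRT.h>
#include <iostream>
#include <cstdlib>
using namespace GRT;

int main(){

    // Build a toy 1-D training timeseries: 100 samples of a ramp from 0 to 1
    MatrixFloat trainingTimeseries( 100, 1 );
    for(UINT i=0; i<100; i++) trainingTimeseries[i][0] = i / 100.0;

    // Wrap the data in a training sample with class label 1
    // (assumed constructor: TimeSeriesClassificationSample(classLabel, data))
    TimeSeriesClassificationSample sample( 1, trainingTimeseries );

    // downsampleFactor=5, delta=1, autoEstimateSigma=false, sigma=0.2
    ContinuousHiddenMarkovModel hmm( 5, 1, false, 0.2 );

    if( !hmm.train_( sample ) ){
        std::cout << "Failed to train model!" << std::endl;
        return EXIT_FAILURE;
    }

    // Run the scaled forward algorithm against another timeseries; after this call the
    // model stores the log likelihood, estimated state sequence, and phase internally
    MatrixFloat testTimeseries = trainingTimeseries;
    if( !hmm.predict_( testTimeseries ) ){
        std::cout << "Prediction failed!" << std::endl;
        return EXIT_FAILURE;
    }

    // Dump the A, B, Pi, and SigmaStates values to the training log
    hmm.print();

    return EXIT_SUCCESS;
}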