GestureRecognitionToolkit  Version: 0.2.5
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source C++ machine learning library for real-time gesture recognition.
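Before the source listing, here is a minimal sketch of how this classifier is typically driven from user code. It is an illustration only: the training file name and the zero-valued test sample are placeholders, and the calls are the generic GRT ClassificationData/MLBase APIs used throughout the listing below.

#include <GRT/GRT.h>
#include <iostream>
using namespace GRT;

int main(){
    //Load some labelled training data; the file name is a placeholder
    ClassificationData trainingData;
    if( !trainingData.load( "TrainingData.grt" ) ) return 1;

    //Train a boosted model using the default DecisionStump weak classifier
    AdaBoost adaBoost;
    if( !adaBoost.train( trainingData ) ) return 1;

    //Classify a new sample (all zeros, purely for illustration)
    VectorFloat sample( trainingData.getNumDimensions() );
    adaBoost.predict( sample );
    std::cout << "Predicted class label: " << adaBoost.getPredictedClassLabel() << std::endl;

    return 0;
}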
AdaBoost.cpp
/*
GRT MIT License
Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#define GRT_DLL_EXPORTS
#include "AdaBoost.h"

GRT_BEGIN_NAMESPACE

//Define the string that will be used to identify the object
const std::string AdaBoost::id = "AdaBoost";
std::string AdaBoost::getId() { return AdaBoost::id; }

//Register the AdaBoost module with the Classifier base class
RegisterClassifierModule< AdaBoost > AdaBoost::registerModule( getId() );

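//Note: the constructor's default arguments (a DecisionStump weak classifier, scaling and
//null rejection disabled, nullRejectionCoeff = 10.0, 20 boosting iterations, and the
//MAX_VALUE prediction method) are declared in AdaBoost.h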
AdaBoost::AdaBoost(const WeakClassifier &weakClassifier,bool useScaling,bool useNullRejection,Float nullRejectionCoeff,UINT numBoostingIterations,UINT predictionMethod) : Classifier( AdaBoost::getId() )
{
    setWeakClassifier( weakClassifier );
    this->useScaling = useScaling;
    this->useNullRejection = useNullRejection;
    this->nullRejectionCoeff = nullRejectionCoeff;
    this->numBoostingIterations = numBoostingIterations;
    this->predictionMethod = predictionMethod;
    classifierMode = STANDARD_CLASSIFIER_MODE;
}

AdaBoost::AdaBoost(const AdaBoost &rhs) : Classifier( AdaBoost::getId() )
{
    classifierMode = STANDARD_CLASSIFIER_MODE;
    *this = rhs;
}

AdaBoost::~AdaBoost(void)
{
    //Clean up any weak classifiers
    clearWeakClassifiers();
}

AdaBoost& AdaBoost::operator=(const AdaBoost &rhs){
    if( this != &rhs ){
        //Clear the current weak classifiers
        clearWeakClassifiers();

        this->numBoostingIterations = rhs.numBoostingIterations;
        this->predictionMethod = rhs.predictionMethod;
        this->models = rhs.models;

        if( rhs.weakClassifiers.getSize() > 0 ){
            this->weakClassifiers.reserve( rhs.weakClassifiers.getSize() );
            for(UINT i=0; i<rhs.weakClassifiers.getSize(); i++){
                WeakClassifier *weakClassifierPtr = rhs.weakClassifiers[i]->createNewInstance();
                weakClassifiers.push_back( weakClassifierPtr );
            }
        }

        //Clone the classifier variables
        copyBaseVariables( dynamic_cast<const Classifier*>( &rhs ) );
    }
    return *this;
}

bool AdaBoost::deepCopyFrom(const Classifier *classifier){

    if( classifier == NULL ){
        errorLog << "deepCopyFrom(const Classifier *classifier) - The classifier pointer is NULL!" << std::endl;
        return false;
    }

    if( this->getId() == classifier->getId() ){
        //Clone the AdaBoost values
        const AdaBoost *ptr = dynamic_cast<const AdaBoost*>( classifier );

        //Clear the current weak classifiers
        clearWeakClassifiers();

        this->numBoostingIterations = ptr->numBoostingIterations;
        this->predictionMethod = ptr->predictionMethod;
        this->models = ptr->models;

        if( ptr->weakClassifiers.getSize() > 0 ){
            this->weakClassifiers.resize( ptr->weakClassifiers.getSize() );
            for(UINT i=0; i<ptr->weakClassifiers.getSize(); i++){
                weakClassifiers[i] = ptr->weakClassifiers[i]->createNewInstance();
            }
        }

        //Clone the classifier variables
        return copyBaseVariables( classifier );
    }
    return false;
}

bool AdaBoost::train_(ClassificationData &trainingData){

    //Clear any previous model
    clear();

    if( trainingData.getNumSamples() <= 1 ){
        errorLog << "train_(ClassificationData &trainingData) - There are not enough training samples to train a model! Number of samples: " << trainingData.getNumSamples() << std::endl;
        return false;
    }

    numInputDimensions = trainingData.getNumDimensions();
    numOutputDimensions = trainingData.getNumClasses();
    numClasses = trainingData.getNumClasses();
    const UINT POSITIVE_LABEL = WEAK_CLASSIFIER_POSITIVE_CLASS_LABEL;
    const UINT NEGATIVE_LABEL = WEAK_CLASSIFIER_NEGATIVE_CLASS_LABEL;
    Float alpha = 0;
    const Float beta = 0.001;
    Float epsilon = 0;
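    //alpha is the committee weight of the current weak learner, epsilon its weighted
    //error, and beta the minimum margin over chance (0.5) below which boosting for
    //the current class is stopped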
    TrainingResult trainingResult;
    ClassificationData validationData;

    const UINT K = weakClassifiers.getSize();
    if( K == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - No weakClassifiers have been set. You need to set at least one weak classifier first." << std::endl;
        return false;
    }

    //Pass the logging state onto the weak classifiers
    for(UINT k=0; k<K; k++){
        weakClassifiers[k]->setTrainingLoggingEnabled( this->getTrainingLoggingEnabled() );
    }

    classLabels.resize(numClasses);
    models.resize(numClasses);
    ranges = trainingData.getRanges();

    //Scale the training data if needed
    if( useScaling ){
        trainingData.scale(ranges,0,1);
    }

    if( useValidationSet ){
        validationData = trainingData.split( 100-validationSetSize );
    }

    const UINT M = trainingData.getNumSamples();
    trainingLog << "Training AdaBoost model, num training examples: " << M << ", num validation examples: " << validationData.getNumSamples() << ", num classes: " << numClasses << ", num weak learners: " << K << std::endl;

    //Create the weights vector
    VectorFloat weights(M);

    //Create the error matrix
    MatrixFloat errorMatrix(K,M);

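    //Train a one-vs-all boosted committee for each class: samples of the current
    //class are relabelled POSITIVE_LABEL and all other samples NEGATIVE_LABEL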
    for(UINT classIter=0; classIter<numClasses; classIter++){

        //Get the class label for the current class
        classLabels[classIter] = trainingData.getClassLabels()[classIter];

        //Set the class label of the current model
        models[ classIter ].setClassLabel( classLabels[classIter] );

        //Setup the labels for this class, POSITIVE_LABEL == 1, NEGATIVE_LABEL == 2
        ClassificationData classData;
        classData.setNumDimensions(trainingData.getNumDimensions());
        for(UINT i=0; i<M; i++){
            UINT label = trainingData[i].getClassLabel()==classLabels[classIter] ? POSITIVE_LABEL : NEGATIVE_LABEL;
            VectorFloat trainingSample = trainingData[i].getSample();
            classData.addSample(label,trainingSample);
        }

        //Setup the initial training sample weights
        std::fill(weights.begin(),weights.end(),1.0/M);

        //Run the boosting loop
        bool keepBoosting = true;
        UINT t = 0;

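        //Each boosting iteration: (1) train every candidate weak learner on the
        //weighted data, (2) keep the learner with the lowest weighted error,
        //(3) add it to the committee weighted by alpha, and (4) boost the weights
        //of the misclassified samples so the next iteration focuses on them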
        while( keepBoosting ){

            //Pick the classifier from the family of classifiers that minimizes the total error
            UINT bestClassifierIndex = 0;
            Float minError = grt_numeric_limits< Float >::max();
            for(UINT k=0; k<K; k++){
                //Get the k'th possible classifier
                WeakClassifier *weakLearner = weakClassifiers[k];

                //Train the current classifier
                if( !weakLearner->train(classData,weights) ){
                    errorLog << __GRT_LOG__ << " Failed to train weakLearner!" << std::endl;
                    return false;
                }

                //Compute the weighted error for this classifier
                Float e = 0;
                Float positiveLabel = weakLearner->getPositiveClassLabel();
                Float numCorrect = 0;
                Float numIncorrect = 0;
                for(UINT i=0; i<M; i++){
                    //Only penalize errors
                    Float prediction = weakLearner->predict( classData[i].getSample() );

                    if( (prediction == positiveLabel && classData[i].getClassLabel() != POSITIVE_LABEL) || //False positive
                        (prediction != positiveLabel && classData[i].getClassLabel() == POSITIVE_LABEL) ){ //False negative
                        e += weights[i]; //Increase the error proportional to the weight of the example
                        errorMatrix[k][i] = 1; //Flag that there was an error
                        numIncorrect++;
                    }else{
                        errorMatrix[k][i] = 0; //Flag that there was no error
                        numCorrect++;
                    }
                }

                trainingLog << "PositiveClass: " << classLabels[classIter] << " Boosting Iter: " << t << " Classifier: " << k << " WeightedError: " << e << " RatioCorrect: " << numCorrect/M << " RatioIncorrect: " << numIncorrect/M << std::endl;

                if( e < minError ){
                    minError = e;
                    bestClassifierIndex = k;
                }

            }

            epsilon = minError;

            //Set alpha using the AdaBoost M1 weight formula: weak learners with a small weighted error (epsilon close to 0) receive a strong weight in the final classifier
            alpha = 0.5 * log( (1.0-epsilon)/epsilon );

            trainingLog << "PositiveClass: " << classLabels[classIter] << " Boosting Iter: " << t << " Best Classifier Index: " << bestClassifierIndex << " MinError: " << minError << " Alpha: " << alpha << std::endl;

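            //Three stopping conditions: alpha is infinite (the weak learner made no
            //errors), the weighted error is within beta of chance (0.5), or the
            //boosting iteration budget has been used up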
            if( grt_isinf(alpha) ){ keepBoosting = false; trainingLog << "Alpha is INF. Stopping boosting for current class" << std::endl; }
            if( 0.5 - epsilon <= beta ){ keepBoosting = false; trainingLog << "0.5 - Epsilon <= Beta. Stopping boosting for current class" << std::endl; }
            if( ++t >= numBoostingIterations ) keepBoosting = false;

            trainingResult.setClassificationResult(t, minError, this);
            trainingResults.push_back(trainingResult);
            trainingResultsObserverManager.notifyObservers( trainingResult );

            if( keepBoosting ){

                //Add the best weak classifier to the committee
                models[ classIter ].addClassifierToCommitee( weakClassifiers[bestClassifierIndex], alpha );

                //Update the weights for the next boosting iteration
                Float reWeight = (1.0 - epsilon) / epsilon;
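                //reWeight = (1-epsilon)/epsilon = exp(2*alpha), so scaling only the
                //misclassified weights by this factor and then renormalizing is
                //equivalent to the classic exp(+alpha)/exp(-alpha) AdaBoost update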
                Float oldSum = 0;
                Float newSum = 0;
                for(UINT i=0; i<M; i++){
                    oldSum += weights[i];
                    //Only update the weights that resulted in an incorrect prediction
                    if( errorMatrix[bestClassifierIndex][i] == 1 ) weights[i] *= reWeight;
                    newSum += weights[i];
                }

                //Normalize all the weights
                //This increases the weights of the samples that were incorrectly labelled
                //while decreasing the weights of the samples that were correctly classified
                reWeight = oldSum/newSum;
                for(UINT i=0; i<M; i++){
                    weights[i] *= reWeight;
                }

            }else{
                trainingLog << "Stopping boosting training at iteration: " << t-1 << " with an error of " << epsilon << std::endl;
                if( t-1 == 0 ){
                    //Add the best weak classifier to the committee (we have to add it as this is the first iteration)
                    if( grt_isinf(alpha) ){ alpha = 1; } //If alpha is infinite then the first classifier got everything correct
                    models[ classIter ].addClassifierToCommitee( weakClassifiers[bestClassifierIndex], alpha );
                }
            }

        }
    }

    //Normalize the weights
    for(UINT k=0; k<numClasses; k++){
        models[k].normalizeWeights();
    }

    //Flag that the model has been fully trained
    trained = true;
    converged = true;

    //Setup the data for prediction
    predictedClassLabel = 0;
    maxLikelihood = 0;
    classLikelihoods.resize(numClasses);
    classDistances.resize(numClasses);

    //Compute the final training stats
    trainingSetAccuracy = 0;
    validationSetAccuracy = 0;

    //If scaling was on, then the data will already be scaled, so turn it off temporarily
    bool scalingState = useScaling;
    useScaling = false;
    for(UINT i=0; i<M; i++){
        if( !predict_( trainingData[i].getSample() ) ){
            trained = false;
            errorLog << __GRT_LOG__ << " Failed to run prediction for training sample: " << i << "! Failed to fully train model!" << std::endl;
            return false;
        }

        if( predictedClassLabel == trainingData[i].getClassLabel() ){
            trainingSetAccuracy++;
        }
    }

    if( useValidationSet ){
        for(UINT i=0; i<validationData.getNumSamples(); i++){
            if( !predict_( validationData[i].getSample() ) ){
                trained = false;
                errorLog << __GRT_LOG__ << " Failed to run prediction for validation sample: " << i << "! Failed to fully train model!" << std::endl;
                return false;
            }

            if( predictedClassLabel == validationData[i].getClassLabel() ){
                validationSetAccuracy++;
            }
        }
    }

    trainingSetAccuracy = trainingSetAccuracy / M * 100.0;

    trainingLog << "Training set accuracy: " << trainingSetAccuracy << std::endl;

    if( useValidationSet ){
        validationSetAccuracy = validationSetAccuracy / validationData.getNumSamples() * 100.0;
        trainingLog << "Validation set accuracy: " << validationSetAccuracy << std::endl;
    }

    //Reset the scaling state for future prediction
    useScaling = scalingState;

    return true;
}

bool AdaBoost::predict_(VectorFloat &inputVector){

    predictedClassLabel = 0;
    maxLikelihood = -10000;

    if( !trained ){
        errorLog << __GRT_LOG__ << " AdaBoost Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << __GRT_LOG__ << " The size of the input vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }

    if( classLikelihoods.getSize() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.getSize() != numClasses ) classDistances.resize(numClasses,0);

    UINT bestClassIndex = 0;
    UINT numPositivePredictions = 0;
    bestDistance = -grt_numeric_limits< Float >::max();
    Float worstDistance = grt_numeric_limits< Float >::max();
    Float sum = 0;
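    //Two prediction strategies: MAX_POSITIVE_VALUE only accepts classes whose
    //committee score is positive (falling back to the null class when none are),
    //while MAX_VALUE simply picks the class with the largest committee score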
    for(UINT k=0; k<numClasses; k++){
        Float result = models[k].predict( inputVector );

        switch ( predictionMethod ) {
            case MAX_POSITIVE_VALUE:
                if( result > 0 ){
                    if( result > bestDistance ){
                        bestDistance = result;
                        bestClassIndex = k;
                    }
                    numPositivePredictions++;
                    classLikelihoods[k] = result;
                }else classLikelihoods[k] = 0;

                classDistances[k] = result;
                sum += classLikelihoods[k];

                break;
            case MAX_VALUE:
                if( result > bestDistance ){
                    bestDistance = result;
                    bestClassIndex = k;
                }
                if( result < worstDistance ){
                    worstDistance = result;
                }
                numPositivePredictions++; //In the MAX_VALUE mode we assume all samples are valid
                classLikelihoods[k] = result;
                classDistances[k] = result;

                break;
            default:
                errorLog << __GRT_LOG__ << " Unknown prediction method!" << std::endl;
                break;
        }
    }

    if( predictionMethod == MAX_VALUE ){
        //Some of the class likelihoods might be negative, so we add the most negative value to each to offset this
        worstDistance = fabs( worstDistance );
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] += worstDistance;
            sum += classLikelihoods[k];
        }
    }

    //Normalize the class likelihoods
    if( sum > 0 ){
        for(UINT k=0; k<numClasses; k++)
            classLikelihoods[k] /= sum;
    }
    maxLikelihood = classLikelihoods[ bestClassIndex ];

    if( numPositivePredictions == 0 ){
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }else predictedClassLabel = classLabels[ bestClassIndex ];

    return true;
}

bool AdaBoost::recomputeNullRejectionThresholds(){

    if( trained ){
        //Todo - need to add null rejection for AdaBoost
        return false;
    }
    return false;
}

bool AdaBoost::setNullRejectionCoeff(Float nullRejectionCoeff){

    if( nullRejectionCoeff > 0 ){
        this->nullRejectionCoeff = nullRejectionCoeff;
        recomputeNullRejectionThresholds();
        return true;
    }
    return false;
}

bool AdaBoost::save( std::fstream &file ) const{

    if(!file.is_open())
    {
        errorLog << "save(fstream &file) - The file is not open!" << std::endl;
        return false;
    }

    //Write the header info
    file << "GRT_ADABOOST_MODEL_FILE_V2.0\n";

    //Write the classifier settings to the file
    if( !Classifier::saveBaseSettingsToFile(file) ){
        errorLog << __GRT_LOG__ << " Failed to save classifier base settings to file!" << std::endl;
        return false;
    }

    //Write the AdaBoost settings to the file
    file << "PredictionMethod: " << predictionMethod << std::endl;

    //If the model has been trained then write the model
    if( trained ){
        file << "Models: " << std::endl;
        for(UINT i=0; i<models.getSize(); i++){
            if( !models[i].save( file ) ){
                errorLog << __GRT_LOG__ << " Failed to write model " << i << " to file!" << std::endl;
                file.close();
                return false;
            }
        }
    }

    return true;
}

bool AdaBoost::load( std::fstream &file ){

    clear();

    if(!file.is_open())
    {
        errorLog << __GRT_LOG__ << " Could not open file to load model!" << std::endl;
        return false;
    }

    std::string word;
    file >> word;

    //Check to see if we should load a legacy file
    if( word == "GRT_ADABOOST_MODEL_FILE_V1.0" ){
        return loadLegacyModelFromFile( file );
    }

    if( word != "GRT_ADABOOST_MODEL_FILE_V2.0" ){
        errorLog << __GRT_LOG__ << " Failed to read file header!" << std::endl;
        errorLog << word << std::endl;
        return false;
    }

    //Load the base settings from the file
    if( !Classifier::loadBaseSettingsFromFile(file) ){
        errorLog << __GRT_LOG__ << " Failed to load base settings from file!" << std::endl;
        return false;
    }

    file >> word;
    if( word != "PredictionMethod:" ){
        errorLog << __GRT_LOG__ << " Failed to read PredictionMethod header!" << std::endl;
        return false;
    }
    file >> predictionMethod;

    if( trained ){
        file >> word;
        if( word != "Models:" ){
            errorLog << __GRT_LOG__ << " Failed to read Models header!" << std::endl;
            return false;
        }

        //Load the models
        models.resize( numClasses );
        for(UINT i=0; i<models.getSize(); i++){
            if( !models[i].load( file ) ){
                errorLog << __GRT_LOG__ << " Failed to load model " << i << " from file!" << std::endl;
                file.close();
                return false;
            }
        }

        //Recompute the null rejection thresholds
        recomputeNullRejectionThresholds();

        //Resize the prediction results to make sure it is setup for realtime prediction
        maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
        bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
        classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
        classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
    }

    return true;
}

bool AdaBoost::clear(){

    //Clear the Classifier variables
    Classifier::clear();

    //Clear the AdaBoost model
    models.clear();

    return true;
}

bool AdaBoost::setWeakClassifier(const WeakClassifier &weakClassifier){

    //Clear any previous weak classifiers
    clearWeakClassifiers();

    WeakClassifier *weakClassifierPtr = weakClassifier.createNewInstance();

    weakClassifiers.push_back( weakClassifierPtr );

    return true;
}

bool AdaBoost::addWeakClassifier(const WeakClassifier &weakClassifier){

    WeakClassifier *weakClassifierPtr = weakClassifier.createNewInstance();
    weakClassifiers.push_back( weakClassifierPtr );

    return true;
}

bool AdaBoost::clearWeakClassifiers(){

    for(UINT i=0; i<weakClassifiers.getSize(); i++){
        if( weakClassifiers[i] != NULL ){
            delete weakClassifiers[i];
            weakClassifiers[i] = NULL;
        }
    }
    weakClassifiers.clear();
    return true;
}

bool AdaBoost::setNumBoostingIterations(UINT numBoostingIterations){
    if( numBoostingIterations > 0 ){
        this->numBoostingIterations = numBoostingIterations;
        return true;
    }
    return false;
}

bool AdaBoost::setPredictionMethod(UINT predictionMethod){
    if( predictionMethod != MAX_POSITIVE_VALUE && predictionMethod != MAX_VALUE ){
        return false;
    }
    this->predictionMethod = predictionMethod;
    return true;
}

void AdaBoost::printModel(){

    std::cout << "AdaBoostModel: \n";
    std::cout << "NumFeatures: " << numInputDimensions << std::endl;
    std::cout << "NumClasses: " << numClasses << std::endl;
    std::cout << "UseScaling: " << useScaling << std::endl;
    std::cout << "UseNullRejection: " << useNullRejection << std::endl;

    for(UINT k=0; k<numClasses; k++){
        std::cout << "Class: " << k+1 << " ClassLabel: " << classLabels[k] << std::endl;
        models[k].print();
    }

}

bool AdaBoost::loadLegacyModelFromFile( std::fstream &file ){

    std::string word;

    file >> word;
    if( word != "NumFeatures:" ){
        errorLog << __GRT_LOG__ << " Failed to read NumFeatures header!" << std::endl;
        return false;
    }
    file >> numInputDimensions;

    file >> word;
    if( word != "NumClasses:" ){
        errorLog << __GRT_LOG__ << " Failed to read NumClasses header!" << std::endl;
        return false;
    }
    file >> numClasses;

    file >> word;
    if( word != "UseScaling:" ){
        errorLog << __GRT_LOG__ << " Failed to read UseScaling header!" << std::endl;
        return false;
    }
    file >> useScaling;

    file >> word;
    if( word != "UseNullRejection:" ){
        errorLog << __GRT_LOG__ << " Failed to read UseNullRejection header!" << std::endl;
        return false;
    }
    file >> useNullRejection;

    if( useScaling ){
        file >> word;
        if( word != "Ranges:" ){
            errorLog << __GRT_LOG__ << " Failed to read Ranges header!" << std::endl;
            return false;
        }
        ranges.resize( numInputDimensions );

        for(UINT n=0; n<ranges.getSize(); n++){
            file >> ranges[n].minValue;
            file >> ranges[n].maxValue;
        }
    }

    file >> word;
    if( word != "Trained:" ){
        errorLog << __GRT_LOG__ << " Failed to read Trained header!" << std::endl;
        return false;
    }
    file >> trained;

    file >> word;
    if( word != "PredictionMethod:" ){
        errorLog << __GRT_LOG__ << " Failed to read PredictionMethod header!" << std::endl;
        return false;
    }
    file >> predictionMethod;

    if( trained ){
        file >> word;
        if( word != "Models:" ){
            errorLog << __GRT_LOG__ << " Failed to read Models header!" << std::endl;
            return false;
        }

        //Load the models
        models.resize( numClasses );
        classLabels.resize( numClasses );
        for(UINT i=0; i<models.getSize(); i++){
            if( !models[i].load( file ) ){
                errorLog << __GRT_LOG__ << " Failed to load model " << i << " from file!" << std::endl;
                file.close();
                return false;
            }

            //Set the class label
            classLabels[i] = models[i].getClassLabel();
        }
    }

    //Recompute the null rejection thresholds
    recomputeNullRejectionThresholds();

    //Resize the prediction results to make sure it is setup for realtime prediction
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
    classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
    classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);

    return true;
}

GRT_END_NAMESPACE