GestureRecognitionToolkit  Version: 0.2.0
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source C++ machine learning library for real-time gesture recognition.
AdaBoost.cpp
/*
GRT MIT License
Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#define GRT_DLL_EXPORTS
#include "AdaBoost.h"

GRT_BEGIN_NAMESPACE

//Register the AdaBoost module with the Classifier base class
RegisterClassifierModule< AdaBoost > AdaBoost::registerModule("AdaBoost");

AdaBoost::AdaBoost(const WeakClassifier &weakClassifier,bool useScaling,bool useNullRejection,Float nullRejectionCoeff,UINT numBoostingIterations,UINT predictionMethod)
{
    setWeakClassifier( weakClassifier );
    this->useScaling = useScaling;
    this->useNullRejection = useNullRejection;
    this->nullRejectionCoeff = nullRejectionCoeff;
    this->numBoostingIterations = numBoostingIterations;
    this->predictionMethod = predictionMethod;
    classType = "AdaBoost";
    classifierType = classType;
    classifierMode = STANDARD_CLASSIFIER_MODE;
    debugLog.setProceedingText("[DEBUG AdaBoost]");
    errorLog.setProceedingText("[ERROR AdaBoost]");
    trainingLog.setProceedingText("[TRAINING AdaBoost]");
    warningLog.setProceedingText("[WARNING AdaBoost]");
}

AdaBoost::AdaBoost(const AdaBoost &rhs){
    classifierType = "AdaBoost";
    classifierMode = STANDARD_CLASSIFIER_MODE;
    debugLog.setProceedingText("[DEBUG AdaBoost]");
    errorLog.setProceedingText("[ERROR AdaBoost]");
    trainingLog.setProceedingText("[TRAINING AdaBoost]");
    warningLog.setProceedingText("[WARNING AdaBoost]");
    *this = rhs;
}

AdaBoost::~AdaBoost(void)
{
    //Clean up any weak classifiers
    clearWeakClassifiers();
}

AdaBoost& AdaBoost::operator=(const AdaBoost &rhs){
    if( this != &rhs ){
        //Clear the current weak classifiers
        clearWeakClassifiers();

        this->numBoostingIterations = rhs.numBoostingIterations;
        this->predictionMethod = rhs.predictionMethod;
        this->models = rhs.models;

        if( rhs.weakClassifiers.size() > 0 ){
            for(UINT i=0; i<rhs.weakClassifiers.size(); i++){
                WeakClassifier *weakClassiferPtr = rhs.weakClassifiers[i]->createNewInstance();
                weakClassifiers.push_back( weakClassiferPtr );
            }
        }

        //Clone the classifier variables
        copyBaseVariables( (Classifier*)&rhs );
    }
    return *this;
}

bool AdaBoost::deepCopyFrom(const Classifier *classifier){

    if( classifier == NULL ){
        errorLog << "deepCopyFrom(const Classifier *classifier) - The classifier pointer is NULL!" << std::endl;
        return false;
    }

    if( this->getClassifierType() == classifier->getClassifierType() ){
        //Clone the AdaBoost values
        AdaBoost *ptr = (AdaBoost*)classifier;

        //Clear the current weak classifiers
        clearWeakClassifiers();

        this->numBoostingIterations = ptr->numBoostingIterations;
        this->predictionMethod = ptr->predictionMethod;
        this->models = ptr->models;

        if( ptr->weakClassifiers.size() > 0 ){
            for(UINT i=0; i<ptr->weakClassifiers.size(); i++){
                WeakClassifier *weakClassiferPtr = ptr->weakClassifiers[i]->createNewInstance();
                weakClassifiers.push_back( weakClassiferPtr );
            }
        }

        //Clone the classifier variables
        return copyBaseVariables( classifier );
    }
    return false;
}

bool AdaBoost::train_(ClassificationData &trainingData){

    //Clear any previous model
    clear();

    if( trainingData.getNumSamples() <= 1 ){
        errorLog << "train_(ClassificationData &trainingData) - There are not enough training samples to train a model! Number of samples: " << trainingData.getNumSamples() << std::endl;
        return false;
    }

    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    const UINT M = trainingData.getNumSamples();
    const UINT POSITIVE_LABEL = WEAK_CLASSIFIER_POSITIVE_CLASS_LABEL;
    const UINT NEGATIVE_LABEL = WEAK_CLASSIFIER_NEGATIVE_CLASS_LABEL;
    Float alpha = 0;
    const Float beta = 0.001;
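    //Note: beta acts as a safety margin on the stopping rule further below; boosting stops for a class
    //once the best weak learner's weighted error epsilon is within beta of chance level (0.5 - epsilon <= beta)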
    Float epsilon = 0;
    TrainingResult trainingResult;

    const UINT K = (UINT)weakClassifiers.size();
    if( K == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - No weakClassifiers have been set. You need to set at least one weak classifier first." << std::endl;
        return false;
    }

    classLabels.resize(numClasses);
    models.resize(numClasses);
    ranges = trainingData.getRanges();

    //Scale the training data if needed
    if( useScaling ){
        trainingData.scale(ranges,0,1);
    }

    //Create the weights vector
    VectorFloat weights(M);

    //Create the error matrix
    MatrixFloat errorMatrix(K,M);

    for(UINT classIter=0; classIter<numClasses; classIter++){

        //Get the class label for the current class
        classLabels[classIter] = trainingData.getClassLabels()[classIter];

        //Set the class label of the current model
        models[ classIter ].setClassLabel( classLabels[classIter] );

        //Setup the labels for this class, POSITIVE_LABEL == 1, NEGATIVE_LABEL == 2
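        //Note: training is one-vs-all, so each class gets its own boosted committee; samples of the
        //current class are relabelled POSITIVE_LABEL and all remaining samples NEGATIVE_LABEL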
        ClassificationData classData;
        classData.setNumDimensions(trainingData.getNumDimensions());
        for(UINT i=0; i<M; i++){
            UINT label = trainingData[i].getClassLabel()==classLabels[classIter] ? POSITIVE_LABEL : NEGATIVE_LABEL;
            VectorFloat trainingSample = trainingData[i].getSample();
            classData.addSample(label,trainingSample);
        }

        //Setup the initial training sample weights
        std::fill(weights.begin(),weights.end(),1.0/M);

        //Run the boosting loop
        bool keepBoosting = true;
        UINT t = 0;

        while( keepBoosting ){

            //Pick the classifier from the family of classifiers that minimizes the total error
            UINT bestClassifierIndex = 0;
            Float minError = grt_numeric_limits< Float >::max();
            for(UINT k=0; k<K; k++){
                //Get the k'th possible classifier
                WeakClassifier *weakLearner = weakClassifiers[k];

                //Train the current classifier
                if( !weakLearner->train(classData,weights) ){
                    errorLog << "Failed to train weakLearner!" << std::endl;
                    return false;
                }

                //Compute the weighted error for this classifier
                Float e = 0;
                Float positiveLabel = weakLearner->getPositiveClassLabel();
                Float numCorrect = 0;
                Float numIncorrect = 0;
                for(UINT i=0; i<M; i++){
                    //Only penalize errors
                    Float prediction = weakLearner->predict( classData[i].getSample() );

                    if( (prediction == positiveLabel && classData[i].getClassLabel() != POSITIVE_LABEL) || //False positive
                        (prediction != positiveLabel && classData[i].getClassLabel() == POSITIVE_LABEL) ){ //False negative
                        e += weights[i]; //Increase the error proportional to the weight of the example
                        errorMatrix[k][i] = 1; //Flag that there was an error
                        numIncorrect++;
                    }else{
                        errorMatrix[k][i] = 0; //Flag that there was no error
                        numCorrect++;
                    }
                }

                trainingLog << "PositiveClass: " << classLabels[classIter] << " Boosting Iter: " << t << " Classifier: " << k << " WeightedError: " << e << " NumCorrect: " << numCorrect/M << " NumIncorrect: " << numIncorrect/M << std::endl;

                if( e < minError ){
                    minError = e;
                    bestClassifierIndex = k;
                }

            }

            epsilon = minError;

            //Set alpha, using the M1 weight value, small errors (close to 0) will receive a strong weight in the final classifier
            alpha = 0.5 * log( (1.0-epsilon)/epsilon );
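            //For example, epsilon = 0.1 gives alpha = 0.5*ln(0.9/0.1) ~ 1.1, epsilon -> 0.5 (chance level)
            //gives alpha -> 0, and epsilon -> 0 (a perfect weak learner) sends alpha towards infinity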

            trainingLog << "PositiveClass: " << classLabels[classIter] << " Boosting Iter: " << t << " Best Classifier Index: " << bestClassifierIndex << " MinError: " << minError << " Alpha: " << alpha << std::endl;

            if( grt_isinf(alpha) ){ keepBoosting = false; trainingLog << "Alpha is INF. Stopping boosting for current class" << std::endl; }
            if( 0.5 - epsilon <= beta ){ keepBoosting = false; trainingLog << "0.5 - Epsilon <= Beta. Stopping boosting for current class" << std::endl; }
            if( ++t >= numBoostingIterations ) keepBoosting = false;

            trainingResult.setClassificationResult(t, minError, this);
            trainingResults.push_back(trainingResult);
            trainingResultsObserverManager.notifyObservers( trainingResult );

            if( keepBoosting ){

                //Add the best weak classifier to the committee
                models[ classIter ].addClassifierToCommitee( weakClassifiers[bestClassifierIndex], alpha );

                //Update the weights for the next boosting iteration
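                //Note: the reWeight factor below, (1.0-epsilon)/epsilon, equals exp(2*alpha), so samples the
                //chosen weak learner got wrong gain weight and the next boosting iteration focuses on them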
                Float reWeight = (1.0 - epsilon) / epsilon;
                Float oldSum = 0;
                Float newSum = 0;
                for(UINT i=0; i<M; i++){
                    oldSum += weights[i];
                    //Only update the weights that resulted in an incorrect prediction
                    if( errorMatrix[bestClassifierIndex][i] == 1 ) weights[i] *= reWeight;
                    newSum += weights[i];
                }

                //Normalize all the weights
                //This results in increasing the weights of the samples that were incorrectly labelled
                //while decreasing the weights of the samples that were correctly classified
                reWeight = oldSum/newSum;
                for(UINT i=0; i<M; i++){
                    weights[i] *= reWeight;
                }

            }else{
                trainingLog << "Stopping boosting training at iteration : " << t-1 << " with an error of " << epsilon << std::endl;
                if( t-1 == 0 ){
                    //Add the best weak classifier to the committee (we have to add it as this is the first iteration)
                    if( grt_isinf(alpha) ){ alpha = 1; } //If alpha is infinite then the first classifier got everything correct
                    models[ classIter ].addClassifierToCommitee( weakClassifiers[bestClassifierIndex], alpha );
                }
            }

        }
    }

    //Normalize the weights
    for(UINT k=0; k<numClasses; k++){
        models[k].normalizeWeights();
    }

    //Flag that the model has been trained
    trained = true;

    //Setup the data for prediction
    predictedClassLabel = 0;
    maxLikelihood = 0;
    classLikelihoods.resize(numClasses);
    classDistances.resize(numClasses);

    return true;
}

bool AdaBoost::predict_(VectorFloat &inputVector){

    predictedClassLabel = 0;
    maxLikelihood = -10000;

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - AdaBoost Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }

    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);

    UINT bestClassIndex = 0;
    UINT numPositivePredictions = 0;
    bestDistance = -grt_numeric_limits< Float >::max();
    Float worstDistance = grt_numeric_limits< Float >::max();
    Float sum = 0;
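    //Note: MAX_POSITIVE_VALUE only counts classes whose committee score is positive, so if no committee
    //fires the null-rejection class label is returned; MAX_VALUE simply takes the argmax of the raw scores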
    for(UINT k=0; k<numClasses; k++){
        Float result = models[k].predict( inputVector );

        switch ( predictionMethod ) {
            case MAX_POSITIVE_VALUE:
                if( result > 0 ){
                    if( result > bestDistance ){
                        bestDistance = result;
                        bestClassIndex = k;
                    }
                    numPositivePredictions++;
                    classLikelihoods[k] = result;
                }else classLikelihoods[k] = 0;

                classDistances[k] = result;
                sum += classLikelihoods[k];

                break;
            case MAX_VALUE:
                if( result > bestDistance ){
                    bestDistance = result;
                    bestClassIndex = k;
                }
                if( result < worstDistance ){
                    worstDistance = result;
                }
                numPositivePredictions++; //In the MAX_VALUE mode we assume all samples are valid
                classLikelihoods[k] = result;
                classDistances[k] = result;

                break;
            default:
                errorLog << "predict_(VectorFloat &inputVector) - Unknown prediction method!" << std::endl;
                break;
        }
    }

    if( predictionMethod == MAX_VALUE ){
        //Some of the class likelihoods might be negative, so we add the most negative value to each to offset this
        worstDistance = fabs( worstDistance );
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] += worstDistance;
            sum += classLikelihoods[k];
        }
    }

    //Normalize the class likelihoods
    if( sum > 0 ){
        for(UINT k=0; k<numClasses; k++)
            classLikelihoods[k] /= sum;
    }
    maxLikelihood = classLikelihoods[ bestClassIndex ];

    if( numPositivePredictions == 0 ){
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }else predictedClassLabel = classLabels[ bestClassIndex ];

    return true;
}

bool AdaBoost::recomputeNullRejectionThresholds(){

    if( trained ){
        //Todo - need to add null rejection for AdaBoost
        return false;
    }
    return false;
}

bool AdaBoost::setNullRejectionCoeff(Float nullRejectionCoeff){

    if( nullRejectionCoeff > 0 ){
        this->nullRejectionCoeff = nullRejectionCoeff;
        recomputeNullRejectionThresholds();
        return true;
    }
    return false;
}

bool AdaBoost::save( std::fstream &file ) const{

    if(!file.is_open())
    {
        errorLog << "save(fstream &file) - The file is not open!" << std::endl;
        return false;
    }

    //Write the header info
    file << "GRT_ADABOOST_MODEL_FILE_V2.0\n";

    //Write the classifier settings to the file
    if( !Classifier::saveBaseSettingsToFile(file) ){
        errorLog << "save(fstream &file) - Failed to save classifier base settings to file!" << std::endl;
        return false;
    }

    //Write the AdaBoost settings to the file
    file << "PredictionMethod: " << predictionMethod << std::endl;

    //If the model has been trained then write the model
    if( trained ){
        file << "Models: " << std::endl;
        for(UINT i=0; i<models.size(); i++){
            if( !models[i].save( file ) ){
                errorLog << "save(fstream &file) - Failed to write model " << i << " to file!" << std::endl;
                file.close();
                return false;
            }
        }
    }

    return true;
}

bool AdaBoost::load( std::fstream &file ){

    clear();

    if(!file.is_open())
    {
        errorLog << "load(string filename) - Could not open file to load model!" << std::endl;
        return false;
    }

    std::string word;
    file >> word;

    //Check to see if we should load a legacy file
    if( word == "GRT_ADABOOST_MODEL_FILE_V1.0" ){
        return loadLegacyModelFromFile( file );
    }

    if( word != "GRT_ADABOOST_MODEL_FILE_V2.0" ){
        errorLog << "load(fstream &file) - Failed to read file header!" << std::endl;
        errorLog << word << std::endl;
        return false;
    }

    //Load the base settings from the file
    if( !Classifier::loadBaseSettingsFromFile(file) ){
        errorLog << "load(string filename) - Failed to load base settings from file!" << std::endl;
        return false;
    }

    file >> word;
    if( word != "PredictionMethod:" ){
        errorLog << "load(fstream &file) - Failed to read PredictionMethod header!" << std::endl;
        return false;
    }
    file >> predictionMethod;

    if( trained ){
        file >> word;
        if( word != "Models:" ){
            errorLog << "load(fstream &file) - Failed to read Models header!" << std::endl;
            return false;
        }

        //Load the models
        models.resize( numClasses );
        for(UINT i=0; i<models.size(); i++){
            if( !models[i].load( file ) ){
                errorLog << "load(fstream &file) - Failed to load model " << i << " from file!" << std::endl;
                file.close();
                return false;
            }
        }

        //Recompute the null rejection thresholds
        recomputeNullRejectionThresholds();

        //Resize the prediction results to make sure it is setup for realtime prediction
        maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
        bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
        classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
        classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
    }

    return true;
}

bool AdaBoost::clear(){

    //Clear the Classifier variables
    Classifier::clear();

    //Clear the AdaBoost model
    models.clear();

    return true;
}

bool AdaBoost::setWeakClassifier(const WeakClassifier &weakClassifer){

    //Clear any previous weak classifiers
    clearWeakClassifiers();

    WeakClassifier *weakClassiferPtr = weakClassifer.createNewInstance();

    weakClassifiers.push_back( weakClassiferPtr );

    return true;
}

bool AdaBoost::addWeakClassifier(const WeakClassifier &weakClassifer){

    WeakClassifier *weakClassiferPtr = weakClassifer.createNewInstance();
    weakClassifiers.push_back( weakClassiferPtr );

    return true;
}

bool AdaBoost::clearWeakClassifiers(){

    for(UINT i=0; i<weakClassifiers.size(); i++){
        if( weakClassifiers[i] != NULL ){
            delete weakClassifiers[i];
            weakClassifiers[i] = NULL;
        }
    }
    weakClassifiers.clear();
    return true;
}

bool AdaBoost::setNumBoostingIterations(UINT numBoostingIterations){
    if( numBoostingIterations > 0 ){
        this->numBoostingIterations = numBoostingIterations;
        return true;
    }
    return false;
}

bool AdaBoost::setPredictionMethod(UINT predictionMethod){
    if( predictionMethod != MAX_POSITIVE_VALUE && predictionMethod != MAX_VALUE ){
        return false;
    }
    this->predictionMethod = predictionMethod;
    return true;
}

void AdaBoost::printModel(){

    std::cout << "AdaBoostModel: \n";
    std::cout << "NumFeatures: " << numInputDimensions << std::endl;
    std::cout << "NumClasses: " << numClasses << std::endl;
    std::cout << "UseScaling: " << useScaling << std::endl;
    std::cout << "UseNullRejection: " << useNullRejection << std::endl;

    for(UINT k=0; k<numClasses; k++){
        std::cout << "Class: " << k+1 << " ClassLabel: " << classLabels[k] << std::endl;
        models[k].print();
    }

}

bool AdaBoost::loadLegacyModelFromFile( std::fstream &file ){

    std::string word;

    file >> word;
    if( word != "NumFeatures:" ){
        errorLog << "load(fstream &file) - Failed to read NumFeatures header!" << std::endl;
        return false;
    }
    file >> numInputDimensions;

    file >> word;
    if( word != "NumClasses:" ){
        errorLog << "load(fstream &file) - Failed to read NumClasses header!" << std::endl;
        return false;
    }
    file >> numClasses;

    file >> word;
    if( word != "UseScaling:" ){
        errorLog << "load(fstream &file) - Failed to read UseScaling header!" << std::endl;
        return false;
    }
    file >> useScaling;

    file >> word;
    if( word != "UseNullRejection:" ){
        errorLog << "load(fstream &file) - Failed to read UseNullRejection header!" << std::endl;
        return false;
    }
    file >> useNullRejection;

    if( useScaling ){
        file >> word;
        if( word != "Ranges:" ){
            errorLog << "load(fstream &file) - Failed to read Ranges header!" << std::endl;
            return false;
        }
        ranges.resize( numInputDimensions );

        for(UINT n=0; n<ranges.size(); n++){
            file >> ranges[n].minValue;
            file >> ranges[n].maxValue;
        }
    }

    file >> word;
    if( word != "Trained:" ){
        errorLog << "load(fstream &file) - Failed to read Trained header!" << std::endl;
        return false;
    }
    file >> trained;

    file >> word;
    if( word != "PredictionMethod:" ){
        errorLog << "load(fstream &file) - Failed to read PredictionMethod header!" << std::endl;
        return false;
    }
    file >> predictionMethod;

    if( trained ){
        file >> word;
        if( word != "Models:" ){
            errorLog << "load(fstream &file) - Failed to read Models header!" << std::endl;
            return false;
        }

        //Load the models
        models.resize( numClasses );
        classLabels.resize( numClasses );
        for(UINT i=0; i<models.size(); i++){
            if( !models[i].load( file ) ){
                errorLog << "load(fstream &file) - Failed to load model " << i << " from file!" << std::endl;
                file.close();
                return false;
            }

            //Set the class label
            classLabels[i] = models[i].getClassLabel();
        }
    }

    //Recompute the null rejection thresholds
    recomputeNullRejectionThresholds();

    //Resize the prediction results to make sure it is setup for realtime prediction
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
    classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
    classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);

    return true;
}

GRT_END_NAMESPACE
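
Usage example
The following minimal sketch is not part of AdaBoost.cpp; it shows one plausible way to train and run the classifier above. It assumes the standard GRT API from ClassificationData, MLBase and Classifier (setNumDimensions, addSample, train, predict, getPredictedClassLabel, getMaximumLikelihood, save) and uses made-up toy data. Per the class documentation, the AdaBoost constructor defaults to a DecisionStump weak classifier, 20 boosting iterations and the MAX_VALUE prediction method.

#include <GRT/GRT.h>
#include <iostream>
#include <cstdlib>
using namespace GRT;

int main(){

    //Build a toy two-class dataset with a single input dimension (hypothetical data)
    ClassificationData trainingData;
    trainingData.setNumDimensions( 1 );
    VectorFloat sample( 1 );
    for(UINT i=0; i<50; i++){
        sample[0] = 0.1 + (rand()%100)/1000.0; //Class 1 clusters near 0.1
        trainingData.addSample( 1, sample );
        sample[0] = 0.9 - (rand()%100)/1000.0; //Class 2 clusters near 0.9
        trainingData.addSample( 2, sample );
    }

    //Train the model, this invokes train_(ClassificationData&) from the listing above
    AdaBoost adaBoost;
    if( !adaBoost.train( trainingData ) ){
        std::cout << "Failed to train classifier!" << std::endl;
        return EXIT_FAILURE;
    }

    //Classify a new sample, this invokes predict_(VectorFloat&) from the listing above
    sample[0] = 0.15;
    if( adaBoost.predict( sample ) ){
        std::cout << "PredictedClassLabel: " << adaBoost.getPredictedClassLabel() << std::endl;
        std::cout << "MaxLikelihood: " << adaBoost.getMaximumLikelihood() << std::endl;
    }

    //Persist the trained model via the save function shown in the listing above
    adaBoost.save( "AdaBoostModel.grt" );

    return EXIT_SUCCESS;
}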