GestureRecognitionToolkit  Version: 0.1.0
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source C++ machine learning library for real-time gesture recognition.
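A minimal usage sketch of the AdaBoost classifier implemented in this file, assuming GRT is installed and its headers are on the include path; the dataset, labels, and parameter values below are hypothetical:

#include <GRT/GRT.h>
#include <iostream>
using namespace GRT;

int main(){
    //Build a small synthetic two-class dataset with 2 input dimensions (hypothetical data)
    ClassificationData trainingData;
    trainingData.setNumDimensions( 2 );
    Random random;
    for(UINT i=0; i<100; i++){
        VectorFloat sample(2);
        sample[0] = random.getRandomNumberUniform( 0.0, 1.0 );
        sample[1] = random.getRandomNumberUniform( 0.0, 1.0 );
        //Label the sample by which side of x = 0.5 it falls on
        UINT classLabel = sample[0] < 0.5 ? 1 : 2;
        trainingData.addSample( classLabel, sample );
    }

    //Boost decision stumps (the default weak classifier) for up to 20 iterations
    AdaBoost adaBoost( DecisionStump() );
    adaBoost.setNumBoostingIterations( 20 );

    if( !adaBoost.train( trainingData ) ){
        std::cout << "Failed to train the AdaBoost model!" << std::endl;
        return 1;
    }

    //Classify a new sample
    VectorFloat testSample(2);
    testSample[0] = 0.2;
    testSample[1] = 0.7;
    if( adaBoost.predict( testSample ) ){
        std::cout << "PredictedClassLabel: " << adaBoost.getPredictedClassLabel() << std::endl;
        std::cout << "MaxLikelihood: " << adaBoost.getMaximumLikelihood() << std::endl;
    }

    return 0;
}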
AdaBoost.cpp
/*
GRT MIT License
Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include "AdaBoost.h"

GRT_BEGIN_NAMESPACE

//Register the AdaBoost module with the Classifier base class
RegisterClassifierModule< AdaBoost > AdaBoost::registerModule("AdaBoost");
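//The registered string lets the Classifier base class create a new AdaBoost instance
//by name at runtime, for example when a model file or pipeline is loaded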

AdaBoost::AdaBoost(const WeakClassifier &weakClassifier,bool useScaling,bool useNullRejection,Float nullRejectionCoeff,UINT numBoostingIterations,UINT predictionMethod)
{
    setWeakClassifier( weakClassifier );
    this->useScaling = useScaling;
    this->useNullRejection = useNullRejection;
    this->nullRejectionCoeff = nullRejectionCoeff;
    this->numBoostingIterations = numBoostingIterations;
    this->predictionMethod = predictionMethod;
    classType = "AdaBoost";
    classifierType = classType;
    classifierMode = STANDARD_CLASSIFIER_MODE;
    debugLog.setProceedingText("[DEBUG AdaBoost]");
    errorLog.setProceedingText("[ERROR AdaBoost]");
    trainingLog.setProceedingText("[TRAINING AdaBoost]");
    warningLog.setProceedingText("[WARNING AdaBoost]");
}

AdaBoost::AdaBoost(const AdaBoost &rhs){
    classifierType = "AdaBoost";
    classifierMode = STANDARD_CLASSIFIER_MODE;
    debugLog.setProceedingText("[DEBUG AdaBoost]");
    errorLog.setProceedingText("[ERROR AdaBoost]");
    trainingLog.setProceedingText("[TRAINING AdaBoost]");
    warningLog.setProceedingText("[WARNING AdaBoost]");
    *this = rhs;
}

AdaBoost::~AdaBoost(void)
{
    //Clean up any weak classifiers
    clearWeakClassifiers();
}

AdaBoost& AdaBoost::operator=(const AdaBoost &rhs){
    if( this != &rhs ){
        //Clear the current weak classifiers
        clearWeakClassifiers();

        this->numBoostingIterations = rhs.numBoostingIterations;
        this->predictionMethod = rhs.predictionMethod;
        this->models = rhs.models;

        if( rhs.weakClassifiers.size() > 0 ){
            for(UINT i=0; i<rhs.weakClassifiers.size(); i++){
                WeakClassifier *weakClassiferPtr = rhs.weakClassifiers[i]->createNewInstance();
                weakClassifiers.push_back( weakClassiferPtr );
            }
        }

        //Clone the classifier variables
        copyBaseVariables( (Classifier*)&rhs );
    }
    return *this;
}

bool AdaBoost::deepCopyFrom(const Classifier *classifier){

    if( classifier == NULL ){
        errorLog << "deepCopyFrom(const Classifier *classifier) - The classifier pointer is NULL!" << std::endl;
        return false;
    }

    if( this->getClassifierType() == classifier->getClassifierType() ){
        //Clone the AdaBoost values
        AdaBoost *ptr = (AdaBoost*)classifier;

        //Clear the current weak classifiers
        clearWeakClassifiers();

        this->numBoostingIterations = ptr->numBoostingIterations;
        this->predictionMethod = ptr->predictionMethod;
        this->models = ptr->models;

        if( ptr->weakClassifiers.size() > 0 ){
            for(UINT i=0; i<ptr->weakClassifiers.size(); i++){
                WeakClassifier *weakClassiferPtr = ptr->weakClassifiers[i]->createNewInstance();
                weakClassifiers.push_back( weakClassiferPtr );
            }
        }

        //Clone the classifier variables
        return copyBaseVariables( classifier );
    }
    return false;
}

bool AdaBoost::train_(ClassificationData &trainingData){

    //Clear any previous model
    clear();

    if( trainingData.getNumSamples() <= 1 ){
        errorLog << "train_(ClassificationData &trainingData) - There are not enough training samples to train a model! Number of samples: " << trainingData.getNumSamples() << std::endl;
        return false;
    }

    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    const UINT M = trainingData.getNumSamples();
    const UINT POSITIVE_LABEL = WEAK_CLASSIFIER_POSITIVE_CLASS_LABEL;
    const UINT NEGATIVE_LABEL = WEAK_CLASSIFIER_NEGATIVE_CLASS_LABEL;
    Float alpha = 0;
    const Float beta = 0.001;
    Float epsilon = 0;
    TrainingResult trainingResult;

    const UINT K = (UINT)weakClassifiers.size();
    if( K == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - No weakClassifiers have been set. You need to set at least one weak classifier first." << std::endl;
        return false;
    }

    classLabels.resize(numClasses);
    models.resize(numClasses);
    ranges = trainingData.getRanges();

    //Scale the training data if needed
    if( useScaling ){
        trainingData.scale(ranges,0,1);
    }

    //Create the weights vector
    VectorFloat weights(M);

    //Create the error matrix
    MatrixFloat errorMatrix(K,M);

    for(UINT classIter=0; classIter<numClasses; classIter++){

        //Get the class label for the current class
        classLabels[classIter] = trainingData.getClassLabels()[classIter];

        //Set the class label of the current model
        models[ classIter ].setClassLabel( classLabels[classIter] );

        //Setup the labels for this class, POSITIVE_LABEL == 1, NEGATIVE_LABEL == 2
        ClassificationData classData;
        classData.setNumDimensions(trainingData.getNumDimensions());
        for(UINT i=0; i<M; i++){
            UINT label = trainingData[i].getClassLabel()==classLabels[classIter] ? POSITIVE_LABEL : NEGATIVE_LABEL;
            VectorFloat trainingSample = trainingData[i].getSample();
            classData.addSample(label,trainingSample);
        }

        //Setup the initial training sample weights
        std::fill(weights.begin(),weights.end(),1.0/M);

        //Run the boosting loop
        bool keepBoosting = true;
        UINT t = 0;

        while( keepBoosting ){

            //Pick the classifier from the family of classifiers that minimizes the total error
            UINT bestClassifierIndex = 0;
            Float minError = grt_numeric_limits< Float >::max();
            for(UINT k=0; k<K; k++){
                //Get the k'th possible classifier
                WeakClassifier *weakLearner = weakClassifiers[k];

                //Train the current classifier
                if( !weakLearner->train(classData,weights) ){
                    errorLog << "Failed to train weakLearner!" << std::endl;
                    return false;
                }

                //Compute the weighted error for this classifier
                Float e = 0;
                Float positiveLabel = weakLearner->getPositiveClassLabel();
                Float numCorrect = 0;
                Float numIncorrect = 0;
                for(UINT i=0; i<M; i++){
                    //Only penalize errors
                    Float prediction = weakLearner->predict( classData[i].getSample() );

                    if( (prediction == positiveLabel && classData[i].getClassLabel() != POSITIVE_LABEL) || //False positive
                        (prediction != positiveLabel && classData[i].getClassLabel() == POSITIVE_LABEL) ){ //False negative
                        e += weights[i]; //Increase the error proportional to the weight of the example
                        errorMatrix[k][i] = 1; //Flag that there was an error
                        numIncorrect++;
                    }else{
                        errorMatrix[k][i] = 0; //Flag that there was no error
                        numCorrect++;
                    }
                }

                trainingLog << "PositiveClass: " << classLabels[classIter] << " Boosting Iter: " << t << " Classifier: " << k << " WeightedError: " << e << " NumCorrect: " << numCorrect/M << " NumIncorrect: " << numIncorrect/M << std::endl;

                if( e < minError ){
                    minError = e;
                    bestClassifierIndex = k;
                }

            }

            epsilon = minError;

            //Set alpha, using the M1 weight value, small weights (close to 0) will receive a strong weight in the final classifier
            alpha = 0.5 * log( (1.0-epsilon)/epsilon );
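            //With epsilon below 0.5 (better than chance) alpha is positive; as epsilon
            //approaches zero alpha grows large, so the most accurate weak learners dominate
            //the final committee H(x) = sum_t( alpha_t * h_t(x) )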

            trainingLog << "PositiveClass: " << classLabels[classIter] << " Boosting Iter: " << t << " Best Classifier Index: " << bestClassifierIndex << " MinError: " << minError << " Alpha: " << alpha << std::endl;

            if( grt_isinf(alpha) ){ keepBoosting = false; trainingLog << "Alpha is INF. Stopping boosting for current class" << std::endl; }
            if( 0.5 - epsilon <= beta ){ keepBoosting = false; trainingLog << "Epsilon <= Beta. Stopping boosting for current class" << std::endl; }
            if( ++t >= numBoostingIterations ) keepBoosting = false;

            trainingResult.setClassificationResult(t, minError, this);
            trainingResults.push_back(trainingResult);
            trainingResultsObserverManager.notifyObservers( trainingResult );

            if( keepBoosting ){

                //Add the best weak classifier to the committee
                models[ classIter ].addClassifierToCommitee( weakClassifiers[bestClassifierIndex], alpha );

                //Update the weights for the next boosting iteration
                Float reWeight = (1.0 - epsilon) / epsilon;
                Float oldSum = 0;
                Float newSum = 0;
                for(UINT i=0; i<M; i++){
                    oldSum += weights[i];
                    //Only update the weights that resulted in an incorrect prediction
                    if( errorMatrix[bestClassifierIndex][i] == 1 ) weights[i] *= reWeight;
                    newSum += weights[i];
                }

                //Normalize all the weights
                //This results in increasing the weights of the samples that were incorrectly labelled
                //while decreasing the weights of the samples that were correctly classified
                reWeight = oldSum/newSum;
                for(UINT i=0; i<M; i++){
                    weights[i] *= reWeight;
                }

            }else{
                trainingLog << "Stopping boosting training at iteration : " << t-1 << " with an error of " << epsilon << std::endl;
                if( t-1 == 0 ){
                    //Add the best weak classifier to the committee (we have to add it as this is the first iteration)
                    if( grt_isinf(alpha) ){ alpha = 1; } //If alpha is infinite then the first classifier got everything correct
                    models[ classIter ].addClassifierToCommitee( weakClassifiers[bestClassifierIndex], alpha );
                }
            }

        }
    }

    //Normalize the weights
    for(UINT k=0; k<numClasses; k++){
        models[k].normalizeWeights();
    }

    //Flag that the model has been trained
    trained = true;

    //Setup the data for prediction
    predictedClassLabel = 0;
    maxLikelihood = 0;
    classLikelihoods.resize(numClasses);
    classDistances.resize(numClasses);

    return true;
}

bool AdaBoost::predict_(VectorFloat &inputVector){

    predictedClassLabel = 0;
    maxLikelihood = -10000;

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - AdaBoost Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }

    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);

    UINT bestClassIndex = 0;
    UINT numPositivePredictions = 0;
    bestDistance = -grt_numeric_limits< Float >::max();
    Float worstDistance = grt_numeric_limits< Float >::max();
    Float sum = 0;
    for(UINT k=0; k<numClasses; k++){
        Float result = models[k].predict( inputVector );

        switch ( predictionMethod ) {
            case MAX_POSITIVE_VALUE:
                if( result > 0 ){
                    if( result > bestDistance ){
                        bestDistance = result;
                        bestClassIndex = k;
                    }
                    numPositivePredictions++;
                    classLikelihoods[k] = result;
                }else classLikelihoods[k] = 0;

                classDistances[k] = result;
                sum += classLikelihoods[k];

                break;
            case MAX_VALUE:
                if( result > bestDistance ){
                    bestDistance = result;
                    bestClassIndex = k;
                }
                if( result < worstDistance ){
                    worstDistance = result;
                }
                numPositivePredictions++; //In the MAX_VALUE mode we assume all samples are valid
                classLikelihoods[k] = result;
                classDistances[k] = result;

                break;
            default:
                errorLog << "predict_(VectorFloat &inputVector) - Unknown prediction method!" << std::endl;
                break;
        }
    }

    if( predictionMethod == MAX_VALUE ){
        //Some of the class likelihoods might be negative, so we add the most negative value to each to offset this
        worstDistance = fabs( worstDistance );
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] += worstDistance;
            sum += classLikelihoods[k];
        }
    }

    //Normalize the class likelihoods
    if( sum > 0 ){
        for(UINT k=0; k<numClasses; k++)
            classLikelihoods[k] /= sum;
    }
    maxLikelihood = classLikelihoods[ bestClassIndex ];

    if( numPositivePredictions == 0 ){
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }else predictedClassLabel = classLabels[ bestClassIndex ];

    return true;
}

bool AdaBoost::recomputeNullRejectionThresholds(){

    if( trained ){
        //Todo - need to add null rejection for AdaBoost
        return false;
    }
    return false;
}

bool AdaBoost::setNullRejectionCoeff(Float nullRejectionCoeff){

    if( nullRejectionCoeff > 0 ){
        this->nullRejectionCoeff = nullRejectionCoeff;
        recomputeNullRejectionThresholds();
        return true;
    }
    return false;
}

bool AdaBoost::saveModelToFile( std::fstream &file ) const{

    if(!file.is_open())
    {
        errorLog << "saveModelToFile(fstream &file) - The file is not open!" << std::endl;
        return false;
    }

    //Write the header info
    file << "GRT_ADABOOST_MODEL_FILE_V2.0\n";

    //Write the classifier settings to the file
    if( !Classifier::saveBaseSettingsToFile(file) ){
        errorLog << "saveModelToFile(fstream &file) - Failed to save classifier base settings to file!" << std::endl;
        return false;
    }

    //Write the AdaBoost settings to the file
    file << "PredictionMethod: " << predictionMethod << std::endl;

    //If the model has been trained then write the model
    if( trained ){
        file << "Models: " << std::endl;
        for(UINT i=0; i<models.size(); i++){
            if( !models[i].saveModelToFile( file ) ){
                errorLog << "saveModelToFile(fstream &file) - Failed to write model " << i << " to file!" << std::endl;
                file.close();
                return false;
            }
        }
    }

    return true;
}

bool AdaBoost::loadModelFromFile( std::fstream &file ){

    clear();

    if(!file.is_open())
    {
        errorLog << "loadModelFromFile(string filename) - Could not open file to load model!" << std::endl;
        return false;
    }

    std::string word;
    file >> word;

    //Check to see if we should load a legacy file
    if( word == "GRT_ADABOOST_MODEL_FILE_V1.0" ){
        return loadLegacyModelFromFile( file );
    }

    if( word != "GRT_ADABOOST_MODEL_FILE_V2.0" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read file header!" << std::endl;
        errorLog << word << std::endl;
        return false;
    }

    //Load the base settings from the file
    if( !Classifier::loadBaseSettingsFromFile(file) ){
        errorLog << "loadModelFromFile(string filename) - Failed to load base settings from file!" << std::endl;
        return false;
    }

    file >> word;
    if( word != "PredictionMethod:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read PredictionMethod header!" << std::endl;
        return false;
    }
    file >> predictionMethod;

    if( trained ){
        file >> word;
        if( word != "Models:" ){
            errorLog << "loadModelFromFile(fstream &file) - Failed to read Models header!" << std::endl;
            return false;
        }

        //Load the models
        models.resize( numClasses );
        for(UINT i=0; i<models.size(); i++){
            if( !models[i].loadModelFromFile( file ) ){
                errorLog << "loadModelFromFile(fstream &file) - Failed to load model " << i << " from file!" << std::endl;
                file.close();
                return false;
            }
        }

        //Recompute the null rejection thresholds
        recomputeNullRejectionThresholds();

        //Resize the prediction results to make sure it is setup for realtime prediction
        maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
        bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
        classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
        classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
    }

    return true;
}

bool AdaBoost::clear(){

    //Clear the Classifier variables
    Classifier::clear();

    //Clear the AdaBoost model
    models.clear();

    return true;
}

bool AdaBoost::setWeakClassifier(const WeakClassifier &weakClassifer){

    //Clear any previous weak classifiers
    clearWeakClassifiers();

    WeakClassifier *weakClassiferPtr = weakClassifer.createNewInstance();

    weakClassifiers.push_back( weakClassiferPtr );

    return true;
}

bool AdaBoost::addWeakClassifier(const WeakClassifier &weakClassifer){

    WeakClassifier *weakClassiferPtr = weakClassifer.createNewInstance();
    weakClassifiers.push_back( weakClassiferPtr );

    return true;
}

bool AdaBoost::clearWeakClassifiers(){

    for(UINT i=0; i<weakClassifiers.size(); i++){
        if( weakClassifiers[i] != NULL ){
            delete weakClassifiers[i];
            weakClassifiers[i] = NULL;
        }
    }
    weakClassifiers.clear();
    return true;
}

bool AdaBoost::setNumBoostingIterations(UINT numBoostingIterations){
    if( numBoostingIterations > 0 ){
        this->numBoostingIterations = numBoostingIterations;
        return true;
    }
    return false;
}

bool AdaBoost::setPredictionMethod(UINT predictionMethod){
    if( predictionMethod != MAX_POSITIVE_VALUE && predictionMethod != MAX_VALUE ){
        return false;
    }
    this->predictionMethod = predictionMethod;
    return true;
}

void AdaBoost::printModel(){

    std::cout << "AdaBoostModel: \n";
    std::cout << "NumFeatures: " << numInputDimensions << std::endl;
    std::cout << "NumClasses: " << numClasses << std::endl;
    std::cout << "UseScaling: " << useScaling << std::endl;
    std::cout << "UseNullRejection: " << useNullRejection << std::endl;

    for(UINT k=0; k<numClasses; k++){
        std::cout << "Class: " << k+1 << " ClassLabel: " << classLabels[k] << std::endl;
        models[k].print();
    }

}

bool AdaBoost::loadLegacyModelFromFile( std::fstream &file ){

    std::string word;

    file >> word;
    if( word != "NumFeatures:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read NumFeatures header!" << std::endl;
        return false;
    }
    file >> numInputDimensions;

    file >> word;
    if( word != "NumClasses:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read NumClasses header!" << std::endl;
        return false;
    }
    file >> numClasses;

    file >> word;
    if( word != "UseScaling:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read UseScaling header!" << std::endl;
        return false;
    }
    file >> useScaling;

    file >> word;
    if( word != "UseNullRejection:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read UseNullRejection header!" << std::endl;
        return false;
    }
    file >> useNullRejection;

    if( useScaling ){
        file >> word;
        if( word != "Ranges:" ){
            errorLog << "loadModelFromFile(fstream &file) - Failed to read Ranges header!" << std::endl;
            return false;
        }
        ranges.resize( numInputDimensions );

        for(UINT n=0; n<ranges.size(); n++){
            file >> ranges[n].minValue;
            file >> ranges[n].maxValue;
        }
    }

    file >> word;
    if( word != "Trained:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read Trained header!" << std::endl;
        return false;
    }
    file >> trained;

    file >> word;
    if( word != "PredictionMethod:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to read PredictionMethod header!" << std::endl;
        return false;
    }
    file >> predictionMethod;

    if( trained ){
        file >> word;
        if( word != "Models:" ){
            errorLog << "loadModelFromFile(fstream &file) - Failed to read Models header!" << std::endl;
            return false;
        }

        //Load the models
        models.resize( numClasses );
        classLabels.resize( numClasses );
        for(UINT i=0; i<models.size(); i++){
            if( !models[i].loadModelFromFile( file ) ){
                errorLog << "loadModelFromFile(fstream &file) - Failed to load model " << i << " from file!" << std::endl;
                file.close();
                return false;
            }

            //Set the class label
            classLabels[i] = models[i].getClassLabel();
        }
    }

    //Recompute the null rejection thresholds
    recomputeNullRejectionThresholds();

    //Resize the prediction results to make sure it is setup for realtime prediction
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
    classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
    classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);

    return true;
}

GRT_END_NAMESPACE
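A trained model can be persisted and restored with the saveModelToFile and loadModelFromFile methods shown above. A minimal sketch, assuming a trained AdaBoost instance named adaBoost; the file name is hypothetical:

std::fstream file;
file.open( "AdaBoostModel.grt", std::ios::out );
if( !adaBoost.saveModelToFile( file ) ){ std::cout << "Failed to save the model!" << std::endl; }
file.close();

AdaBoost restored;
file.open( "AdaBoostModel.grt", std::ios::in );
if( !restored.loadModelFromFile( file ) ){ std::cout << "Failed to load the model!" << std::endl; }
file.close();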