GestureRecognitionToolkit  Version: 0.1.0
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source, C++ machine learning library for real-time gesture recognition.
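The listing below is the implementation of the ANBC (Adaptive Naive Bayes Classifier) module. For orientation, the class follows the standard GRT classifier workflow: construct it, train it on a labeled ClassificationData set, then call predict on new samples. What follows is a minimal usage sketch, not part of ANBC.cpp; the training file name is a placeholder and error handling is reduced to early returns.

// Minimal ANBC usage sketch. Assumes the GRT umbrella header is on the include
// path and that "TrainingData.grt" is a placeholder file saved by ClassificationData.
#include <GRT/GRT.h>
#include <iostream>
#include <cstdlib>
using namespace GRT;

int main(){
    // Load some labeled training data (the file name is hypothetical)
    ClassificationData trainingData;
    if( !trainingData.loadDatasetFromFile("TrainingData.grt") ){
        std::cout << "Failed to load training data!" << std::endl;
        return EXIT_FAILURE;
    }

    // Enable scaling and null rejection, matching the constructor defined below
    ANBC anbc(true, true, 5.0);

    // Train the model (MLBase::train calls ANBC::train_ internally)
    if( !anbc.train(trainingData) ){
        std::cout << "Failed to train ANBC model!" << std::endl;
        return EXIT_FAILURE;
    }

    // Classify a sample (here simply the first training sample, for illustration)
    VectorFloat sample = trainingData[0].getSample();
    if( anbc.predict(sample) ){
        std::cout << "Predicted class: " << anbc.getPredictedClassLabel()
                  << " likelihood: " << anbc.getMaximumLikelihood() << std::endl;
    }

    return EXIT_SUCCESS;
}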
ANBC.cpp
1 /*
2 GRT MIT License
3 Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>
4 
5 Permission is hereby granted, free of charge, to any person obtaining a copy of this software
6 and associated documentation files (the "Software"), to deal in the Software without restriction,
7 including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
9 subject to the following conditions:
10 
11 The above copyright notice and this permission notice shall be included in all copies or substantial
12 portions of the Software.
13 
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
15 LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
16 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
17 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
18 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 */
20 
21 #include "ANBC.h"
22 
23 GRT_BEGIN_NAMESPACE
24 
25 //Register the ANBC module with the Classifier base class
26 RegisterClassifierModule< ANBC > ANBC::registerModule("ANBC");
27 
28 ANBC::ANBC(bool useScaling,bool useNullRejection,Float nullRejectionCoeff)
29 {
30  this->useScaling = useScaling;
31  this->useNullRejection = useNullRejection;
32  this->nullRejectionCoeff = nullRejectionCoeff;
33  supportsNullRejection = true;
34  weightsDataSet = false;
35  classType = "ANBC";
36  classifierType = classType;
37  classifierMode = STANDARD_CLASSIFIER_MODE;
38  debugLog.setProceedingText("[DEBUG ANBC]");
39  errorLog.setProceedingText("[ERROR ANBC]");
40  trainingLog.setProceedingText("[TRAINING ANBC]");
41  warningLog.setProceedingText("[WARNING ANBC]");
42 }
43 
44 ANBC::ANBC(const ANBC &rhs){
45  classType = "ANBC";
46  classifierType = classType;
47  classifierMode = STANDARD_CLASSIFIER_MODE;
48  debugLog.setProceedingText("[DEBUG ANBC]");
49  errorLog.setProceedingText("[ERROR ANBC]");
50  trainingLog.setProceedingText("[TRAINING ANBC]");
51  warningLog.setProceedingText("[WARNING ANBC]");
52  *this = rhs;
53 }
54 
55 ANBC::~ANBC(void)
56 {
57 }
58 
59 ANBC& ANBC::operator=(const ANBC &rhs){
60  if( this != &rhs ){
61  //ANBC variables
62  this->weightsDataSet = rhs.weightsDataSet;
63  this->weightsData = rhs.weightsData;
64  this->models = rhs.models;
65 
66  //Classifier variables
67  copyBaseVariables( (Classifier*)&rhs );
68  }
69  return *this;
70 }
71 
72 bool ANBC::deepCopyFrom(const Classifier *classifier){
73 
74  if( classifier == NULL ) return false;
75 
76  if( this->getClassifierType() == classifier->getClassifierType() ){
77 
78  ANBC *ptr = (ANBC*)classifier;
79  //Clone the ANBC values
80  this->weightsDataSet = ptr->weightsDataSet;
81  this->weightsData = ptr->weightsData;
82  this->models = ptr->models;
83 
84  //Clone the classifier variables
85  return copyBaseVariables( classifier );
86  }
87  return false;
88 }
89 
90 bool ANBC::train_(ClassificationData &trainingData){
91 
92  //Clear any previous model
93  clear();
94 
95  const unsigned int M = trainingData.getNumSamples();
96  const unsigned int N = trainingData.getNumDimensions();
97  const unsigned int K = trainingData.getNumClasses();
98 
99  if( M == 0 ){
100  errorLog << "train_(ClassificationData &trainingData) - Training data has zero samples!" << std::endl;
101  return false;
102  }
103 
104  if( weightsDataSet ){
105  if( weightsData.getNumDimensions() != N ){
106  errorLog << "train_(ClassificationData &trainingData) - The number of dimensions in the weights data (" << weightsData.getNumDimensions() << ") is not equal to the number of dimensions of the training data (" << N << ")" << std::endl;
107  return false;
108  }
109  }
110 
111  numInputDimensions = N;
112  numClasses = K;
113  models.resize(K);
114  classLabels.resize(K);
115  ranges = trainingData.getRanges();
116 
117  //Scale the training data if needed
118  if( useScaling ){
119  //Scale the training data between 0 and 1
120  trainingData.scale(0, 1);
121  }
122 
123  //Train each of the models
124  for(UINT k=0; k<numClasses; k++){
125 
126  //Get the class label for the kth class
127  UINT classLabel = trainingData.getClassTracker()[k].classLabel;
128 
129  //Set the kth class label
130  classLabels[k] = classLabel;
131 
132  //Get the weights for this class
133  VectorFloat weights(numInputDimensions);
134  if( weightsDataSet ){
135  bool weightsFound = false;
136  for(UINT i=0; i<weightsData.getNumSamples(); i++){
137  if( weightsData[i].getClassLabel() == classLabel ){
138  weights = weightsData[i].getSample();
139  weightsFound = true;
140  break;
141  }
142  }
143 
144  if( !weightsFound ){
145  errorLog << "train_(ClassificationData &trainingData) - Failed to find the weights for class " << classLabel << std::endl;
146  return false;
147  }
148  }else{
149  //If the weights data has not been set then all the weights are 1
150  for(UINT j=0; j<numInputDimensions; j++) weights[j] = 1.0;
151  }
152 
153  //Get all the training data for this class
154  ClassificationData classData = trainingData.getClassData(classLabel);
155  MatrixFloat data(classData.getNumSamples(),N);
156 
157  //Copy the training data into a matrix
158  for(UINT i=0; i<data.getNumRows(); i++){
159  for(UINT j=0; j<data.getNumCols(); j++){
160  data[i][j] = classData[i][j];
161  }
162  }
163 
164  //Train the model for this class
165  models[k].gamma = nullRejectionCoeff;
166  if( !models[k].train( classLabel, data, weights ) ){
167  errorLog << "train_(ClassificationData &trainingData) - Failed to train model for class: " << classLabel << std::endl;
168 
169  //Try and work out why the training failed
170  if( models[k].N == 0 ){
171  errorLog << "train_(ClassificationData &trainingData) - N == 0!" << std::endl;
172  models.clear();
173  return false;
174  }
175  for(UINT j=0; j<numInputDimensions; j++){
176  if( models[k].sigma[j] == 0 ){
177  errorLog << "train_(ClassificationData &trainingData) - The standard deviation of column " << j+1 << " is zero! Check the training data" << std::endl;
178  models.clear();
179  return false;
180  }
181  }
182  models.clear();
183  return false;
184  }
185 
186  }
187 
188  //Store the null rejection thresholds
189  nullRejectionThresholds.resize(numClasses);
190  for(UINT k=0; k<numClasses; k++) {
191  nullRejectionThresholds[k] = models[k].threshold;
192  }
193 
194  //Flag that the models have been trained
195  trained = true;
196  return trained;
197 }
198 
199 bool ANBC::predict_(VectorFloat &inputVector){
200 
201  if( !trained ){
202  errorLog << "predict_(VectorFloat &inputVector) - ANBC Model Not Trained!" << std::endl;
203  return false;
204  }
205 
206  predictedClassLabel = 0;
207  maxLikelihood = -10000;
208 
209  if( !trained ) return false;
210 
211  if( inputVector.size() != numInputDimensions ){
212  errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
213  return false;
214  }
215 
216  if( useScaling ){
217  for(UINT n=0; n<numInputDimensions; n++){
218  inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, MIN_SCALE_VALUE, MAX_SCALE_VALUE);
219  }
220  }
221 
222  if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
223  if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
224 
225  Float classLikelihoodsSum = 0;
226  Float minDist = -99e+99;
227  for(UINT k=0; k<numClasses; k++){
228  classDistances[k] = models[k].predict( inputVector );
229 
230  //At this point the class likelihoods and class distances are the same thing
231  classLikelihoods[k] = classDistances[k];
232 
233  //If the distances are very far away then they could be -inf or nan so catch this so the sum still works
234  if( grt_isinf(classLikelihoods[k]) || grt_isnan(classLikelihoods[k]) ){
235  classLikelihoods[k] = 0;
236  }else{
237  classLikelihoods[k] = grt_exp( classLikelihoods[k] );
238  classLikelihoodsSum += classLikelihoods[k];
239 
240  //The loglikelihood values are negative so we want the values closest to 0
241  if( classDistances[k] > minDist ){
242  minDist = classDistances[k];
243  predictedClassLabel = k;
244  }
245  }
246  }
247 
248  //If the class likelihoods sum is zero then all classes are -INF
249  if( classLikelihoodsSum == 0 ){
250  predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
251  maxLikelihood = 0;
252  return true;
253  }
254 
255  //Normalize the classlikelihoods
256  for(UINT k=0; k<numClasses; k++){
257  classLikelihoods[k] /= classLikelihoodsSum;
258  }
259  maxLikelihood = classLikelihoods[predictedClassLabel];
260 
261  if( useNullRejection ){
262  //Check to see if the best result is greater than the models threshold
263  if( minDist >= models[predictedClassLabel].threshold ) predictedClassLabel = models[predictedClassLabel].classLabel;
264  else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
265  }else predictedClassLabel = models[predictedClassLabel].classLabel;
266 
267  return true;
268 }
269 
270 bool ANBC::recomputeNullRejectionThresholds(){
271 
272  if( trained ){
273  if( nullRejectionThresholds.size() != numClasses )
274  nullRejectionThresholds.resize(numClasses);
275  for(UINT k=0; k<numClasses; k++) {
276  models[k].recomputeThresholdValue(nullRejectionCoeff);
277  nullRejectionThresholds[k] = models[k].threshold;
278  }
279  return true;
280  }
281  return false;
282 }
283 
284 bool ANBC::reset(){
285  return true;
286 }
287 
288 bool ANBC::clear(){
289 
290  //Clear the Classifier variables
291  Classifier::clear();
292 
293  //Clear the ANBC model
294  weightsData.clear();
295  models.clear();
296 
297  return true;
298 }
299 
300 bool ANBC::saveModelToFile( std::fstream &file ) const{
301 
302  if(!file.is_open())
303  {
304  errorLog <<"saveModelToFile(fstream &file) - The file is not open!" << std::endl;
305  return false;
306  }
307 
308  //Write the header info
309  file<<"GRT_ANBC_MODEL_FILE_V2.0\n";
310 
311  //Write the classifier settings to the file
312  if( !Classifier::saveBaseSettingsToFile(file) ){
313  errorLog <<"saveModelToFile(fstream &file) - Failed to save classifier base settings to file!" << std::endl;
314  return false;
315  }
316 
317  if( trained ){
318  //Write each of the models
319  for(UINT k=0; k<numClasses; k++){
320  file << "*************_MODEL_*************\n";
321  file << "Model_ID: " << k+1 << std::endl;
322  file << "N: " << models[k].N << std::endl;
323  file << "ClassLabel: " << models[k].classLabel << std::endl;
324  file << "Threshold: " << models[k].threshold << std::endl;
325  file << "Gamma: " << models[k].gamma << std::endl;
326  file << "TrainingMu: " << models[k].trainingMu << std::endl;
327  file << "TrainingSigma: " << models[k].trainingSigma << std::endl;
328 
329  file<<"Mu:";
330  for(UINT j=0; j<models[k].N; j++){
331  file << "\t" << models[k].mu[j];
332  }file << std::endl;
333 
334  file<<"Sigma:";
335  for(UINT j=0; j<models[k].N; j++){
336  file << "\t" << models[k].sigma[j];
337  }file << std::endl;
338 
339  file<<"Weights:";
340  for(UINT j=0; j<models[k].N; j++){
341  file << "\t" << models[k].weights[j];
342  }file << std::endl;
343  }
344  }
345 
346  return true;
347 }
348 
349 bool ANBC::loadModelFromFile( std::fstream &file ){
350 
351  trained = false;
352  numInputDimensions = 0;
353  numClasses = 0;
354  models.clear();
355  classLabels.clear();
356 
357  if(!file.is_open())
358  {
359  errorLog << "loadModelFromFile(string filename) - Could not open file to load model" << std::endl;
360  return false;
361  }
362 
363  std::string word;
364  file >> word;
365 
366  //Check to see if we should load a legacy file
367  if( word == "GRT_ANBC_MODEL_FILE_V1.0" ){
368  return loadLegacyModelFromFile( file );
369  }
370 
371  //Find the file type header
372  if(word != "GRT_ANBC_MODEL_FILE_V2.0"){
373  errorLog << "loadModelFromFile(string filename) - Could not find Model File Header" << std::endl;
374  return false;
375  }
376 
377  //Load the base settings from the file
378  if( !Classifier::loadBaseSettingsFromFile(file) ){
379  errorLog << "loadModelFromFile(string filename) - Failed to load base settings from file!" << std::endl;
380  return false;
381  }
382 
383  if( trained ){
384 
385  //Resize the buffer
386  models.resize(numClasses);
387 
388  //Load each of the K models
389  for(UINT k=0; k<numClasses; k++){
390  UINT modelID;
391  file >> word;
392  if(word != "*************_MODEL_*************"){
393  errorLog << "loadModelFromFile(string filename) - Could not find header for the "<<k+1<<"th model" << std::endl;
394  return false;
395  }
396 
397  file >> word;
398  if(word != "Model_ID:"){
399  errorLog << "loadModelFromFile(string filename) - Could not find model ID for the "<<k+1<<"th model" << std::endl;
400  return false;
401  }
402  file >> modelID;
403 
404  if(modelID-1!=k){
405  errorLog << "ANBC: Model ID does not match the current class ID for the "<<k+1<<"th model" << std::endl;
406  return false;
407  }
408 
409  file >> word;
410  if(word != "N:"){
411  errorLog << "ANBC: Could not find N for the "<<k+1<<"th model" << std::endl;
412  return false;
413  }
414  file >> models[k].N;
415 
416  file >> word;
417  if(word != "ClassLabel:"){
418  errorLog << "loadModelFromFile(string filename) - Could not find ClassLabel for the "<<k+1<<"th model" << std::endl;
419  return false;
420  }
421  file >> models[k].classLabel;
422  classLabels[k] = models[k].classLabel;
423 
424  file >> word;
425  if(word != "Threshold:"){
426  errorLog << "loadModelFromFile(string filename) - Could not find the threshold for the "<<k+1<<"th model" << std::endl;
427  return false;
428  }
429  file >> models[k].threshold;
430 
431  file >> word;
432  if(word != "Gamma:"){
433  errorLog << "loadModelFromFile(string filename) - Could not find the gamma parameter for the "<<k+1<<"th model" << std::endl;
434  return false;
435  }
436  file >> models[k].gamma;
437 
438  file >> word;
439  if(word != "TrainingMu:"){
440  errorLog << "loadModelFromFile(string filename) - Could not find the training mu parameter for the "<<k+1<<"th model" << std::endl;
441  return false;
442  }
443  file >> models[k].trainingMu;
444 
445  file >> word;
446  if(word != "TrainingSigma:"){
447  errorLog << "loadModelFromFile(string filename) - Could not find the training sigma parameter for the "<<k+1<<"th model" << std::endl;
448  return false;
449  }
450  file >> models[k].trainingSigma;
451 
452  //Resize the buffers
453  models[k].mu.resize(numInputDimensions);
454  models[k].sigma.resize(numInputDimensions);
455  models[k].weights.resize(numInputDimensions);
456 
457  //Load Mu, Sigma and Weights
458  file >> word;
459  if(word != "Mu:"){
460  errorLog << "loadModelFromFile(string filename) - Could not find the Mu vector for the "<<k+1<<"th model" << std::endl;
461  return false;
462  }
463 
464  //Load Mu
465  for(UINT j=0; j<models[k].N; j++){
466  Float value;
467  file >> value;
468  models[k].mu[j] = value;
469  }
470 
471  file >> word;
472  if(word != "Sigma:"){
473  errorLog << "loadModelFromFile(string filename) - Could not find the Sigma vector for the "<<k+1<<"th model" << std::endl;
474  return false;
475  }
476 
477  //Load Sigma
478  for(UINT j=0; j<models[k].N; j++){
479  Float value;
480  file >> value;
481  models[k].sigma[j] = value;
482  }
483 
484  file >> word;
485  if(word != "Weights:"){
486  errorLog << "loadModelFromFile(string filename) - Could not find the Weights vector for the "<<k+1<<"th model" << std::endl;
487  return false;
488  }
489 
490  //Load Weights
491  for(UINT j=0; j<models[k].N; j++){
492  Float value;
493  file >> value;
494  models[k].weights[j] = value;
495  }
496  }
497 
498  //Recompute the null rejection thresholds
499  recomputeNullRejectionThresholds();
500 
501  //Resize the prediction results to make sure it is setup for realtime prediction
502  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
503  bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
504  classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
505  classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
506  }
507 
508  return true;
509 }
510 
511 VectorFloat ANBC::getNullRejectionThresholds() const{
512  if( !trained ) return VectorFloat();
513  return nullRejectionThresholds;
514 }
515 
516 bool ANBC::setNullRejectionCoeff(Float nullRejectionCoeff){
517 
518  if( nullRejectionCoeff > 0 ){
519  this->nullRejectionCoeff = nullRejectionCoeff;
520  recomputeNullRejectionThresholds();
521  return true;
522  }
523  return false;
524 }
525 
526 bool ANBC::setWeights(const ClassificationData &weightsData){
527 
528  if( weightsData.getNumSamples() > 0 ){
529  weightsDataSet = true;
530  this->weightsData = weightsData;
531  return true;
532  }
533  return false;
534 }
535 
536 bool ANBC::loadLegacyModelFromFile( std::fstream &file ){
537 
538  std::string word;
539 
540  file >> word;
541  if(word != "NumFeatures:"){
542  errorLog << "loadANBCModelFromFile(string filename) - Could not find NumFeatures " << std::endl;
543  return false;
544  }
545  file >> numInputDimensions;
546 
547  file >> word;
548  if(word != "NumClasses:"){
549  errorLog << "loadANBCModelFromFile(string filename) - Could not find NumClasses" << std::endl;
550  return false;
551  }
552  file >> numClasses;
553 
554  file >> word;
555  if(word != "UseScaling:"){
556  errorLog << "loadANBCModelFromFile(string filename) - Could not find UseScaling" << std::endl;
557  return false;
558  }
559  file >> useScaling;
560 
561  file >> word;
562  if(word != "UseNullRejection:"){
563  errorLog << "loadANBCModelFromFile(string filename) - Could not find UseNullRejection" << std::endl;
564  return false;
565  }
566  file >> useNullRejection;
567 
569  if( useScaling ){
570  //Resize the ranges buffer
571  ranges.resize(numInputDimensions);
572 
573  file >> word;
574  if(word != "Ranges:"){
575  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Ranges" << std::endl;
576  return false;
577  }
578  for(UINT n=0; n<ranges.size(); n++){
579  file >> ranges[n].minValue;
580  file >> ranges[n].maxValue;
581  }
582  }
583 
584  //Resize the buffer
585  models.resize(numClasses);
586  classLabels.resize(numClasses);
587 
588  //Load each of the K models
589  for(UINT k=0; k<numClasses; k++){
590  UINT modelID;
591  file >> word;
592  if(word != "*************_MODEL_*************"){
593  errorLog << "loadANBCModelFromFile(string filename) - Could not find header for the "<<k+1<<"th model" << std::endl;
594  return false;
595  }
596 
597  file >> word;
598  if(word != "Model_ID:"){
599  errorLog << "loadANBCModelFromFile(string filename) - Could not find model ID for the "<<k+1<<"th model" << std::endl;
600  return false;
601  }
602  file >> modelID;
603 
604  if(modelID-1!=k){
605  errorLog << "ANBC: Model ID does not match the current class ID for the "<<k+1<<"th model" << std::endl;
606  return false;
607  }
608 
609  file >> word;
610  if(word != "N:"){
611  errorLog << "ANBC: Could not find N for the "<<k+1<<"th model" << std::endl;
612  return false;
613  }
614  file >> models[k].N;
615 
616  file >> word;
617  if(word != "ClassLabel:"){
618  errorLog << "loadANBCModelFromFile(string filename) - Could not find ClassLabel for the "<<k+1<<"th model" << std::endl;
619  return false;
620  }
621  file >> models[k].classLabel;
622  classLabels[k] = models[k].classLabel;
623 
624  file >> word;
625  if(word != "Threshold:"){
626  errorLog << "loadANBCModelFromFile(string filename) - Could not find the threshold for the "<<k+1<<"th model" << std::endl;
627  return false;
628  }
629  file >> models[k].threshold;
630 
631  file >> word;
632  if(word != "Gamma:"){
633  errorLog << "loadANBCModelFromFile(string filename) - Could not find the gamma parameter for the "<<k+1<<"th model" << std::endl;
634  return false;
635  }
636  file >> models[k].gamma;
637 
638  file >> word;
639  if(word != "TrainingMu:"){
640  errorLog << "loadANBCModelFromFile(string filename) - Could not find the training mu parameter for the "<<k+1<<"th model" << std::endl;
641  return false;
642  }
643  file >> models[k].trainingMu;
644 
645  file >> word;
646  if(word != "TrainingSigma:"){
647  errorLog << "loadANBCModelFromFile(string filename) - Could not find the training sigma parameter for the "<<k+1<<"th model" << std::endl;
648  return false;
649  }
650  file >> models[k].trainingSigma;
651 
652  //Resize the buffers
653  models[k].mu.resize(numInputDimensions);
654  models[k].sigma.resize(numInputDimensions);
655  models[k].weights.resize(numInputDimensions);
656 
657  //Load Mu, Sigma and Weights
658  file >> word;
659  if(word != "Mu:"){
660  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Mu vector for the "<<k+1<<"th model" << std::endl;
661  return false;
662  }
663 
664  //Load Mu
665  for(UINT j=0; j<models[k].N; j++){
666  Float value;
667  file >> value;
668  models[k].mu[j] = value;
669  }
670 
671  file >> word;
672  if(word != "Sigma:"){
673  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Sigma vector for the "<<k+1<<"th model" << std::endl;
674  return false;
675  }
676 
677  //Load Sigma
678  for(UINT j=0; j<models[k].N; j++){
679  Float value;
680  file >> value;
681  models[k].sigma[j] = value;
682  }
683 
684  file >> word;
685  if(word != "Weights:"){
686  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Weights vector for the "<<k+1<<"th model" << std::endl;
687  return false;
688  }
689 
690  //Load Weights
691  for(UINT j=0; j<models[k].N; j++){
692  Float value;
693  file >> value;
694  models[k].weights[j] = value;
695  }
696 
697  file >> word;
698  if(word != "*********************************"){
699  errorLog << "loadANBCModelFromFile(string filename) - Could not find the model footer for the "<<k+1<<"th model" << std::endl;
700  return false;
701  }
702  }
703 
704  //Flag that the model is trained
705  trained = true;
706 
707  //Recompute the null rejection thresholds
708  recomputeNullRejectionThresholds();
709 
710  //Resize the prediction results to make sure it is setup for realtime prediction
711  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
712  bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
713  classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
714  classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
715 
716  return true;
717 
718 }
719 
720 GRT_END_NAMESPACE
721 
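A note on null rejection: predict_ only accepts the winning class if its log-likelihood distance is at or above the per-class threshold computed during training; otherwise it reports GRT_DEFAULT_NULL_CLASS_LABEL. The rejection coefficient can be changed after training via setNullRejectionCoeff, which recomputes the per-class thresholds as in recomputeNullRejectionThresholds above. A short hedged sketch, reusing the anbc instance from the earlier example:

// Adjust null rejection after training; the coefficient value is illustrative.
// A larger coefficient generally moves the thresholds further from the training
// mean, making rejection more permissive; a smaller one makes it stricter.
anbc.setNullRejectionCoeff( 3.0 );
VectorFloat thresholds = anbc.getNullRejectionThresholds();
for(UINT k=0; k<thresholds.size(); k++){
    std::cout << "Class " << k << " rejection threshold: " << thresholds[k] << std::endl;
}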