GestureRecognitionToolkit  Version: 0.2.0
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source, c++ machine learning library for real-time gesture recognition.
ANBC.cpp
1 /*
2 GRT MIT License
3 Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>
4 
5 Permission is hereby granted, free of charge, to any person obtaining a copy of this software
6 and associated documentation files (the "Software"), to deal in the Software without restriction,
7 including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
9 subject to the following conditions:
10 
11 The above copyright notice and this permission notice shall be included in all copies or substantial
12 portions of the Software.
13 
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
15 LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
16 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
17 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
18 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 */
20 
21 #define GRT_DLL_EXPORTS
22 #include "ANBC.h"
23 
24 GRT_BEGIN_NAMESPACE
25 
26 //Register the ANBC module with the Classifier base class
27 RegisterClassifierModule< ANBC > ANBC::registerModule("ANBC");
28 
29 ANBC::ANBC(bool useScaling,bool useNullRejection,Float nullRejectionCoeff)
30 {
31  this->useScaling = useScaling;
32  this->useNullRejection = useNullRejection;
33  this->nullRejectionCoeff = nullRejectionCoeff;
34  supportsNullRejection = true;
35  weightsDataSet = false;
36  classType = "ANBC";
37  classifierType = classType;
38  classifierMode = STANDARD_CLASSIFIER_MODE;
39  debugLog.setProceedingText("[DEBUG ANBC]");
40  errorLog.setProceedingText("[ERROR ANBC]");
41  trainingLog.setProceedingText("[TRAINING ANBC]");
42  warningLog.setProceedingText("[WARNING ANBC]");
43 }
44 
45 ANBC::ANBC(const ANBC &rhs){
46  classType = "ANBC";
47  classifierType = classType;
48  classifierMode = STANDARD_CLASSIFIER_MODE;
49  debugLog.setProceedingText("[DEBUG ANBC]");
50  errorLog.setProceedingText("[ERROR ANBC]");
51  trainingLog.setProceedingText("[TRAINING ANBC]");
52  warningLog.setProceedingText("[WARNING ANBC]");
53  *this = rhs;
54 }
55 
57 {
58 }
59 
60 ANBC& ANBC::operator=(const ANBC &rhs){
61  if( this != &rhs ){
62  //ANBC variables
63  this->weightsDataSet = rhs.weightsDataSet;
64  this->weightsData = rhs.weightsData;
65  this->models = rhs.models;
66 
67  //Classifier variables
68  copyBaseVariables( (Classifier*)&rhs );
69  }
70  return *this;
71 }
72 
73 bool ANBC::deepCopyFrom(const Classifier *classifier){
74 
75  if( classifier == NULL ) return false;
76 
77  if( this->getClassifierType() == classifier->getClassifierType() ){
78 
79  ANBC *ptr = (ANBC*)classifier;
80  //Clone the ANBC values
81  this->weightsDataSet = ptr->weightsDataSet;
82  this->weightsData = ptr->weightsData;
83  this->models = ptr->models;
84 
85  //Clone the classifier variables
86  return copyBaseVariables( classifier );
87  }
88  return false;
89 }
90 
/**
 Trains one Gaussian naive Bayes model per class from the labelled training data.
 NOTE: when useScaling is enabled the training data is scaled IN PLACE (the
 parameter is a non-const reference), so the caller's dataset is modified.
 @param trainingData the labelled dataset to train from; must contain at least one sample
 @return true if all per-class models trained successfully, false otherwise
*/
bool ANBC::train_(ClassificationData &trainingData){

    //Clear any previous model
    clear();

    const unsigned int M = trainingData.getNumSamples();
    const unsigned int N = trainingData.getNumDimensions();
    const unsigned int K = trainingData.getNumClasses();

    if( M == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - Training data has zero samples!" << std::endl;
        return false;
    }

    //If per-dimension weights were supplied, they must match the training dimensionality
    if( weightsDataSet ){
        if( weightsData.getNumDimensions() != N ){
            errorLog << "train_(ClassificationData &trainingData) - The number of dimensions in the weights data (" << weightsData.getNumDimensions() << ") is not equal to the number of dimensions of the training data (" << N << ")" << std::endl;
            return false;
        }
    }

    numInputDimensions = N;
    numClasses = K;
    models.resize(K);
    classLabels.resize(K);
    ranges = trainingData.getRanges();   //captured BEFORE scaling so predict_ can rescale inputs

    //Scale the training data if needed
    if( useScaling ){
        //Scale the training data between 0 and 1
        trainingData.scale(0, 1);
    }

    //Train each of the models
    for(UINT k=0; k<numClasses; k++){

        //Get the class label for the kth class
        UINT classLabel = trainingData.getClassTracker()[k].classLabel;

        //Set the kth class label
        classLabels[k] = classLabel;

        //Get the weights for this class
        VectorFloat weights(numInputDimensions);
        if( weightsDataSet ){
            //Find the weights sample whose class label matches this class
            bool weightsFound = false;
            for(UINT i=0; i<weightsData.getNumSamples(); i++){
                if( weightsData[i].getClassLabel() == classLabel ){
                    weights = weightsData[i].getSample();
                    weightsFound = true;
                    break;
                }
            }

            if( !weightsFound ){
                errorLog << "train_(ClassificationData &trainingData) - Failed to find the weights for class " << classLabel << std::endl;
                return false;
            }
        }else{
            //If the weights data has not been set then all the weights are 1
            for(UINT j=0; j<numInputDimensions; j++) weights[j] = 1.0;
        }

        //Get all the training data for this class
        ClassificationData classData = trainingData.getClassData(classLabel);
        MatrixFloat data(classData.getNumSamples(),N);

        //Copy the training data into a matrix
        for(UINT i=0; i<data.getNumRows(); i++){
            for(UINT j=0; j<data.getNumCols(); j++){
                data[i][j] = classData[i][j];
            }
        }

        //Train the model for this class; gamma controls the null-rejection threshold width
        models[k].gamma = nullRejectionCoeff;
        if( !models[k].train( classLabel, data, weights ) ){
            errorLog << "train_(ClassificationData &trainingData) - Failed to train model for class: " << classLabel << std::endl;

            //Try and work out why the training failed
            if( models[k].N == 0 ){
                errorLog << "train_(ClassificationData &trainingData) - N == 0!" << std::endl;
                models.clear();
                return false;
            }
            //A zero standard deviation in any dimension makes the Gaussian degenerate
            for(UINT j=0; j<numInputDimensions; j++){
                if( models[k].sigma[j] == 0 ){
                    errorLog << "train_(ClassificationData &trainingData) - The standard deviation of column " << j+1 << " is zero! Check the training data" << std::endl;
                    models.clear();
                    return false;
                }
            }
            //Training failed for an unknown reason; discard the partial models
            models.clear();
            return false;
        }

    }

    //Store the null rejection thresholds
    nullRejectionThresholds.resize(numClasses);
    for(UINT k=0; k<numClasses; k++) {
        nullRejectionThresholds[k] = models[k].threshold;
    }

    //Flag that the models have been trained
    trained = true;
    return trained;
}
199 
200 bool ANBC::predict_(VectorFloat &inputVector){
201 
202  if( !trained ){
203  errorLog << "predict_(VectorFloat &inputVector) - ANBC Model Not Trained!" << std::endl;
204  return false;
205  }
206 
207  predictedClassLabel = 0;
208  maxLikelihood = -10000;
209 
210  if( !trained ) return false;
211 
212  if( inputVector.size() != numInputDimensions ){
213  errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << std::endl;
214  return false;
215  }
216 
217  if( useScaling ){
218  for(UINT n=0; n<numInputDimensions; n++){
219  inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, MIN_SCALE_VALUE, MAX_SCALE_VALUE);
220  }
221  }
222 
223  if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
224  if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
225 
226  Float classLikelihoodsSum = 0;
227  Float minDist = -99e+99;
228  for(UINT k=0; k<numClasses; k++){
229  classDistances[k] = models[k].predict( inputVector );
230 
231  //At this point the class likelihoods and class distances are the same thing
232  classLikelihoods[k] = classDistances[k];
233 
234  //If the distances are very far away then they could be -inf or nan so catch this so the sum still works
235  if( grt_isinf(classLikelihoods[k]) || grt_isnan(classLikelihoods[k]) ){
236  classLikelihoods[k] = 0;
237  }else{
238  classLikelihoods[k] = grt_exp( classLikelihoods[k] );
239  classLikelihoodsSum += classLikelihoods[k];
240 
241  //The loglikelihood values are negative so we want the values closest to 0
242  if( classDistances[k] > minDist ){
243  minDist = classDistances[k];
244  predictedClassLabel = k;
245  }
246  }
247  }
248 
249  //If the class likelihoods sum is zero then all classes are -INF
250  if( classLikelihoodsSum == 0 ){
251  predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
252  maxLikelihood = 0;
253  return true;
254  }
255 
256  //Normalize the classlikelihoods
257  for(UINT k=0; k<numClasses; k++){
258  classLikelihoods[k] /= classLikelihoodsSum;
259  }
260  maxLikelihood = classLikelihoods[predictedClassLabel];
261 
262  if( useNullRejection ){
263  //Check to see if the best result is greater than the models threshold
264  if( minDist >= models[predictedClassLabel].threshold ) predictedClassLabel = models[predictedClassLabel].classLabel;
265  else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
266  }else predictedClassLabel = models[predictedClassLabel].classLabel;
267 
268  return true;
269 }
270 
272 
273  if( trained ){
274  if( nullRejectionThresholds.size() != numClasses )
275  nullRejectionThresholds.resize(numClasses);
276  for(UINT k=0; k<numClasses; k++) {
277  models[k].recomputeThresholdValue(nullRejectionCoeff);
278  nullRejectionThresholds[k] = models[k].threshold;
279  }
280  return true;
281  }
282  return false;
283 }
284 
/**
 Resets the classifier. ANBC keeps no per-prediction state beyond what the
 base class manages, so there is nothing module-specific to reset here.
 @return always true
*/
bool ANBC::reset(){
    return true;
}
288 
289 bool ANBC::clear(){
290 
291  //Clear the Classifier variables
293 
294  //Clear the ANBC model
295  weightsData.clear();
296  models.clear();
297 
298  return true;
299 }
300 
301 bool ANBC::save( std::fstream &file ) const{
302 
303  if(!file.is_open())
304  {
305  errorLog <<"save(fstream &file) - The file is not open!" << std::endl;
306  return false;
307  }
308 
309  //Write the header info
310  file<<"GRT_ANBC_MODEL_FILE_V2.0\n";
311 
312  //Write the classifier settings to the file
314  errorLog <<"save(fstream &file) - Failed to save classifier base settings to file!" << std::endl;
315  return false;
316  }
317 
318  if( trained ){
319  //Write each of the models
320  for(UINT k=0; k<numClasses; k++){
321  file << "*************_MODEL_*************\n";
322  file << "Model_ID: " << k+1 << std::endl;
323  file << "N: " << models[k].N << std::endl;
324  file << "ClassLabel: " << models[k].classLabel << std::endl;
325  file << "Threshold: " << models[k].threshold << std::endl;
326  file << "Gamma: " << models[k].gamma << std::endl;
327  file << "TrainingMu: " << models[k].trainingMu << std::endl;
328  file << "TrainingSigma: " << models[k].trainingSigma << std::endl;
329 
330  file<<"Mu:";
331  for(UINT j=0; j<models[k].N; j++){
332  file << "\t" << models[k].mu[j];
333  }file << std::endl;
334 
335  file<<"Sigma:";
336  for(UINT j=0; j<models[k].N; j++){
337  file << "\t" << models[k].sigma[j];
338  }file << std::endl;
339 
340  file<<"Weights:";
341  for(UINT j=0; j<models[k].N; j++){
342  file << "\t" << models[k].weights[j];
343  }file << std::endl;
344  }
345  }
346 
347  return true;
348 }
349 
350 bool ANBC::load( std::fstream &file ){
351 
352  trained = false;
353  numInputDimensions = 0;
354  numClasses = 0;
355  models.clear();
356  classLabels.clear();
357 
358  if(!file.is_open())
359  {
360  errorLog << "load(string filename) - Could not open file to load model" << std::endl;
361  return false;
362  }
363 
364  std::string word;
365  file >> word;
366 
367  //Check to see if we should load a legacy file
368  if( word == "GRT_ANBC_MODEL_FILE_V1.0" ){
369  return loadLegacyModelFromFile( file );
370  }
371 
372  //Find the file type header
373  if(word != "GRT_ANBC_MODEL_FILE_V2.0"){
374  errorLog << "load(string filename) - Could not find Model File Header" << std::endl;
375  return false;
376  }
377 
378  //Load the base settings from the file
380  errorLog << "load(string filename) - Failed to load base settings from file!" << std::endl;
381  return false;
382  }
383 
384  if( trained ){
385 
386  //Resize the buffer
387  models.resize(numClasses);
388 
389  //Load each of the K models
390  for(UINT k=0; k<numClasses; k++){
391  UINT modelID;
392  file >> word;
393  if(word != "*************_MODEL_*************"){
394  errorLog << "load(string filename) - Could not find header for the "<<k+1<<"th model" << std::endl;
395  return false;
396  }
397 
398  file >> word;
399  if(word != "Model_ID:"){
400  errorLog << "load(string filename) - Could not find model ID for the "<<k+1<<"th model" << std::endl;
401  return false;
402  }
403  file >> modelID;
404 
405  if(modelID-1!=k){
406  errorLog << "ANBC: Model ID does not match the current class ID for the "<<k+1<<"th model" << std::endl;
407  return false;
408  }
409 
410  file >> word;
411  if(word != "N:"){
412  errorLog << "ANBC: Could not find N for the "<<k+1<<"th model" << std::endl;
413  return false;
414  }
415  file >> models[k].N;
416 
417  file >> word;
418  if(word != "ClassLabel:"){
419  errorLog << "load(string filename) - Could not find ClassLabel for the "<<k+1<<"th model" << std::endl;
420  return false;
421  }
422  file >> models[k].classLabel;
423  classLabels[k] = models[k].classLabel;
424 
425  file >> word;
426  if(word != "Threshold:"){
427  errorLog << "load(string filename) - Could not find the threshold for the "<<k+1<<"th model" << std::endl;
428  return false;
429  }
430  file >> models[k].threshold;
431 
432  file >> word;
433  if(word != "Gamma:"){
434  errorLog << "load(string filename) - Could not find the gamma parameter for the "<<k+1<<"th model" << std::endl;
435  return false;
436  }
437  file >> models[k].gamma;
438 
439  file >> word;
440  if(word != "TrainingMu:"){
441  errorLog << "load(string filename) - Could not find the training mu parameter for the "<<k+1<<"th model" << std::endl;
442  return false;
443  }
444  file >> models[k].trainingMu;
445 
446  file >> word;
447  if(word != "TrainingSigma:"){
448  errorLog << "load(string filename) - Could not find the training sigma parameter for the "<<k+1<<"th model" << std::endl;
449  return false;
450  }
451  file >> models[k].trainingSigma;
452 
453  //Resize the buffers
454  models[k].mu.resize(numInputDimensions);
455  models[k].sigma.resize(numInputDimensions);
456  models[k].weights.resize(numInputDimensions);
457 
458  //Load Mu, Sigma and Weights
459  file >> word;
460  if(word != "Mu:"){
461  errorLog << "load(string filename) - Could not find the Mu vector for the "<<k+1<<"th model" << std::endl;
462  return false;
463  }
464 
465  //Load Mu
466  for(UINT j=0; j<models[k].N; j++){
467  Float value;
468  file >> value;
469  models[k].mu[j] = value;
470  }
471 
472  file >> word;
473  if(word != "Sigma:"){
474  errorLog << "load(string filename) - Could not find the Sigma vector for the "<<k+1<<"th model" << std::endl;
475  return false;
476  }
477 
478  //Load Sigma
479  for(UINT j=0; j<models[k].N; j++){
480  Float value;
481  file >> value;
482  models[k].sigma[j] = value;
483  }
484 
485  file >> word;
486  if(word != "Weights:"){
487  errorLog << "load(string filename) - Could not find the Weights vector for the "<<k+1<<"th model" << std::endl;
488  return false;
489  }
490 
491  //Load Weights
492  for(UINT j=0; j<models[k].N; j++){
493  Float value;
494  file >> value;
495  models[k].weights[j] = value;
496  }
497  }
498 
499  //Recompute the null rejection thresholds
501 
502  //Resize the prediction results to make sure it is setup for realtime prediction
503  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
504  bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
505  classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
506  classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
507  }
508 
509  return true;
510 }
511 
513  if( !trained ) return VectorFloat();
514  return nullRejectionThresholds;
515 }
516 
517 bool ANBC::setNullRejectionCoeff(Float nullRejectionCoeff){
518 
519  if( nullRejectionCoeff > 0 ){
520  this->nullRejectionCoeff = nullRejectionCoeff;
522  return true;
523  }
524  return false;
525 }
526 
527 bool ANBC::setWeights(const ClassificationData &weightsData){
528 
529  if( weightsData.getNumSamples() > 0 ){
530  weightsDataSet = true;
531  this->weightsData = weightsData;
532  return true;
533  }
534  return false;
535 }
536 
537 bool ANBC::loadLegacyModelFromFile( std::fstream &file ){
538 
539  std::string word;
540 
541  file >> word;
542  if(word != "NumFeatures:"){
543  errorLog << "loadANBCModelFromFile(string filename) - Could not find NumFeatures " << std::endl;
544  return false;
545  }
546  file >> numInputDimensions;
547 
548  file >> word;
549  if(word != "NumClasses:"){
550  errorLog << "loadANBCModelFromFile(string filename) - Could not find NumClasses" << std::endl;
551  return false;
552  }
553  file >> numClasses;
554 
555  file >> word;
556  if(word != "UseScaling:"){
557  errorLog << "loadANBCModelFromFile(string filename) - Could not find UseScaling" << std::endl;
558  return false;
559  }
560  file >> useScaling;
561 
562  file >> word;
563  if(word != "UseNullRejection:"){
564  errorLog << "loadANBCModelFromFile(string filename) - Could not find UseNullRejection" << std::endl;
565  return false;
566  }
567  file >> useNullRejection;
568 
570  if( useScaling ){
571  //Resize the ranges buffer
572  ranges.resize(numInputDimensions);
573 
574  file >> word;
575  if(word != "Ranges:"){
576  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Ranges" << std::endl;
577  return false;
578  }
579  for(UINT n=0; n<ranges.size(); n++){
580  file >> ranges[n].minValue;
581  file >> ranges[n].maxValue;
582  }
583  }
584 
585  //Resize the buffer
586  models.resize(numClasses);
587  classLabels.resize(numClasses);
588 
589  //Load each of the K models
590  for(UINT k=0; k<numClasses; k++){
591  UINT modelID;
592  file >> word;
593  if(word != "*************_MODEL_*************"){
594  errorLog << "loadANBCModelFromFile(string filename) - Could not find header for the "<<k+1<<"th model" << std::endl;
595  return false;
596  }
597 
598  file >> word;
599  if(word != "Model_ID:"){
600  errorLog << "loadANBCModelFromFile(string filename) - Could not find model ID for the "<<k+1<<"th model" << std::endl;
601  return false;
602  }
603  file >> modelID;
604 
605  if(modelID-1!=k){
606  errorLog << "ANBC: Model ID does not match the current class ID for the "<<k+1<<"th model" << std::endl;
607  return false;
608  }
609 
610  file >> word;
611  if(word != "N:"){
612  errorLog << "ANBC: Could not find N for the "<<k+1<<"th model" << std::endl;
613  return false;
614  }
615  file >> models[k].N;
616 
617  file >> word;
618  if(word != "ClassLabel:"){
619  errorLog << "loadANBCModelFromFile(string filename) - Could not find ClassLabel for the "<<k+1<<"th model" << std::endl;
620  return false;
621  }
622  file >> models[k].classLabel;
623  classLabels[k] = models[k].classLabel;
624 
625  file >> word;
626  if(word != "Threshold:"){
627  errorLog << "loadANBCModelFromFile(string filename) - Could not find the threshold for the "<<k+1<<"th model" << std::endl;
628  return false;
629  }
630  file >> models[k].threshold;
631 
632  file >> word;
633  if(word != "Gamma:"){
634  errorLog << "loadANBCModelFromFile(string filename) - Could not find the gamma parameter for the "<<k+1<<"th model" << std::endl;
635  return false;
636  }
637  file >> models[k].gamma;
638 
639  file >> word;
640  if(word != "TrainingMu:"){
641  errorLog << "loadANBCModelFromFile(string filename) - Could not find the training mu parameter for the "<<k+1<<"th model" << std::endl;
642  return false;
643  }
644  file >> models[k].trainingMu;
645 
646  file >> word;
647  if(word != "TrainingSigma:"){
648  errorLog << "loadANBCModelFromFile(string filename) - Could not find the training sigma parameter for the "<<k+1<<"th model" << std::endl;
649  return false;
650  }
651  file >> models[k].trainingSigma;
652 
653  //Resize the buffers
654  models[k].mu.resize(numInputDimensions);
655  models[k].sigma.resize(numInputDimensions);
656  models[k].weights.resize(numInputDimensions);
657 
658  //Load Mu, Sigma and Weights
659  file >> word;
660  if(word != "Mu:"){
661  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Mu vector for the "<<k+1<<"th model" << std::endl;
662  return false;
663  }
664 
665  //Load Mu
666  for(UINT j=0; j<models[k].N; j++){
667  Float value;
668  file >> value;
669  models[k].mu[j] = value;
670  }
671 
672  file >> word;
673  if(word != "Sigma:"){
674  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Sigma vector for the "<<k+1<<"th model" << std::endl;
675  return false;
676  }
677 
678  //Load Sigma
679  for(UINT j=0; j<models[k].N; j++){
680  Float value;
681  file >> value;
682  models[k].sigma[j] = value;
683  }
684 
685  file >> word;
686  if(word != "Weights:"){
687  errorLog << "loadANBCModelFromFile(string filename) - Could not find the Weights vector for the "<<k+1<<"th model" << std::endl;
688  return false;
689  }
690 
691  //Load Weights
692  for(UINT j=0; j<models[k].N; j++){
693  Float value;
694  file >> value;
695  models[k].weights[j] = value;
696  }
697 
698  file >> word;
699  if(word != "*********************************"){
700  errorLog << "loadANBCModelFromFile(string filename) - Could not find the model footer for the "<<k+1<<"th model" << std::endl;
701  return false;
702  }
703  }
704 
705  //Flag that the model is trained
706  trained = true;
707 
708  //Recompute the null rejection thresholds
710 
711  //Resize the prediction results to make sure it is setup for realtime prediction
712  maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
713  bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
714  classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
715  classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
716 
717  return true;
718 
719 }
720 
721 GRT_END_NAMESPACE
bool saveBaseSettingsToFile(std::fstream &file) const
Definition: Classifier.cpp:256
virtual bool reset()
Definition: ANBC.cpp:285
#define DEFAULT_NULL_LIKELIHOOD_VALUE
Definition: Classifier.h:38
bool loadLegacyModelFromFile(std::fstream &file)
Definition: ANBC.cpp:537
Float scale(const Float &x, const Float &minSource, const Float &maxSource, const Float &minTarget, const Float &maxTarget, const bool constrain=false)
Definition: MLBase.h:353
virtual ~ANBC(void)
Definition: ANBC.cpp:56
std::string getClassifierType() const
Definition: Classifier.cpp:161
Vector< ClassTracker > getClassTracker() const
virtual bool load(std::fstream &file)
Definition: ANBC.cpp:350
virtual bool deepCopyFrom(const Classifier *classifier)
Definition: ANBC.cpp:73
ClassificationData getClassData(const UINT classLabel) const
virtual bool resize(const unsigned int size)
Definition: Vector.h:133
bool setWeights(const ClassificationData &weightsData)
Definition: ANBC.cpp:527
virtual bool train(ClassificationData trainingData)
Definition: MLBase.cpp:89
#define MIN_SCALE_VALUE
Definition: ANBC.h:47
ANBC(bool useScaling=false, bool useNullRejection=false, double nullRejectionCoeff=10.0)
virtual bool recomputeNullRejectionThresholds()
Definition: ANBC.cpp:271
VectorFloat getNullRejectionThresholds() const
Definition: ANBC.cpp:512
This class implements the Adaptive Naive Bayes Classifier algorithm. The Adaptive Naive Bayes Classif...
virtual bool train_(ClassificationData &trainingData)
Definition: ANBC.cpp:91
UINT getNumSamples() const
ANBC & operator=(const ANBC &rhs)
Definition: ANBC.cpp:60
virtual bool clear()
Definition: ANBC.cpp:289
Definition: ANBC.h:50
bool copyBaseVariables(const Classifier *classifier)
Definition: Classifier.cpp:93
bool loadBaseSettingsFromFile(std::fstream &file)
Definition: Classifier.cpp:303
virtual bool predict_(VectorFloat &inputVector)
Definition: ANBC.cpp:200
UINT getNumDimensions() const
UINT getNumClasses() const
Vector< MinMax > getRanges() const
bool setNullRejectionCoeff(double nullRejectionCoeff)
Definition: ANBC.cpp:517
bool scale(const Float minTarget, const Float maxTarget)
virtual bool clear()
Definition: Classifier.cpp:142
virtual bool save(std::fstream &file) const
Definition: ANBC.cpp:301