const Float MLP_NEURON_MIN_TARGET = -1.0;
const Float MLP_NEURON_MAX_TARGET = 1.0;
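// Training inputs and targets are scaled into [MLP_NEURON_MIN_TARGET, MLP_NEURON_MAX_TARGET]
// before learning and mapped back on prediction (see the training and feedforward code below).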
inputLayerActivationFunction = Neuron::LINEAR;
hiddenLayerActivationFunction = Neuron::LINEAR;
outputLayerActivationFunction = Neuron::LINEAR;
numRandomTrainingIterations = 10;
validationSetSize = 20;
trainingMode = ONLINE_GRADIENT_DESCENT;
nullRejectionCoeff = 0.9;
nullRejectionThreshold = 0;
useValidationSet = true;
randomiseTrainingOrder = false;
classificationModeActive = false;
useNullRejection = true;
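// The MLP is implemented as a Regressifier, but it can also act as a classifier:
// classificationModeActive switches between regression and classification behaviour.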
regressifierType = classType;
debugLog.setProceedingText("[DEBUG MLP]");
errorLog.setProceedingText("[ERROR MLP]");
trainingLog.setProceedingText("[TRAINING MLP]");
warningLog.setProceedingText("[WARNING MLP]");

regressifierType = classType;
debugLog.setProceedingText("[DEBUG MLP]");
errorLog.setProceedingText("[ERROR MLP]");
trainingLog.setProceedingText("[TRAINING MLP]");
warningLog.setProceedingText("[WARNING MLP]");
this->numInputNeurons = rhs.numInputNeurons;
this->numHiddenNeurons = rhs.numHiddenNeurons;
this->numOutputNeurons = rhs.numOutputNeurons;
this->inputLayerActivationFunction = rhs.inputLayerActivationFunction;
this->hiddenLayerActivationFunction = rhs.hiddenLayerActivationFunction;
this->outputLayerActivationFunction = rhs.outputLayerActivationFunction;
this->numRandomTrainingIterations = rhs.numRandomTrainingIterations;
this->trainingMode = rhs.trainingMode;
this->momentum = rhs.momentum;
this->trainingError = rhs.trainingError;
this->gamma = rhs.gamma;
this->initialized = rhs.initialized;
this->inputLayer = rhs.inputLayer;
this->hiddenLayer = rhs.hiddenLayer;
this->outputLayer = rhs.outputLayer;
this->inputVectorRanges = rhs.inputVectorRanges;
this->targetVectorRanges = rhs.targetVectorRanges;
this->trainingErrorLog = rhs.trainingErrorLog;

this->classificationModeActive = rhs.classificationModeActive;
this->useNullRejection = rhs.useNullRejection;
this->predictedClassLabel = rhs.predictedClassLabel;
this->nullRejectionCoeff = rhs.nullRejectionCoeff;
this->nullRejectionThreshold = rhs.nullRejectionThreshold;
this->maxLikelihood = rhs.maxLikelihood;
this->classLikelihoods = rhs.classLikelihoods;
if( regressifier == NULL ){
    errorLog << "deepCopyFrom(const Regressifier *regressifier) - regressifier is NULL!" << std::endl;

errorLog << "deepCopyFrom(const Regressifier *regressifier) - regressifier is not the correct type!" << std::endl;

*this = *dynamic_cast<const MLP*>(regressifier);
errorLog << "train_(ClassificationData trainingData) - The MLP has not been initialized!" << std::endl;

errorLog << "train_(ClassificationData trainingData) - The number of input dimensions in the training data (" << trainingData.getNumDimensions() << ") does not match that of the MLP (" << numInputNeurons << ")" << std::endl;

errorLog << "train_(ClassificationData trainingData) - The number of classes in the training data (" << trainingData.getNumClasses() << ") does not match that of the MLP (" << numOutputNeurons << ")" << std::endl;

classificationModeActive = true;

return trainModel(regressionData);

classificationModeActive = false;

return trainModel(trainingData);
errorLog << "predict_(VectorFloat &inputVector) - Model not trained!" << std::endl;

if( inputVector.size() != numInputNeurons ){
    errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << int(inputVector.size()) << ") does not match that of the number of input dimensions (" << numInputNeurons << ") " << std::endl;
if( classificationModeActive ){

    const UINT K = (UINT)regressionData.size();
    classLikelihoods = regressionData;

    for(UINT i=0; i<K; i++){
        classLikelihoods[i] += minValue;

    for(UINT i=0; i<K; i++){
        classLikelihoods[i] /= sum;

    Float bestValue = classLikelihoods[0];

    for(UINT i=1; i<K; i++){
        if( classLikelihoods[i] > bestValue ){
            bestValue = classLikelihoods[i];

    maxLikelihood = bestValue;
    predictedClassLabel = bestIndex+1;

    if( useNullRejection ){
        if( maxLikelihood < nullRejectionCoeff ){
            predictedClassLabel = 0;
bool MLP::init(const UINT numInputNeurons, const UINT numHiddenNeurons, const UINT numOutputNeurons){
    return init(numInputNeurons, numHiddenNeurons, numOutputNeurons, inputLayerActivationFunction, hiddenLayerActivationFunction, outputLayerActivationFunction );
}

bool MLP::init(const UINT numInputNeurons,
               const UINT numHiddenNeurons,
               const UINT numOutputNeurons,
               const UINT inputLayerActivationFunction,
               const UINT hiddenLayerActivationFunction,
               const UINT outputLayerActivationFunction){

random.setSeed( (UINT)time(NULL) );
if( numInputNeurons == 0 || numHiddenNeurons == 0 || numOutputNeurons == 0 ){
    if( numInputNeurons == 0 ){ errorLog << "init(...) - The number of input neurons is zero!" << std::endl; }
    if( numHiddenNeurons == 0 ){ errorLog << "init(...) - The number of hidden neurons is zero!" << std::endl; }
    if( numOutputNeurons == 0 ){ errorLog << "init(...) - The number of output neurons is zero!" << std::endl; }

errorLog << "init(...) - One of the activation functions failed the validation check!" << std::endl;
this->numInputNeurons = numInputNeurons;
this->numHiddenNeurons = numHiddenNeurons;
this->numOutputNeurons = numOutputNeurons;

this->numInputDimensions = numInputNeurons;
this->numOutputDimensions = numOutputNeurons;

this->inputLayerActivationFunction = inputLayerActivationFunction;
this->hiddenLayerActivationFunction = hiddenLayerActivationFunction;
this->outputLayerActivationFunction = outputLayerActivationFunction;

inputLayer.resize(numInputNeurons);
hiddenLayer.resize(numHiddenNeurons);
outputLayer.resize(numOutputNeurons);
for(UINT i=0; i<numInputNeurons; i++){
    inputLayer[i].init(1,inputLayerActivationFunction);
    inputLayer[i].weights[0] = 1.0;
    inputLayer[i].bias = 0.0;
    inputLayer[i].gamma = gamma;
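// Each input neuron has a single unit weight and zero bias, so the input layer acts as a
// pass-through that only applies the input-layer activation function to each input value.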
for(UINT i=0; i<numHiddenNeurons; i++){
    hiddenLayer[i].init(numInputNeurons,hiddenLayerActivationFunction);
    hiddenLayer[i].gamma = gamma;

for(UINT i=0; i<numOutputNeurons; i++){
    outputLayer[i].init(numHiddenNeurons,outputLayerActivationFunction);
    outputLayer[i].gamma = gamma;

numHiddenNeurons = 0;
numOutputNeurons = 0;
errorLog << "train(RegressionData trainingData) - The MLP has not been initialized!" << std::endl;

errorLog << "train(RegressionData trainingData) - The training data is empty!" << std::endl;

if( useValidationSet ){
    validationData = trainingData.partition( 100 - validationSetSize );
if( N != numInputNeurons ){
    errorLog << "train(LabelledRegressionData trainingData) - The number of input dimensions in the training data (" << N << ") does not match that of the MLP (" << numInputNeurons << ")" << std::endl;

if( T != numOutputNeurons ){
    errorLog << "train(LabelledRegressionData trainingData) - The number of target dimensions in the training data (" << T << ") does not match that of the MLP (" << numOutputNeurons << ")" << std::endl;

numInputDimensions = numInputNeurons;
numOutputDimensions = numOutputNeurons;

trainingData.scale(inputVectorRanges,targetVectorRanges,MLP_NEURON_MIN_TARGET,MLP_NEURON_MAX_TARGET);

if( useValidationSet ){
    validationData.scale(inputVectorRanges,targetVectorRanges,MLP_NEURON_MIN_TARGET,MLP_NEURON_MAX_TARGET);
bool tempScalingState = useScaling;

trainingErrorLog.clear();
inputNeuronsOuput.resize(numInputNeurons);
hiddenNeuronsOutput.resize(numHiddenNeurons);
outputNeuronsOutput.resize(numOutputNeurons);
deltaO.resize(numOutputNeurons);
deltaH.resize(numHiddenNeurons);
switch( trainingMode ){
    case ONLINE_GRADIENT_DESCENT:
        if( classificationModeActive ){
            trained = trainOnlineGradientDescentClassification( trainingData, validationData );
        }else{
            trained = trainOnlineGradientDescentRegression( trainingData, validationData );
        }
        break;
    default:
        useScaling = tempScalingState;
        errorLog << "train(RegressionData trainingData) - Unknown training mode!" << std::endl;

useScaling = tempScalingState;
const UINT numTestingExamples = useValidationSet ? validationData.getNumSamples() : M;

totalSquaredTrainingError = 0;
rootMeanSquaredTrainingError = 0;

bool keepTraining = true;

Float alpha = learningRate;
Float beta = momentum;

Float trainingSetAccuracy = 0;
Float trainingSetTotalSquaredError = 0;

Float bestAccuracy = 0;

Float backPropError = 0;

TrainingResult result;
trainingResults.reserve(M);

for(UINT i=0; i<M; i++) indexList[i] = i;
for(UINT iter=0; iter<numRandomTrainingIterations; iter++){

    tempTrainingErrorLog.clear();

    init(numInputNeurons,numHiddenNeurons,numOutputNeurons,inputLayerActivationFunction,hiddenLayerActivationFunction,outputLayerActivationFunction);

    if( randomiseTrainingOrder ){
        for(UINT i=0; i<M; i++){

    while( keepTraining ){

        totalSquaredTrainingError = 0;

        for(UINT i=0; i<M; i++){

            const VectorFloat &trainingExample = trainingData[ indexList[i] ].getInputVector();
            const VectorFloat &targetVector = trainingData[ indexList[i] ].getTargetVector();

            backPropError = back_prop(trainingExample,targetVector,alpha,beta);

            if( isNAN(backPropError) ){
                keepTraining = false;
                errorLog << "train(RegressionData trainingData) - NaN found!" << std::endl;
            if( classificationModeActive ){

                bestValue = targetVector[0];

                for(UINT i=1; i<targetVector.size(); i++){
                    if( targetVector[i] > bestValue ){
                        bestValue = targetVector[i];

                classLabel = bestIndex + 1;

                for(UINT i=1; i<numOutputNeurons; i++){
                    if( y[i] > bestValue ){

                predictedClassLabel = bestIndex+1;

                if( classLabel == predictedClassLabel ){

            totalSquaredTrainingError += backPropError;

        keepTraining = false;
        errorLog << "train(RegressionData trainingData) - NaN found!" << std::endl;
        if( useValidationSet ){
            trainingSetAccuracy = accuracy;
            trainingSetTotalSquaredError = totalSquaredTrainingError;

            totalSquaredTrainingError = 0;

            for(UINT i=0; i<numValidationSamples; i++){
                const VectorFloat &inputVector = validationData[i].getInputVector();
                const VectorFloat &targetVector = validationData[i].getTargetVector();

                if( classificationModeActive ){

                    bestValue = targetVector[0];

                    for(UINT i=1; i<targetVector.size(); i++){
                        if( targetVector[i] > bestValue ){
                            bestValue = targetVector[i];

                    classLabel = bestIndex + 1;

                    for(UINT i=1; i<numOutputNeurons; i++){
                        if( y[i] > bestValue ){

                    predictedClassLabel = bestIndex+1;

                    if( classLabel == predictedClassLabel ){

                for(UINT j=0; j<T; j++){
                    totalSquaredTrainingError += SQR( targetVector[j]-y[j] );

            accuracy = (accuracy/Float(numValidationSamples))*100.0;
            rootMeanSquaredTrainingError = sqrt( totalSquaredTrainingError / Float(numValidationSamples) );

            accuracy = (accuracy/Float(M))*100.0;
            rootMeanSquaredTrainingError = sqrt( totalSquaredTrainingError / Float(M) );
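        // When a validation set is used, the accuracy and RMS error reported for the epoch
        // are computed on the held-out validation samples rather than on the training set.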
        temp[0] = 100.0 - trainingSetAccuracy;
        temp[1] = 100.0 - accuracy;
        tempTrainingErrorLog.push_back( temp );

        error = 100.0 - accuracy;

        result.setClassificationResult(iter,accuracy,this);
        trainingResults.push_back( result );

        delta = fabs( error - lastError );

        trainingLog << "Random Training Iteration: " << iter+1 << " Epoch: " << epoch << " Error: " << error << " Delta: " << delta << std::endl;

        if( ++epoch >= maxNumEpochs ){
            keepTraining = false;

        if( delta <= minChange && epoch >= minNumEpochs ){
            keepTraining = false;

        trainingResultsObserverManager.notifyObservers( result );
    if( lastError < bestError ){

        bestError = lastError;
        bestTSError = totalSquaredTrainingError;
        bestRMSError = rootMeanSquaredTrainingError;
        bestAccuracy = accuracy;

        trainingErrorLog = tempTrainingErrorLog;

trainingLog << "Best Accuracy: " << bestAccuracy << " in Random Training Iteration: " << bestIter+1 << std::endl;

errorLog << "train(LabelledRegressionData trainingData) - NaN found!" << std::endl;

trainingError = bestAccuracy;
if( useNullRejection ){

    Float averageValue = 0;
    VectorFloat classificationPredictions, inputVector, targetVector;

    for(UINT i=0; i<numTestingExamples; i++){
        inputVector = useValidationSet ? validationData[i].getInputVector() : trainingData[i].getInputVector();
        targetVector = useValidationSet ? validationData[i].getTargetVector() : trainingData[i].getTargetVector();

        bestValue = targetVector[0];

        for(UINT i=1; i<targetVector.size(); i++){
            if( targetVector[i] > bestValue ){
                bestValue = targetVector[i];

        classLabel = bestIndex + 1;

        for(UINT i=1; i<y.size(); i++){
            if( y[i] > bestValue ){

        predictedClassLabel = bestIndex+1;

        if( classLabel == predictedClassLabel ){
            classificationPredictions.push_back( bestValue );
            averageValue += bestValue;

    averageValue /= Float(classificationPredictions.size());

    for(UINT i=0; i<classificationPredictions.size(); i++){
        stdDev += SQR(classificationPredictions[i]-averageValue);

    stdDev = sqrt( stdDev / Float(classificationPredictions.size()-1) );

    nullRejectionThreshold = averageValue-(stdDev*nullRejectionCoeff);
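    // The null-rejection threshold is derived from the confidence scores of correctly
    // classified examples: threshold = mean - (nullRejectionCoeff * standard deviation).
    // Predictions whose maximum likelihood falls below this threshold can then be
    // rejected as the null class at prediction time.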
const UINT numValidationSamples = useValidationSet ? validationData.getNumSamples() : M;

bool keepTraining = true;

Float alpha = learningRate;
Float beta = momentum;

totalSquaredTrainingError = 0;
rootMeanSquaredTrainingError = 0;

Float trainingSetTotalSquaredError = 0;

TrainingResult result;
trainingResults.reserve(M);

for(UINT i=0; i<M; i++) indexList[i] = i;

for(UINT iter=0; iter<numRandomTrainingIterations; iter++){

    tempTrainingErrorLog.clear();

    init(numInputNeurons,numHiddenNeurons,numOutputNeurons,inputLayerActivationFunction,hiddenLayerActivationFunction,outputLayerActivationFunction);

    if( randomiseTrainingOrder ){
        for(UINT i=0; i<M; i++){
    while( keepTraining ){

        totalSquaredTrainingError = 0;

        for(UINT i=0; i<M; i++){

            const VectorFloat &trainingExample = trainingData[ indexList[i] ].getInputVector();
            const VectorFloat &targetVector = trainingData[ indexList[i] ].getTargetVector();

            Float backPropError = back_prop(trainingExample,targetVector,alpha,beta);

            if( isNAN(backPropError) ){
                keepTraining = false;
                errorLog << "train(RegressionData trainingData) - NaN found!" << std::endl;

            totalSquaredTrainingError += backPropError;

        keepTraining = false;
        errorLog << "train(RegressionData trainingData) - NaN found!" << std::endl;
        if( useValidationSet ){
            trainingSetTotalSquaredError = totalSquaredTrainingError;
            totalSquaredTrainingError = 0;

            for(UINT i=0; i<numValidationSamples; i++){
                const VectorFloat &trainingExample = validationData[i].getInputVector();
                const VectorFloat &targetVector = validationData[i].getTargetVector();

                for(UINT j=0; j<T; j++){
                    totalSquaredTrainingError += SQR( targetVector[j]-y[j] );

            rootMeanSquaredTrainingError = sqrt( totalSquaredTrainingError / Float(numValidationSamples) );

        rootMeanSquaredTrainingError = sqrt( totalSquaredTrainingError / Float(M) );

        temp[0] = trainingSetTotalSquaredError;
        temp[1] = rootMeanSquaredTrainingError;
        tempTrainingErrorLog.push_back( temp );

        error = rootMeanSquaredTrainingError;

        result.setRegressionResult(iter,totalSquaredTrainingError,rootMeanSquaredTrainingError,this);
        trainingResults.push_back( result );

        delta = fabs( error - lastError );
        trainingLog << "Random Training Iteration: " << iter+1 << " Epoch: " << epoch << " Error: " << error << " Delta: " << delta << std::endl;

        if( ++epoch >= maxNumEpochs ){
            keepTraining = false;

        if( delta <= minChange && epoch >= minNumEpochs ){
            keepTraining = false;

        trainingResultsObserverManager.notifyObservers( result );

    if( lastError < bestError ){

        bestError = lastError;
        bestTSError = totalSquaredTrainingError;
        bestRMSError = rootMeanSquaredTrainingError;

        trainingErrorLog = tempTrainingErrorLog;

trainingLog << "Best RMSError: " << bestRMSError << " in Random Training Iteration: " << bestIter+1 << std::endl;

errorLog << "train(RegressionData trainingData) - NaN found!" << std::endl;

trainingError = bestRMSError;
Float omBeta = 1.0 - beta;
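// One online gradient-descent step with momentum: beta weights the contribution of the
// previous update and omBeta = 1 - beta weights the current gradient term. (The momentum
// blend lines are elided from this listing; only the plain gradient terms appear below.)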
feedforward(trainingExample,inputNeuronsOuput,hiddenNeuronsOutput,outputNeuronsOutput);

for(UINT i=0; i<numOutputNeurons; i++){
    deltaO[i] = outputLayer[i].getDerivative( outputNeuronsOutput[i] ) * (targetVector[i]-outputNeuronsOutput[i]);

for(UINT i=0; i<numHiddenNeurons; i++){

    for(UINT j=0; j<numOutputNeurons; j++){
        sum += outputLayer[j].weights[i] * deltaO[j];

    deltaH[i] = hiddenLayer[i].getDerivative( hiddenNeuronsOutput[i] ) * sum;
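// Standard backpropagation deltas: the output delta is f'(y) * (target - y), and each
// hidden delta back-propagates the output deltas through the output-layer weights before
// being scaled by the hidden neuron's activation derivative.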
for(UINT i=0; i<numHiddenNeurons; i++){
    for(UINT j=0; j<numInputNeurons; j++){

        update = alpha * inputNeuronsOuput[j] * deltaH[i];

        hiddenLayer[i].weights[j] += update;

        hiddenLayer[i].previousUpdate[j] = update;

for(UINT i=0; i<numOutputNeurons; i++){
    for(UINT j=0; j<numHiddenNeurons; j++){

        update = alpha * hiddenNeuronsOutput[j] * deltaO[i];

        outputLayer[i].weights[j] += update;

        outputLayer[i].previousUpdate[j] = update;

for(UINT i=0; i<numHiddenNeurons; i++){

    update = alpha * deltaH[i];

    hiddenLayer[i].bias += update;

    hiddenLayer[i].previousBiasUpdate = update;

for(UINT i=0; i<numOutputNeurons; i++){

    update = alpha * deltaO[i];

    outputLayer[i].bias += update;

    outputLayer[i].previousBiasUpdate = update;

for(UINT i=0; i<numOutputNeurons; i++){
    error += SQR( targetVector[i] - outputNeuronsOutput[i] );
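// back_prop returns the sum of squared errors for this training example; the caller
// accumulates it into totalSquaredTrainingError and also uses it to detect NaNs.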
if( inputNeuronsOuput.size() != numInputNeurons ) inputNeuronsOuput.resize(numInputNeurons,0);
if( hiddenNeuronsOutput.size() != numHiddenNeurons ) hiddenNeuronsOutput.resize(numHiddenNeurons,0);
if( outputNeuronsOutput.size() != numOutputNeurons ) outputNeuronsOutput.resize(numOutputNeurons,0);

for(UINT i=0; i<numInputNeurons; i++){
    trainingExample[i] = scale(trainingExample[i],inputVectorRanges[i].minValue,inputVectorRanges[i].maxValue,MLP_NEURON_MIN_TARGET,MLP_NEURON_MAX_TARGET);

for(UINT i=0; i<numInputNeurons; i++){
    input[0] = trainingExample[i];
    inputNeuronsOuput[i] = inputLayer[i].fire( input );

for(UINT i=0; i<numHiddenNeurons; i++){
    hiddenNeuronsOutput[i] = hiddenLayer[i].fire( inputNeuronsOuput );

for(UINT i=0; i<numOutputNeurons; i++){
    outputNeuronsOutput[i] = outputLayer[i].fire( hiddenNeuronsOutput );

for(UINT i=0; i<numOutputNeurons; i++){
    outputNeuronsOutput[i] = scale(outputNeuronsOutput[i],MLP_NEURON_MIN_TARGET,MLP_NEURON_MAX_TARGET,targetVectorRanges[i].minValue,targetVectorRanges[i].maxValue);

return outputNeuronsOutput;
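// Inputs are scaled into [MLP_NEURON_MIN_TARGET, MLP_NEURON_MAX_TARGET] before firing the
// network and the outputs are mapped back to the original target ranges, so callers work
// entirely in unscaled units.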
if( inputNeuronsOuput.size() != numInputNeurons ) inputNeuronsOuput.resize(numInputNeurons,0);
if( hiddenNeuronsOutput.size() != numHiddenNeurons ) hiddenNeuronsOutput.resize(numHiddenNeurons,0);
if( outputNeuronsOutput.size() != numOutputNeurons ) outputNeuronsOutput.resize(numOutputNeurons,0);

for(UINT i=0; i<numInputNeurons; i++){

    inputNeuronsOuput[i] = inputLayer[i].fire( input );

for(UINT i=0; i<numHiddenNeurons; i++){
    hiddenNeuronsOutput[i] = hiddenLayer[i].fire( inputNeuronsOuput );

for(UINT i=0; i<numOutputNeurons; i++){
    outputNeuronsOutput[i] = outputLayer[i].fire( hiddenNeuronsOutput );
std::cout << "***************** MLP *****************\n";
std::cout << "NumInputNeurons: " << numInputNeurons << std::endl;
std::cout << "NumHiddenNeurons: " << numHiddenNeurons << std::endl;
std::cout << "NumOutputNeurons: " << numOutputNeurons << std::endl;

std::cout << "InputWeights:\n";
for(UINT i=0; i<numInputNeurons; i++){
    std::cout << "Neuron: " << i << " Bias: " << inputLayer[i].bias << " Weights: ";
    for(UINT j=0; j<inputLayer[i].weights.getSize(); j++){
        std::cout << inputLayer[i].weights[j] << "\t";
    }
    std::cout << std::endl;

std::cout << "HiddenWeights:\n";
for(UINT i=0; i<numHiddenNeurons; i++){
    std::cout << "Neuron: " << i << " Bias: " << hiddenLayer[i].bias << " Weights: ";
    for(UINT j=0; j<hiddenLayer[i].weights.getSize(); j++){
        std::cout << hiddenLayer[i].weights[j] << "\t";
    }
    std::cout << std::endl;

std::cout << "OutputWeights:\n";
for(UINT i=0; i<numOutputNeurons; i++){
    std::cout << "Neuron: " << i << " Bias: " << outputLayer[i].bias << " Weights: ";
    for(UINT j=0; j<outputLayer[i].weights.getSize(); j++){
        std::cout << outputLayer[i].weights[j] << "\t";
    }
    std::cout << std::endl;
for(UINT i=0; i<numInputNeurons; i++){
    if( isNAN(inputLayer[i].bias) ) return true;
    N = inputLayer[i].weights.getSize();
    for(UINT j=0; j<N; j++){
        if( isNAN(inputLayer[i].weights[j]) ) return true;

for(UINT i=0; i<numHiddenNeurons; i++){
    if( isNAN(hiddenLayer[i].bias) ) return true;
    N = hiddenLayer[i].weights.getSize();
    for(UINT j=0; j<N; j++){
        if( isNAN(hiddenLayer[i].weights[j]) ) return true;

for(UINT i=0; i<numOutputNeurons; i++){
    if( isNAN(outputLayer[i].bias) ) return true;
    N = outputLayer[i].weights.getSize();
    for(UINT j=0; j<N; j++){
        if( isNAN(outputLayer[i].weights[j]) ) return true;
bool inline MLP::isNAN(const Float v) const{
    if( v != v ) return true;
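// Under IEEE 754, NaN is the only value for which v != v is true, so this comparison
// detects NaNs without needing std::isnan.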
if( !file.is_open() ){
    errorLog << "saveModelToFile(fstream &file) - File is not open!" << std::endl;

file << "GRT_MLP_FILE_V2.0\n";

errorLog << "saveModelToFile(fstream &file) - Failed to save Regressifier base settings to file!" << std::endl;

file << "NumInputNeurons: " << numInputNeurons << std::endl;
file << "NumHiddenNeurons: " << numHiddenNeurons << std::endl;
file << "NumOutputNeurons: " << numOutputNeurons << std::endl;

file << "NumRandomTrainingIterations: " << numRandomTrainingIterations << std::endl;
file << "Momentum: " << momentum << std::endl;
file << "Gamma: " << gamma << std::endl;
file << "ClassificationMode: " << classificationModeActive << std::endl;
file << "UseNullRejection: " << useNullRejection << std::endl;
file << "RejectionThreshold: " << nullRejectionThreshold << std::endl;
file << "InputLayer: \n";
for(UINT i=0; i<numInputNeurons; i++){
    file << "InputNeuron: " << i+1 << std::endl;
    file << "NumInputs: " << inputLayer[i].numInputs << std::endl;
    file << "Bias: " << inputLayer[i].bias << std::endl;
    file << "Gamma: " << inputLayer[i].gamma << std::endl;
    file << "Weights: " << std::endl;
    for(UINT j=0; j<inputLayer[i].numInputs; j++){
        file << inputLayer[i].weights[j] << "\t";

file << "HiddenLayer: \n";
for(UINT i=0; i<numHiddenNeurons; i++){
    file << "HiddenNeuron: " << i+1 << std::endl;
    file << "NumInputs: " << hiddenLayer[i].numInputs << std::endl;
    file << "Bias: " << hiddenLayer[i].bias << std::endl;
    file << "Gamma: " << hiddenLayer[i].gamma << std::endl;
    file << "Weights: " << std::endl;
    for(UINT j=0; j<hiddenLayer[i].numInputs; j++){
        file << hiddenLayer[i].weights[j] << "\t";

file << "OutputLayer: \n";
for(UINT i=0; i<numOutputNeurons; i++){
    file << "OutputNeuron: " << i+1 << std::endl;
    file << "NumInputs: " << outputLayer[i].numInputs << std::endl;
    file << "Bias: " << outputLayer[i].bias << std::endl;
    file << "Gamma: " << outputLayer[i].gamma << std::endl;
    file << "Weights: " << std::endl;
    for(UINT j=0; j<outputLayer[i].numInputs; j++){
        file << outputLayer[i].weights[j] << "\t";
std::string activationFunction;

if( !file.is_open() ){
    errorLog << "loadModelFromFile(fstream &file) - File is not open!" << std::endl;

if( word == "GRT_MLP_FILE_V1.0" ){
    return loadLegacyModelFromFile( file );
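// The model file format is versioned: V1.0 files are routed to loadLegacyModelFromFile(),
// while the loader below continues for the current V2.0 format.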
if( word != "GRT_MLP_FILE_V2.0" ){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find file header!" << std::endl;

errorLog << "loadModelFromFile(fstream &file) - Failed to load regressifier base settings from file!" << std::endl;

if(word != "NumInputNeurons:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputNeurons!" << std::endl;

file >> numInputNeurons;
numInputDimensions = numInputNeurons;

if(word != "NumHiddenNeurons:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumHiddenNeurons!" << std::endl;

file >> numHiddenNeurons;

if(word != "NumOutputNeurons:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumOutputNeurons!" << std::endl;

file >> numOutputNeurons;

if(word != "InputLayerActivationFunction:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find InputLayerActivationFunction!" << std::endl;

file >> activationFunction;

if(word != "HiddenLayerActivationFunction:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find HiddenLayerActivationFunction!" << std::endl;

file >> activationFunction;

if(word != "OutputLayerActivationFunction:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputLayerActivationFunction!" << std::endl;

file >> activationFunction;

if(word != "NumRandomTrainingIterations:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumRandomTrainingIterations!" << std::endl;

file >> numRandomTrainingIterations;

if(word != "Momentum:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find Momentum!" << std::endl;

if(word != "Gamma:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

if(word != "ClassificationMode:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find ClassificationMode!" << std::endl;

file >> classificationModeActive;

if(word != "UseNullRejection:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find UseNullRejection!" << std::endl;

file >> useNullRejection;

if(word != "RejectionThreshold:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find RejectionThreshold!" << std::endl;

file >> nullRejectionThreshold;

if( trained ) initialized = true;
else init(numInputNeurons,numHiddenNeurons,numOutputNeurons);
inputLayer.resize( numInputNeurons );
hiddenLayer.resize( numHiddenNeurons );
outputLayer.resize( numOutputNeurons );

if(word != "InputLayer:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find InputLayer!" << std::endl;

for(UINT i=0; i<numInputNeurons; i++){
    UINT tempNeuronID = 0;

    if(word != "InputNeuron:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find InputNeuron!" << std::endl;

    file >> tempNeuronID;

    if( tempNeuronID != i+1 ){

        errorLog << "loadModelFromFile(fstream &file) - InputNeuron ID does not match!" << std::endl;

    if(word != "NumInputs:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputs!" << std::endl;

    file >> inputLayer[i].numInputs;

    inputLayer[i].weights.resize( inputLayer[i].numInputs );

    if(word != "Bias:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Bias!" << std::endl;

    file >> inputLayer[i].bias;

    if(word != "Gamma:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

    file >> inputLayer[i].gamma;

    if(word != "Weights:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Weights!" << std::endl;

    for(UINT j=0; j<inputLayer[i].numInputs; j++){
        file >> inputLayer[i].weights[j];
if(word != "HiddenLayer:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find HiddenLayer!" << std::endl;

for(UINT i=0; i<numHiddenNeurons; i++){
    UINT tempNeuronID = 0;

    if(word != "HiddenNeuron:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find HiddenNeuron!" << std::endl;

    file >> tempNeuronID;

    if( tempNeuronID != i+1 ){

        errorLog << "loadModelFromFile(fstream &file) - HiddenNeuron ID does not match!" << std::endl;

    if(word != "NumInputs:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputs!" << std::endl;

    file >> hiddenLayer[i].numInputs;

    hiddenLayer[i].weights.resize( hiddenLayer[i].numInputs );

    if(word != "Bias:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Bias!" << std::endl;

    file >> hiddenLayer[i].bias;

    if(word != "Gamma:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

    file >> hiddenLayer[i].gamma;

    if(word != "Weights:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Weights!" << std::endl;

    for(unsigned int j=0; j<hiddenLayer[i].numInputs; j++){
        file >> hiddenLayer[i].weights[j];
if(word != "OutputLayer:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputLayer!" << std::endl;

for(UINT i=0; i<numOutputNeurons; i++){
    UINT tempNeuronID = 0;

    if(word != "OutputNeuron:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputNeuron!" << std::endl;

    file >> tempNeuronID;

    if( tempNeuronID != i+1 ){

        errorLog << "loadModelFromFile(fstream &file) - OutputNeuron ID does not match!" << std::endl;

    if(word != "NumInputs:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputs!" << std::endl;

    file >> outputLayer[i].numInputs;

    outputLayer[i].weights.resize( outputLayer[i].numInputs );

    if(word != "Bias:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Bias!" << std::endl;

    file >> outputLayer[i].bias;

    if(word != "Gamma:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

    file >> outputLayer[i].gamma;

    if(word != "Weights:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Weights!" << std::endl;

    for(UINT j=0; j<outputLayer[i].numInputs; j++){
        file >> outputLayer[i].weights[j];
if( classificationModeActive ) return numOutputNeurons;

return numInputNeurons;

return numHiddenNeurons;

return numOutputNeurons;

return inputLayerActivationFunction;

return hiddenLayerActivationFunction;

return outputLayerActivationFunction;

return numRandomTrainingIterations;

return learningRate;

return trainingError;

return classificationModeActive;

return !classificationModeActive;

return trainingErrorLog;

return useNullRejection;

return nullRejectionCoeff;

return nullRejectionThreshold;

if( trained ) return maxLikelihood;

if( trained && classificationModeActive ) return classLikelihoods;

if( trained && classificationModeActive ) return regressionData;

if( trained && classificationModeActive ) return predictedClassLabel;
std::string activationName;

switch(activationFunction){
    case(Neuron::LINEAR):
        activationName = "LINEAR";
        break;
    case(Neuron::SIGMOID):
        activationName = "SIGMOID";
        break;
    case(Neuron::BIPOLAR_SIGMOID):
        activationName = "BIPOLAR_SIGMOID";
        break;
    default:
        activationName = "UNKNOWN";
        break;
}

return activationName;
UINT activationFunction = 0;

if(activationName == "LINEAR" ){
    activationFunction = 0;
    return activationFunction;

if(activationName == "SIGMOID" ){
    activationFunction = 1;
    return activationFunction;

if(activationName == "BIPOLAR_SIGMOID" ){
    activationFunction = 2;
    return activationFunction;

return activationFunction;
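// Unrecognised names fall through to the default value 0, i.e. they are mapped to the
// LINEAR activation function rather than reported as an error.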
if( activationFunction >= Neuron::LINEAR && activationFunction < Neuron::NUMBER_OF_ACTIVATION_FUNCTIONS ) return true;
warningLog << "setInputLayerActivationFunction(const UINT activationFunction) - The activation function is not valid. It should be one of the Neuron ActivationFunctions enums." << std::endl;

this->inputLayerActivationFunction = activationFunction;

return init(numInputNeurons,numHiddenNeurons,numOutputNeurons);
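// Note: changing an activation function (or gamma, below) on an initialized network
// re-runs init(), which rebuilds and re-initializes every layer, so any previously
// trained weights are discarded.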
warningLog << "setHiddenLayerActivationFunction(const UINT activationFunction) - The activation function is not valid. It should be one of the Neuron ActivationFunctions enums." << std::endl;

this->hiddenLayerActivationFunction = activationFunction;

return init(numInputNeurons,numHiddenNeurons,numOutputNeurons);

warningLog << "setOutputLayerActivationFunction(const UINT activationFunction) - The activation function is not valid. It should be one of the Neuron ActivationFunctions enums." << std::endl;

this->outputLayerActivationFunction = activationFunction;

return init(numInputNeurons,numHiddenNeurons,numOutputNeurons);
if( momentum >= 0 && momentum <= 1.0 ){
    this->momentum = momentum;

warningLog << "setGamma(const Float gamma) - Gamma must be greater than zero!" << std::endl;

this->gamma = gamma;

return init(numInputNeurons,numHiddenNeurons,numOutputNeurons);

if( numRandomTrainingIterations > 0 ){
    this->numRandomTrainingIterations = numRandomTrainingIterations;

this->useNullRejection = useNullRejection;

if( nullRejectionCoeff > 0 ){
    this->nullRejectionCoeff = nullRejectionCoeff;
bool MLP::loadLegacyModelFromFile( std::fstream &file ){

if(word != "NumInputNeurons:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputNeurons!" << std::endl;

file >> numInputNeurons;
numInputDimensions = numInputNeurons;

if(word != "NumHiddenNeurons:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumHiddenNeurons!" << std::endl;

file >> numHiddenNeurons;

if(word != "NumOutputNeurons:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumOutputNeurons!" << std::endl;

file >> numOutputNeurons;

if(word != "InputLayerActivationFunction:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find InputLayerActivationFunction!" << std::endl;

if(word != "HiddenLayerActivationFunction:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find HiddenLayerActivationFunction!" << std::endl;

if(word != "OutputLayerActivationFunction:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputLayerActivationFunction!" << std::endl;

if(word != "MinNumEpochs:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find MinNumEpochs!" << std::endl;

file >> minNumEpochs;

if(word != "MaxNumEpochs:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find MaxNumEpochs!" << std::endl;

file >> maxNumEpochs;

if(word != "NumRandomTrainingIterations:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find NumRandomTrainingIterations!" << std::endl;

file >> numRandomTrainingIterations;

if(word != "ValidationSetSize:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find ValidationSetSize!" << std::endl;

file >> validationSetSize;

if(word != "MinChange:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find MinChange!" << std::endl;

if(word != "TrainingRate:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find TrainingRate!" << std::endl;

file >> learningRate;

if(word != "Momentum:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find Momentum!" << std::endl;

if(word != "Gamma:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

if(word != "UseValidationSet:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find UseValidationSet!" << std::endl;

file >> useValidationSet;

if(word != "RandomiseTrainingOrder:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find RandomiseTrainingOrder!" << std::endl;

file >> randomiseTrainingOrder;

if(word != "UseScaling:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find UseScaling!" << std::endl;

if(word != "ClassificationMode:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find ClassificationMode!" << std::endl;

file >> classificationModeActive;

if(word != "UseNullRejection:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find UseNullRejection!" << std::endl;

file >> useNullRejection;

if(word != "RejectionThreshold:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find RejectionThreshold!" << std::endl;

file >> nullRejectionThreshold;
inputLayer.resize( numInputNeurons );
hiddenLayer.resize( numHiddenNeurons );
outputLayer.resize( numOutputNeurons );

if(word != "InputLayer:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find InputLayer!" << std::endl;

for(UINT i=0; i<numInputNeurons; i++){
    UINT tempNeuronID = 0;

    if(word != "InputNeuron:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find InputNeuron!" << std::endl;

    file >> tempNeuronID;

    if( tempNeuronID != i+1 ){

        errorLog << "loadModelFromFile(fstream &file) - InputNeuron ID does not match!" << std::endl;

    if(word != "NumInputs:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputs!" << std::endl;

    file >> inputLayer[i].numInputs;

    inputLayer[i].weights.resize( inputLayer[i].numInputs );

    if(word != "Bias:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Bias!" << std::endl;

    file >> inputLayer[i].bias;

    if(word != "Gamma:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

    file >> inputLayer[i].gamma;

    if(word != "Weights:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Weights!" << std::endl;

    for(UINT j=0; j<inputLayer[i].numInputs; j++){
        file >> inputLayer[i].weights[j];
if(word != "HiddenLayer:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find HiddenLayer!" << std::endl;

for(UINT i=0; i<numHiddenNeurons; i++){
    UINT tempNeuronID = 0;

    if(word != "HiddenNeuron:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find HiddenNeuron!" << std::endl;

    file >> tempNeuronID;

    if( tempNeuronID != i+1 ){

        errorLog << "loadModelFromFile(fstream &file) - HiddenNeuron ID does not match!" << std::endl;

    if(word != "NumInputs:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputs!" << std::endl;

    file >> hiddenLayer[i].numInputs;

    hiddenLayer[i].weights.resize( hiddenLayer[i].numInputs );

    if(word != "Bias:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Bias!" << std::endl;

    file >> hiddenLayer[i].bias;

    if(word != "Gamma:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

    file >> hiddenLayer[i].gamma;

    if(word != "Weights:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Weights!" << std::endl;

    for(unsigned int j=0; j<hiddenLayer[i].numInputs; j++){
        file >> hiddenLayer[i].weights[j];
if(word != "OutputLayer:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputLayer!" << std::endl;

for(UINT i=0; i<numOutputNeurons; i++){
    UINT tempNeuronID = 0;

    if(word != "OutputNeuron:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputNeuron!" << std::endl;

    file >> tempNeuronID;

    if( tempNeuronID != i+1 ){

        errorLog << "loadModelFromFile(fstream &file) - OutputNeuron ID does not match!" << std::endl;

    if(word != "NumInputs:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find NumInputs!" << std::endl;

    file >> outputLayer[i].numInputs;

    outputLayer[i].weights.resize( outputLayer[i].numInputs );

    if(word != "Bias:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Bias!" << std::endl;

    file >> outputLayer[i].bias;

    if(word != "Gamma:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Gamma!" << std::endl;

    file >> outputLayer[i].gamma;

    if(word != "Weights:"){

        errorLog << "loadModelFromFile(fstream &file) - Failed to find Weights!" << std::endl;

    for(UINT j=0; j<outputLayer[i].numInputs; j++){
        file >> outputLayer[i].weights[j];
inputVectorRanges.resize( numInputNeurons );
targetVectorRanges.resize( numOutputNeurons );

if(word != "InputVectorRanges:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find InputVectorRanges!" << std::endl;

for(UINT j=0; j<inputVectorRanges.size(); j++){
    file >> inputVectorRanges[j].minValue;
    file >> inputVectorRanges[j].maxValue;

if(word != "OutputVectorRanges:"){

    errorLog << "loadModelFromFile(fstream &file) - Failed to find OutputVectorRanges!" << std::endl;

for(UINT j=0; j<targetVectorRanges.size(); j++){
    file >> targetVectorRanges[j].minValue;
    file >> targetVectorRanges[j].maxValue;
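/*
 Example usage (a minimal sketch, not part of MLP.cpp; it assumes the GRT headers and a
 labelled data set are available, and uses only the MLP methods shown in this listing):

     GRT::MLP mlp;
     mlp.init( trainingData.getNumDimensions(), 10, trainingData.getNumClasses() );

     if( !mlp.train_( trainingData ) ){            // classification-mode training
         std::cout << "Failed to train MLP!" << std::endl;
     }

     GRT::VectorFloat sample = testData[0].getSample();
     if( mlp.predict_( sample ) ){
         std::cout << "Predicted class: " << mlp.getPredictedClassLabel() << std::endl;
     }
*/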