diff --git a/src/methods/lars/lars.cpp b/src/methods/lars/lars.cpp
index b16291ec2fe743685a128e5181a57ce2c46e4555..4c93aa3fe5f7a2aeee4c38b00c4602f1d9b46d87 100644
--- a/src/methods/lars/lars.cpp
+++ b/src/methods/lars/lars.cpp
@@ -24,108 +24,47 @@ bool isModelTrained = false;
 //          const double lambda2,
 //          const double tolerance
 // output:
-void initModelNoDataNoGram(SP_integer useCholesky,
-                           double lambda1, double lambda2, double tol)
+void initAndTrainModel(SP_integer useCholesky,
+                       double lambda1, double lambda2, double tol,
+                       float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
+                       float *responsesArr, SP_integer responsesArrSize,
+                       SP_integer transposeData, double *error)
 {
+    bool transpose = (transposeData == 1);
     regressor = new LARS((useCholesky == 1), lambda1, lambda2, tol);
-    isModelTrained = false;
-}
-
-// input:   const bool useCholesky,
-//          const arma::mat & gramMatrix,
-//          const double lambda1,
-//          const double lambda2,
-//          const double tolerance
-// output:
-void initModelNoDataWithGram(SP_integer useCholesky,
-                             float *gramArr, SP_integer gramSize, SP_integer gramRowNum,
-                             double lambda1, double lambda2, double tol)
-{
-    mat gramMatrix = convertArrayToMat(gramArr, gramSize, gramRowNum);
-
-    regressor = new LARS((useCholesky == 1), gramMatrix, lambda1, lambda2, tol);
-    isModelTrained = false;
-}
-// input:   const arma::mat & dataMatrix,
-//          const arma::rowvec & responses,
-//          const bool transposeData,
-//          const bool useCholesky,
-//          const double lambda1,
-//          const double lambda2,
-//          const double tolerance
-// output:
-void initModelWithDataNoGram(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
-                             float *responsesArr, SP_integer responsesArrSize,
-                             SP_integer transposeData,
-                             SP_integer useCholesky,
-                             double lambda1, double lambda2, double tol)
-{
     // convert the Prolog array to arma::mat
     mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
     // check if labels fit the data
-    if (data.n_cols != responsesArrSize)
-    {
-        raisePrologSystemExeption("The number of data points does not match the number of labels!");
-        return;
-    }
-    // convert the Prolog array to arma::rowvec
-    rowvec responsesVector = convertArrayToRowvec(responsesArr, responsesArrSize);
-
-
-
-    try
+    if (!transpose && data.n_cols != responsesArrSize)
     {
-        regressor = new LARS(data, responsesVector, (transposeData == 1), (useCholesky == 1), lambda1, lambda2, tol);
+        raisePrologSystemExeption("The number of data points does not match the number of labels!");
+        return;
     }
-    catch(const std::exception& e)
+    if (transpose && data.n_rows != responsesArrSize)
     {
-        raisePrologSystemExeption(e.what());
+        raisePrologSystemExeption("The number of data points does not match the number of labels!");
         return;
     }
-    isModelTrained = true;
-}
-
-// input:   const arma::mat & dataMatrix,
-//          const arma::rowvec & responses,
-//          const bool transposeData,
-//          const bool useCholesky,
-//          const arma::mat & gramMatrix,
-//          const double lambda1,
-//          const double lambda2,
-//          const double tolerance
-// output:
-void initModelWithDataWithGram(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
-                               float *responsesArr, SP_integer responsesArrSize,
-                               SP_integer transposeData,
-                               SP_integer useCholesky,
-                               float *gramMatArr, SP_integer gramMatSize, SP_integer gramMatRowNum,
-                               double lambda1, double lambda2, double tol)
-{
-    // convert the Prolog array to arma::mat
-    mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
-    // check if labels fit the data
-    if (data.n_cols != responsesArrSize)
-    {
-        raisePrologSystemExeption("The number of data points does not match the number of labels!");
-        return;
-    }
     // convert the Prolog array to arma::rowvec
     rowvec responsesVector = convertArrayToRowvec(responsesArr, responsesArrSize);
-    // convert the Prolog array to arma::mat
-    mat gram = convertArrayToMat(gramMatArr, gramMatSize, gramMatRowNum);
+    // create the ReturnVector
+    vec beta;
+
+    // run the model function
     try
-    {
-        regressor = new LARS(data, responsesVector, (transposeData == 1), (useCholesky == 1), gram, lambda1, lambda2, tol);
-    }
-    catch(const std::exception& e)
-    {
-        raisePrologSystemExeption(e.what());
+    {
+        *error = regressor.Train(data.t(), responsesVector, beta, transpose);
+    }
+    catch(const std::exception& e)
+    {
+        raisePrologSystemExeption(e.what());
         return;
-    }
+    }
+    isModelTrained = true;
 }
@@ -168,7 +107,7 @@ void betaPath(float **betaPathArr, SP_integer *betaPathColNum, SP_integer *betaP
         return;
     }
     // get the betaPath matrix
-    vector<vec> matrix = regressor.BetaPath();
+    std::vector<vec> matrix = regressor.BetaPath();
 
     // return the matrix dimensions
     *betaPathColNum = matrix[0].size();
@@ -191,13 +130,15 @@ double computeError(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMa
         raisePrologSystemExeption("The Model is not trained!");
         return 0.0;
     }
-    if(dataMatSize / dataMatRowNum != responsesArrSize)
-    {
-        cout << "Target dim doesnt fit to the Data dim" << endl;
-        return 0.0;
-    }
     // convert the Prolog array to arma::mat
     mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
+    // check if labels fit the data
+    if (data.n_cols != responsesArrSize)
+    {
+        raisePrologSystemExeption("The number of data points does not match the number of labels!");
+        return 0.0;
+    }
+
     // convert the Prolog array to arma::rowvec
     rowvec responsesVector = convertArrayToRowvec(responsesArr, responsesArrSize);
@@ -205,7 +146,7 @@ double computeError(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMa
     double result;
     try
     {
-        result = regressor.ComputeError(data, responsesVector, (rowMajor == 1));
+        result = regressor.ComputeError(data.t(), responsesVector, (rowMajor == 1));
     }
     catch(const std::exception& e)
     {
@@ -262,13 +203,18 @@ void predict(float *pointsMatArr, SP_integer pointsMatSize, SP_integer pointsMat
     }
     // convert the Prolog array to arma::mat
    mat points = convertArrayToMat(pointsMatArr, pointsMatSize, pointsMatRowNum);
+    if(points.n_rows != regressor.Beta().n_elem)
+    {
+        raisePrologSystemExeption("The given data points have a different dimension than the training data!");
+        return;
+    }
 
     // create the ReturnVector
     rowvec predicReturnVector;
 
     try
     {
-        regressor.Predict(points, predicReturnVector, (rowMajor == 1));
+        regressor.Predict(points.t(), predicReturnVector, (rowMajor == 1));
     }
     catch(const std::exception& e)
     {
@@ -279,48 +225,4 @@ void predict(float *pointsMatArr, SP_integer pointsMatSize, SP_integer pointsMat
     }
     // return the Vector
     returnVectorInformation(predicReturnVector, predicArr, predicArrSize);
-}
-
-// input:   const arma::mat &data,
-//          const arma::rowvec &responses,
-//          arma::vec& beta,
-//          const bool transposeData
-//
-// output: double minimum cost error
-double train(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
-             float *responsesArr, SP_integer responsesArrSize,
-             float **betaArr, SP_integer *betaArrSize,
-             SP_integer transposeData)
-{
-    if(dataMatSize / dataMatRowNum != responsesArrSize)
-    {
-        cout << "Target dim doesnt fit to the Data dim" << endl;
-        return 0.0;
-    }
-    // convert the Prolog array to arma::mat
-    mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
-    // convert the Prolog array to arma::rowvec
-    rowvec responsesVector = convertArrayToRowvec(responsesArr, responsesArrSize);
-
-    // create the ReturnVector
-    vec betaReturnVector;
-
-
-    // run the model function
-    double error;
-    try
-    {
-        error = regressor.Train(data, responsesVector, betaReturnVector, (transposeData == 1));
-    }
-    catch(const std::exception& e)
-    {
-        raisePrologSystemExeption(e.what());
-        return 0.0;
-    }
-
-
-    // return the Vector
-    returnVectorInformation(betaReturnVector, betaArr, betaArrSize);
-    isModelTrained = true;
-    return error;
-}
+}
\ No newline at end of file
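Note: initAndTrainModel now validates the number of labels against either data.n_cols or data.n_rows, depending on the transpose flag, before calling LARS::Train. In terms of the Prolog predicate declared in lars.pl below, the two queries here are a sketch of the two accepted orientations of the same 12 values; they are illustrative only (they mirror the data used in lars_test.pl) and only exercise the dimension check described above, not the numerical result of training.

%% 3 rows x 4 columns: four points of dimension 3, labels checked against the columns (flag = 0)
?- lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16,
       [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3,
       [0,1,0,1], 0, Error).

%% 4 rows x 3 columns with the transpose flag set: labels checked against the rows (flag = 1)
?- lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16,
       [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4,
       [0,1,0,1], 1, Error).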
diff --git a/src/methods/lars/lars.pl b/src/methods/lars/lars.pl
index 6f647dddc495013019980aba35421c4258788c3f..eb7c4d0a064ac6d859a1fd949ead6e72b775b2ce 100644
--- a/src/methods/lars/lars.pl
+++ b/src/methods/lars/lars.pl
@@ -1,16 +1,12 @@
-:- module(lars, [ lars_initModelNoDataNoGram/4,
-                  lars_initModelNoDataWithGram/6,
-                  lars_initModelWithDataNoGram/8,
-                  lars_initModelWithDataWithGram/10,
+:- module(lars, [ lars_initAndTrainModel/9,
                   lars_activeSet/1,
                   lars_beta/1,
                   lars_betaPath/2,
                   lars_computeError/5,
                   lars_lambdaPath/1,
                   lars_matUtriCholFactor/2,
-                  lars_predict/4,
-                  lars_train/6]).
+                  lars_predict/4]).
 
 
 :- load_files(library(str_decl), [when(compile_time), if(changed)]).
@@ -32,99 +28,27 @@
 %% float32 lambda1       => 0.0,
 %% float32 lambda2       => 0.0,
 %% float32 tolerance     => 1e-16
-%%
-%% --Output--
-%%
-%% --Description--
-%% Only initialize the LARS model.
-%%
-lars_initModelNoDataNoGram(UseCholesky, Lambda1, Lambda2, Tolerance) :-
-    Tolerance > 0,
-    initModelNoDataNoGramI(UseCholesky, Lambda1, Lambda2, Tolerance).
-
-foreign(initModelNoDataNoGram, c, initModelNoDataNoGramI(+integer,
-                                                         +float32, +float32, +float32)).
-
-
-
-%% --Input--
-%% bool useCholesky      => (1)true / (0)false =>false,
-%% mat gramMatrix,
-%% float32 lambda1       => 0.0,
-%% float32 lambda2       => 0.0,
-%% float32 tolerance     => 1e-16
-%%
-%% --Output--
-%%
-%% --Description--
-%% Initialize LARS model, and pass in a precalculated Gram matrix but dont train the model.
-%%
-lars_initModelNoDataWithGram(UseCholesky, GramList, GramRows, Lambda1, Lambda2, Tolerance) :-
-    Tolerance > 0,
-    convert_list_to_float_array(GramList, GramRows, array(Zsize, Zrownum, Z)),
-    initModelNoDataWithGramI(UseCholesky, Z, Zsize, Zrownum, Lambda1, Lambda2, Tolerance).
-
-foreign(initModelNoDataWithGram, c, initModelNoDataWithGramI( +integer,
-                                                              +pointer(float_array), +integer, +integer,
-                                                              +float32, +float32, +float32)).
-
-
-%% --Input--
-%% mat data,
-%% vec responses,
-%% bool transposeData    => (1)true / (0)false =>true,
-%% bool useCholesky      => (1)true / (0)false =>false,
-%% float32 lambda1       => 0.0,
-%% float32 lambda2       => 0.0,
-%% float32 tolerance     => 1e-16
-%%
-%% --Output--
-%%
-%% --Description--
-%% Initialize LARS model, and train the model.
-%%
-lars_initModelWithDataNoGram(DataList, DataRows, ResponsesList, TransposeData, UseCholesky, Lambda1, Lambda2, Tolerance) :-
-    Tolerance > 0,
-    convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
-    convert_list_to_float_array(ResponsesList, array(Ysize, Y)),
-    initModelWithDataNoGramI(X, Xsize, Xrownum, Y, Ysize, TransposeData, UseCholesky, Lambda1, Lambda2, Tolerance).
-
-foreign(initModelWithDataNoGram, c, initModelWithDataNoGramI( +pointer(float_array), +integer, +integer,
-                                                              +pointer(float_array), +integer,
-                                                              +integer,
-                                                              +integer,
-                                                              +float32, +float32, +float32)).
-
-
-
-%% --Input--
 %% mat data,
 %% vec responses,
-%% bool transposeData    => (1)true / (0)false =>true,
-%% bool useCholesky      => (1)true / (0)false =>false,
-%% mat gramMatrix,
-%% float32 lambda1       => 0.0,
-%% float32 lambda2       => 0.0,
-%% float32 tolerance     => 1e-16
+%% bool rowMajor => (1)true / (0)false => true
 %%
 %% --Output--
+%% float32 error
 %%
 %% --Description--
-%% Initialize LARS model, pass in a precalculated Gram matrix and train the model.
+%% Initialize the LARS model and train it.
 %%
-lars_initModelWithDataWithGram(DataList, DataRows, ResponsesList, TransposeData, UseCholesky, GramList, GramRows, Lambda1, Lambda2, Tolerance) :-
+lars_initAndTrainModel(UseCholesky, Lambda1, Lambda2, Tolerance, DataList, DataPointsDim, ResponsesList, RowMajor, Error) :-
     Tolerance > 0,
-    convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
+    convert_list_to_float_array(DataList, DataPointsDim, array(Xsize, Xrownum, X)),
     convert_list_to_float_array(ResponsesList, array(Ysize, Y)),
-    convert_list_to_float_array(GramList, GramRows, array(Zsize, Zrownum, Z)),
-    initModelWithDataWithGramI(X, Xsize, Xrownum, Y, Ysize, TransposeData, UseCholesky, Z, Zsize, Zrownum, Lambda1, Lambda2, Tolerance).
+    initAndTrainModelI(UseCholesky, Lambda1, Lambda2, Tolerance, X, Xsize, Xrownum, Y, Ysize, RowMajor, Error).
 
-foreign(initModelWithDataWithGram, c, initModelWithDataWithGramI(+pointer(float_array), +integer, +integer,
+foreign(initAndTrainModel, c, initAndTrainModelI(       +integer,
+                                                         +float32, +float32, +float32,
+                                                         +pointer(float_array), +integer, +integer,
                                                          +pointer(float_array), +integer,
-                                                         +integer,
-                                                         +integer,
-                                                         +pointer(float_array), +integer, +integer,
-                                                         +float32, +float32, +float32)).
+                                                         +integer, -float32)).
@@ -245,43 +169,14 @@ foreign(predict, c, predictI(   +pointer(float_array), +integer, +integer,
                                 +integer)).
 
 
-%% --Input--
-%% mat data,
-%% vec responses,
-%% bool rowMajor => (1)true / (0)false => true
-%%
-%% --Output--
-%% vec beta,
-%% float32 error
-%%
-%% --Description--
-%% Train the LARS model with the given data.
-%%
-lars_train(DataList, DataRows, ResponsesList, BetaList, RowMajor, Error) :-
-    convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
-    convert_list_to_float_array(ResponsesList, array(Ysize, Y)),
-    trainI(X, Xsize, Xrownum, Y, Ysize, Z, Zsize, RowMajor, Error),
-    convert_float_array_to_list(Z, Zsize, BetaList).
-
-foreign(train, c, trainI(   +pointer(float_array), +integer, +integer,
-                            +pointer(float_array), +integer,
-                            -pointer(float_array), -integer,
-                            +integer,
-                            [-float32])).
-
-
 %% Defines what functions should be connected from main.cpp
-foreign_resource(lars, [initModelNoDataNoGram,
-                        initModelNoDataWithGram,
-                        initModelWithDataNoGram,
-                        initModelWithDataWithGram,
+foreign_resource(lars, [initAndTrainModel,
                         activeSet,
                         beta,
                         betaPath,
                         computeError,
                         lambdaPath,
                         matUtriCholFactor,
-                        predict,
-                        train]).
+                        predict]).
 
 :- load_foreign_resource(lars).
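Read together with the foreign declaration above, the merged predicate takes the hyperparameters first and the data last; the final integer flag is named RowMajor in lars.pl but is passed to the transposeData parameter of initAndTrainModel in lars.cpp. The helper below is hypothetical and not part of this patch; it only spells out the argument order exercised by the updated tests.

%% Hypothetical convenience wrapper, shown only to document the argument order.
train_lars(DataList, DataPointsDim, ResponsesList, Error) :-
    lars_initAndTrainModel(0,            % UseCholesky: 0 = false
                           0.0, 0.0,     % Lambda1, Lambda2
                           1.0e-16,      % Tolerance, must be > 0
                           DataList, DataPointsDim,
                           ResponsesList,
                           0,            % RowMajor / transposeData flag
                           Error).       % minimum cost error returned by LARS::Train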
diff --git a/src/methods/lars/lars_test.pl b/src/methods/lars/lars_test.pl
index f3bb0e48a56e9a7a17cba3dd787b3257ca3e8e94..7332dbd3a788c8a98025522daca365dcea7ca8eb 100644
--- a/src/methods/lars/lars_test.pl
+++ b/src/methods/lars/lars_test.pl
@@ -8,128 +8,46 @@
 :- use_module(lars).
 :- use_module('../../helper_files/helper.pl').
 
-reset_Model_NoTrain :-
-    lars_initModelNoDataNoGram(0, 0.0, 0.0, 1.0e-16).
 
 reset_Model_WithTrain :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, 0, 0.0, 0.0, 1.0e-16).
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, _).
 
 %%
-%% TESTING predicate lars_initModelNoDataNoGram/4
+%% TESTING predicate lars_initAndTrainModel/9
 %%
-:- begin_tests(lars_initModelNoDataNoGram).
+:- begin_tests(lars_initAndTrainModel).
 
 %% Failure Tests
 
-test(lars_InitModelNoDataNoGram_Negative_Tolerance, fail) :-
-    lars_initModelNoDataNoGram(0, 0.0, 0.0, -1.0e-16).
-
-
-%% Successful Tests
+test(lars_initAndTrainModel_Negative_Tolerance, fail) :-
+    lars_initAndTrainModel(0, 0.0, 0.0, -1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, _).
 
-test(lars_InitModelNoDataNoGram_Normal_Use) :-
-    lars_initModelNoDataNoGram(0, 0.0, 0.0, 1.0e-16).
+test(lars_initAndTrainModel_Too_Few_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1], 0, _).
 
-test(lars_InitModelNoDataNoGram_Alternative_Use) :-
-    lars_initModelNoDataNoGram(1, -1.0, 1.0, 1.0e-16).
+test(lars_initAndTrainModel_Too_Many_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1,0,1], 0, _).
 
-:- end_tests(lars_initModelNoDataNoGram).
+%% does not cause an error
+test(lars_initAndTrainModel_Too_Many_Labelclasses) :-
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,2,3], 0, _).
 
-
-%%
-%% TESTING predicate lars_initModelNoDataWithGram/6
-%%
-:- begin_tests(lars_initModelNoDataWithGram).
-
-%% Failure Tests
-
-test(lars_InitModelNoDataWithGram_Negative_Tolerance, fail) :-
-    lars_initModelNoDataWithGram(0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, -1.0e-16).
+test(lars_initAndTrainModel_Wrongly_Transposed, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, _).
 
 %% Successful Tests
 
-test(lars_InitModelNoDataWithGram_Normal_Use) :-
-    lars_initModelNoDataWithGram(0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, 1.0e-16).
-
-test(lars_InitModelNoDataWithGram_Alternative_Use) :-
-    lars_initModelNoDataWithGram(1, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, -1.0, 1.0, 1.0e-16).
-
-:- end_tests(lars_initModelNoDataWithGram).
-
-
-
-%%
-%% TESTING predicate lars_initModelWithDataNoGram/8
-%%
-:- begin_tests(lars_initModelWithDataNoGram).
-
-%% Failure Tests
-
-test(lars_InitModelWithDataNoGram_Negative_Tolerance, fail) :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, 0, 0.0, 0.0, -1.0e-16).
-
-
-test(lars_InitModelWithDataNoGram_Too_Few_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1], 0, 0, 0.0, 0.0, 1.0e-16).
-
-test(lars_InitModelWithDataNoGram_Too_Many_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1,0,1], 0, 0, 0.0, 0.0, 1.0e-16).
-
-%% doesnt cause error
-test(lars_InitModelWithDataNoGram_Too_Many_Labelclasses) :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,2,3], 1, 0, 0.0, 0.0, 1.0e-16).
-
-
-%% Successful Tests
-
-test(lars_InitModelWithDataNoGram_Normal_Use) :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, 0, 0.0, 0.0, 1.0e-16).
-
-test(lars_InitModelWithDataNoGram_Alternative_Use) :-
-    lars_initModelWithDataNoGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, 1, 1.0, -1.0, 1.0e-16).
-
-:- end_tests(lars_initModelWithDataNoGram).
-
-
-
-%%
-%% TESTING predicate lars_initModelWithDataWithGram/10
-%%
-:- begin_tests(lars_initModelWithDataWithGram).
-
-%% Failure Tests
-
-test(lars_InitModelWithDataWithGram_Negative_Tolerance, fail) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, -1.0e-16).
-
-
-test(lars_InitModelWithDataWithGram_Too_Few_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1], 0, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, 1.0e-16).
-
-test(lars_InitModelWithDataWithGram_Too_Many_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1,0,1], 0, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, 1.0e-16).
-
-test(lars_InitModelWithDataWithGram_Too_Many_Labelclasses, [error(_,system_error('Error'))]) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,2,3], 1, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, 1.0e-16).
-
-
-test(lars_InitModelWithDataWithGram_Diffrent_Dimensions, [error(_,system_error('Error'))]) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, 0.0, 0.0, 1.0e-16).
-
-
-%% Successful Tests
-
-test(lars_InitModelWithDataWithGram_Normal_Use) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 0.0, 0.0, 1.0e-16).
+test(lars_initAndTrainModel_Normal_Use) :-
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, _).
 
-test(lars_InitModelWithDataWithGram_Alternative_Use) :-
-    lars_initModelWithDataWithGram([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, 0, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 1.0, -1.0, 1.0e-16).
+test(lars_initAndTrainModel_Alternative_Use) :-
+    lars_initAndTrainModel(1, -1.0, 1.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, [0,1,0], 0, _).
 
-:- end_tests(lars_initModelWithDataWithGram).
+:- end_tests(lars_initAndTrainModel).
@@ -139,10 +57,6 @@ test(lars_InitModelWithDataWithGram_Alternative_Use) :-
 :- begin_tests(lars_activeSet).
 
 %% Failure Tests
-
-test(lars_ActiveSet_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_activeSet(_).
 
 %% Successful Tests
@@ -163,19 +77,15 @@ test(lars_ActiveSet_Normal_Use) :-
 :- begin_tests(lars_beta).
 
 %% Failure Tests
-
-test(lars_Beta_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_beta(_).
 
 %% Successful Tests
 
-%%test(lars_Beta_Normal_Use) :-
-%%    reset_Model_WithTrain,
-%%    lars_beta(BetaList),
-%%    print('\nBeta : '),
-%%    print(BetaList).
+test(lars_Beta_Normal_Use) :-
+    reset_Model_WithTrain,
+    lars_beta(BetaList),
+    print('\nBeta : '),
+    print(BetaList).
 
 :- end_tests(lars_beta).
@@ -187,10 +97,6 @@ test(lars_Beta_Before_Train, [error(_,system_error('The Model is not trained!'))
 :- begin_tests(lars_betaPath).
 
 %% Failure Tests
-
-test(lars_BetaPath_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_betaPath(_, _).
 
 %% Successful Tests
@@ -212,29 +118,25 @@ test(lars_BetaPath_Normal_Use) :-
 
 %% Failure Tests
 
-test(lars_ComputeError_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, _).
-
-
-test(lars_ComputeError_Too_Few_Labels, [error(_,system_error('Error'))]) :-
+test(lars_ComputeError_Too_Few_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
     reset_Model_WithTrain,
-    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1], 0, _).
+    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1], 1, _).
 
-test(lars_ComputeError_Too_Many_Labels, [error(_,system_error('Error'))]) :-
+test(lars_ComputeError_Too_Many_Labels, [error(_,system_error('The number of data points does not match the number of labels!'))]) :-
     reset_Model_WithTrain,
-    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1,0,1], 0, _).
+    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1,0,1], 1, _).
 
-test(lars_ComputeError_Too_Many_Labelclasses, [error(_,system_error('Error'))]) :-
+%% does not cause an error
+test(lars_ComputeError_Too_Many_Labelclasses) :-
     reset_Model_WithTrain,
-    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,2,3], 0, _).
+    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,2,3], 1, _).
 
 
 %% Successful Tests
 
 test(lars_ComputeError_Normal_Use) :-
     reset_Model_WithTrain,
-    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, Error),
+    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, Error),
     print('\nError : '),
     print(Error).
@@ -245,10 +147,10 @@ test(lars_ComputeError_Alternative_Use) :-
     print(Error).
 
 test(lars_ComputeError_CSV_Input) :-
-    reset_Model_WithTrain,
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, [0,1,0], 0, _),
     open('src/data_csv/iris2.csv', read, File),
     take_csv_row(File, skipFirstRow,10, Data),
-    lars_computeError(Data, 4, [0,1,0,1,1,0,1,1,1,0], 0, Error),
+    lars_computeError(Data, 4, [0,1,0,1,1,0,1,1,1,0], 1, Error),
     print('\nError : '),
     print(Error).
@@ -262,11 +164,6 @@ test(lars_ComputeError_CSV_Input) :-
 :- begin_tests(lars_lambdaPath).
 
 %% Failure Tests
-
-test(lars_LambdaPath_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_lambdaPath(_).
-
 
 %% Successful Tests
@@ -286,11 +183,6 @@ test(lars_LambdaPath_Normal_Use) :-
 :- begin_tests(lars_matUtriCholFactor).
 
 %% Failure Tests
-
-test(lars_MatUtriCholFactor_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_matUtriCholFactor(_, _).
-
 
 %% Successful Tests
@@ -311,104 +203,34 @@ test(lars_MatUtriCholFactor_Normal_Use) :-
 
 %% Failure Tests
 
-test(lars_Predict_Before_Train, [error(_,system_error('The Model is not trained!'))]) :-
-    reset_Model_NoTrain,
-    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 1, _).
-
-
-test(lars_Predict_Diffrent_Dims, [error(_,system_error('Error'))]) :-
+test(lars_Predict_Different_Dims, [error(_,system_error('The given data points have a different dimension than the training data!'))]) :-
     reset_Model_WithTrain,
-    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, 1, _).
+    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, _, 1).
 
 
 %% Successful Tests
 
 test(lars_Predict_Normal_Use) :-
     reset_Model_WithTrain,
-    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 1, PredictList),
+    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, PredictList, 1),
     print('\nPrediction: '),
     print(PredictList).
 
 test(lars_Predict_Alternative_Use) :-
     reset_Model_WithTrain,
-    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, 1, PredictList),
+    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, PredictList, 1),
     print('\nPrediction: '),
     print(PredictList).
 
 test(lars_Predict_CSV_Input) :-
-    reset_Model_WithTrain,
+    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16, [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, [0,1,0], 0, _),
     open('src/data_csv/iris2.csv', read, File),
     take_csv_row(File, skipFirstRow,10, Data),
-    lars_predict(Data, 4, 1, PredictList),
+    lars_predict(Data, 4, PredictList, 1),
     print('\nPrediction: '),
     print(PredictList).
 
 :- end_tests(lars_predict).
-
-
-%%
-%% TESTING predicate lars_train/10
-%%
-:- begin_tests(lars_train).
-
-%% Failure Tests\
-
-test(lars_Train_Diffrent_Dims, [error(_,system_error('Error'))]) :-
-    reset_Model_WithTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 4, [0,1,0], 0, _, _).
-
-
-test(lars_Train_Too_Few_Labels, [error(_,system_error('Error'))]) :-
-    reset_Model_NoTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1], 0, _, _).
-
-test(lars_Train_Too_Many_Labels, [error(_,system_error('Error'))]) :-
-    reset_Model_NoTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1,0,1], 0, _, _).
-
-test(lars_Train_Too_Many_Labelclasses, [error(_,system_error('Error'))]) :-
-    reset_Model_NoTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,2,3], 0, _, _).
-
-
-%% Successful Tests
-
-test(lars_Train_Normal_Use) :-
-    reset_Model_NoTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, BetaList, Error),
-    print('\nBeta: '),
-    print(BetaList),
-    print('\nError: '),
-    print(Error).
-
-test(lars_Train_Normal_Use) :-
-    reset_Model_WithTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 0, BetaList, Error),
-    print('\nBeta: '),
-    print(BetaList),
-    print('\nError: '),
-    print(Error).
-
-test(lars_Train_Alternative_Use) :-
-    reset_Model_NoTrain,
-    lars_train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, BetaList, Error),
-    print('\nBeta: '),
-    print(BetaList),
-    print('\nError: '),
-    print(Error).
-
-test(lars_Train_CSV_Input) :-
-    reset_Model_NoTrain,
-    open('src/data_csv/iris2.csv', read, File),
-    take_csv_row(File, skipFirstRow,10, Data),
-    lars_train(Data, 4, [0,1,0,1,1,0,1,1,1,0], 0, BetaList, Error),
-    print('\nBeta: '),
-    print(BetaList),
-    print('\nError: '),
-    print(Error).
-
-:- end_tests(lars_train).
-
 run_lars_tests :-
-    run_tests(lars_predict).
\ No newline at end of file
+    run_tests.
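Taken together, the reworked test suite exercises the full workflow roughly as follows. The clause below is an illustrative sketch assembled from the calls made in lars_test.pl above; it assumes lars.pl is loaded and is not part of the patch.

%% Illustrative end-to-end run using only predicates exported by lars.pl.
lars_example(Predictions, Error) :-
    lars_initAndTrainModel(0, 0.0, 0.0, 1.0e-16,
        [5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3,
        [0,1,0,1], 0, _TrainError),
    lars_beta(Beta),
    print('Beta: '), print(Beta), nl,
    lars_predict([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, Predictions, 1),
    lars_computeError([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,1], 1, Error).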