Commit b7dbae87 authored by Jakhes's avatar Jakhes
Preparing Methods for tests

parent 70070da2
Showing with 823 additions and 466 deletions
:- module(lmnn, [lmnn/20]).
:- module(lmnn, [lmnn/17]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......
:- module(local_coordinate_tests, [run_local_coordinate_tests/0]).
:- module(local_coordinate_coding_tests, [run_local_coordinate_coding_tests/0]).
:- use_module(library(plunit)).
......@@ -39,6 +39,6 @@ test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
:- end_tests(predicate).
run_local_coordinate_tests :-
run_local_coordinate_coding_tests :-
run_tests.
......@@ -41,3 +41,4 @@ test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
run_new_method_tests :-
run_tests.
......@@ -18,7 +18,6 @@ using namespace std;
using namespace mlpack::amf;
// TODO:
// input: const TerminationPolicyType & terminationPolicy = TerminationPolicyType(),
// const InitializationRuleType & initializeRule = InitializationRuleType(),
// const UpdateRuleType & update = UpdateRuleType()
......@@ -30,19 +29,25 @@ using namespace mlpack::amf;
// output: double
//
// description:
void nmf(char const *updateRule, SP_integer maxIterations, double minResidue,
// Initializes the nmf model and applies it to the given data.
//
void nmf(char const *updateRule,
SP_integer maxIterations, double minResidue,
float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
SP_integer rank,
float **WMatArr, SP_integer *WMatColNum, SP_integer *WMatRowNum,
float **HMatArr, SP_integer *HMatColNum, SP_integer *HMatRowNum)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// create the ReturnMats
mat WReturnMat;
mat HReturnMat;
try
{
// decide which update rule to use
if (strcmp(updateRule, "multdist") == 0)
{
......@@ -61,8 +66,16 @@ void nmf(char const *updateRule, SP_integer maxIterations, double minResidue,
}
else
{
cout << "wrong updateRule input" << endl;
raisePrologDomainExeption(updateRule, 1, "The given UpdateRule is unknown!", "nmf");
return;
}
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
// return the Matrices
returnMatrixInformation(WReturnMat, WMatArr, WMatColNum, WMatRowNum);
......
:- module(nmf, [nmf/13]).
:- module(nmf, [nmf/10]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......@@ -31,7 +31,13 @@
%% --Description--
%% Initializes the nmf model and applies it to the given data.
%%
foreign(nmf, c, nmf(+string, +integer, +float32,
nmf(UpdateRule, MaxIterations, MinResidue, DataList, DataRows, Rank, WList, YCols, HList, ZCols) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
nmfI(UpdateRule, MaxIterations, MinResidue, X, Xsize, Xrows, Rank, Y, YCols, YRows, Z, ZCols, ZRows),
convert_float_array_to_2d_list(Y, YCols, YRows, WList),
convert_float_array_to_2d_list(Z, ZCols, ZRows, HList).
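%% A minimal usage sketch, assuming the foreign library is linked and that
%% multdist is one of the accepted update rule atoms; the 12 data values and
%% the row count 3 mirror the small test matrices used elsewhere in this commit.
example_nmf(WList, HList) :-
    nmf(multdist, 100, 1.0e-5,
        [5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
        2,
        WList, _WCols, HList, _HCols).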
foreign(nmf, c, nmfI( +string, +integer, +float32,
+pointer(float_array), +integer, +integer,
+integer,
-pointer(float_array), -integer, -integer,
......
:- module(nmf_tests, [run_nmf_tests/0]).
:- use_module(library(plunit)).
:- use_module(nmf).
......@@ -6,51 +9,36 @@
reset_Model :-
initModel(1,0,50,0.0001).
:- begin_tests(lists).
%% alpha tests
test(alpha_std_init) :-
reset_Model,
alpha(0).
test(alpha_wrong_input, fail) :-
reset_Model,
alpha(1).
test(alpha_after_train, A =:= 9223372036854775808) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize),
alpha(A).
%% train tests
test(correct_train) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train, fail) :-
reset_Model,
convert_list_to_float_array([],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train2, fail) :-
reset_Model,
convert_list_to_float_array([],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train4) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],2, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
:- end_tests(lists).
\ No newline at end of file
%%
%% TESTING predicate predicate/10
%%
:- begin_tests(predicate).
%% Failure Tests
test(testDescription, [error(domain_error('expectation' , culprit), _)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, culprit, 50, 0.0001, _).
test(testDescription2, [error(_,system_error('The values of the Label have to start at 0 and be >= 0 and < the given numClass!'))]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,2], 2, perceptron, 50, 0.0001, _).
%% Successful Tests
test(testDescription3, [true(Error =:= 1)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, perceptron, 50, 0.0001, Error).
test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
reset_Model_No_Train(perceptron),
open('/home/afkjakhes/eclipse-workspace/prolog-mlpack-libary/src/data_csv/iris2.csv', read, File),
take_csv_row(File, skipFirstRow,10, Data),
train(Data, 4, [0,1,0,1,1,0,1,1,1,0], 2, perceptron, 50, 0.0001, Error).
:- end_tests(predicate).
run_nmf_tests :-
run_tests.
......@@ -20,7 +20,6 @@ using namespace std;
using namespace mlpack::pca;
// TODO:
// input: const bool scaleData = false,
// const DecompositionPolicy & decomposition = DecompositionPolicy(),
// const arma::mat & data,
......@@ -29,6 +28,8 @@ using namespace mlpack::pca;
// arma::mat & eigvec
// output:
// description:
// Apply Principal Component Analysis to the provided data set.
//
void pca(SP_integer scaleData, char const *decompositionPolicy,
float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float **transformedMatArr, SP_integer *transformedMatColNum, SP_integer *transformedMatRowNum,
......@@ -46,6 +47,8 @@ void pca(SP_integer scaleData, char const *decompositionPolicy,
mat eigVecReturnMat;
try
{
// decide for the decomposition Policy
if (strcmp(decompositionPolicy, "exact") == 0)
{
......@@ -65,7 +68,14 @@ void pca(SP_integer scaleData, char const *decompositionPolicy,
}
else
{
cout << "wrong decompositionPolicy input" << endl;
raisePrologDomainExeption(decompositionPolicy, 2, "The given DecompositionPolicy is unknown!", "pca");
return;
}
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
......@@ -77,13 +87,16 @@ void pca(SP_integer scaleData, char const *decompositionPolicy,
returnMatrixInformation(eigVecReturnMat, eigVecMatArr, eigVecMatColNum, eigVecMatRowNum);
}
// TODO:
// input: const bool scaleData = false,
// const DecompositionPolicy & decomposition = DecompositionPolicy(),
// arma::mat & data,
// const size_t newDimension
// output:
// description:
// Use PCA for dimensionality reduction on the given dataset.
// Define the new dimensionality of the data with newDimension.
//
double pcaDimReduction(SP_integer scaleData, char const *decompositionPolicy,
float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
SP_integer newDimension,
......@@ -95,6 +108,8 @@ double pcaDimReduction(SP_integer scaleData, char const *decompositionPolicy,
double returnValue;
try
{
// decide for the decomposition Policy
if (strcmp(decompositionPolicy, "exact") == 0)
{
......@@ -114,8 +129,14 @@ double pcaDimReduction(SP_integer scaleData, char const *decompositionPolicy,
}
else
{
cout << "wrong decompositionPolicy input" << endl;
returnValue = -1.0;
raisePrologDomainExeption(decompositionPolicy, 2, "The given DecompositionPolicy is unknown!", "pca");
return 0.0;
}
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0.0;
}
......@@ -125,13 +146,16 @@ double pcaDimReduction(SP_integer scaleData, char const *decompositionPolicy,
return returnValue;
}
// TODO:
// input: const bool scaleData = false,
// const DecompositionPolicy & decomposition = DecompositionPolicy(),
// arma::mat & data,
// const double varRetained
// output:
// description:
// Use PCA for dimensionality reduction on the given dataset.
// Define the variance to which the data should be reduced.
//
double pcaVarianceDimReduction(SP_integer scaleData, char const *decompositionPolicy,
float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
double varRetained,
......@@ -143,6 +167,8 @@ double pcaVarianceDimReduction(SP_integer scaleData, char const *decompositionPo
double returnValue;
try
{
// decide for the decomposition Policy
if (strcmp(decompositionPolicy, "exact") == 0)
{
......@@ -162,8 +188,14 @@ double pcaVarianceDimReduction(SP_integer scaleData, char const *decompositionPo
}
else
{
cout << "wrong decompositionPolicy input" << endl;
returnValue = -1.0;
raisePrologDomainExeption(decompositionPolicy, 2, "The given DecompositionPolicy is unknown!", "pca");
return 0.0;
}
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0.0;
}
......
:- module(pca, [pca/13,
pcaDimReduction/10,
pcaVarianceDimReduction/10]).
:- module(pca, [pca/9,
pcaDimReduction/8,
pcaVarianceDimReduction/8]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......@@ -32,7 +32,14 @@
%% --Description--
%% Apply Principal Component Analysis to the provided data set.
%%
foreign(pca, c, pca( +integer, +string,
pca(ScaleData, DecompositionPolicy, DataList, DataRows, TransformedList, TDataCols, EigValList, EigVecList, ZCols) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
pcaI(ScaleData, DecompositionPolicy, X, Xsize, Xrows, TData, TDataCols, TDataRows, Y, Ysize, Z, ZCols, ZRows),
convert_float_array_to_2d_list(TData, TDataCols, TDataRows, TransformedList),
convert_float_array_to_list(Y, Ysize, EigValList),
convert_float_array_to_2d_list(Z, ZCols, ZRows, EigVecList).
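%% A minimal usage sketch, assuming the foreign library is linked and that
%% exact is an accepted decomposition policy atom; ScaleData 0 leaves the data
%% unscaled and the data values are illustrative.
example_pca(TransformedList, EigValList, EigVecList) :-
    pca(0, exact,
        [5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
        TransformedList, _TCols, EigValList, EigVecList, _EigVecCols).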
foreign(pca, c, pcaI( +integer, +string,
+pointer(float_array), +integer, +integer,
-pointer(float_array), -integer, -integer,
-pointer(float_array), -integer,
......@@ -53,7 +60,12 @@ foreign(pca, c, pca( +integer, +string,
%% Use PCA for dimensionality reduction on the given dataset.
%% Define the new dimensionality of the data with newDimension.
%%
foreign(pcaDimReduction, c, pcaDimReduction( +integer, +string,
pcaDimReduction(ScaleData, DecompositionPolicy, DataList, DataRows, NewDim, TransformedList, TDataCols, Variance) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
pcaDimReductionI(ScaleData, DecompositionPolicy, X, Xsize, Xrows, NewDim, TData, TDataCols, TDataRows, Variance),
convert_float_array_to_2d_list(TData, TDataCols, TDataRows, TransformedList).
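%% Sketch of a reduction to two dimensions under the same assumptions as the
%% pca/9 sketch above; Variance should be bound to the value returned by the
%% C++ side (the amount of variance retained, per the mlpack PCA interface).
example_pca_dim_reduction(TransformedList, Variance) :-
    pcaDimReduction(0, exact,
        [5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
        2,
        TransformedList, _TCols, Variance).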
foreign(pcaDimReduction, c, pcaDimReductionI( +integer, +string,
+pointer(float_array), +integer, +integer,
+integer,
-pointer(float_array), -integer, -integer,
......@@ -74,7 +86,12 @@ foreign(pcaDimReduction, c, pcaDimReduction( +integer, +string,
%% Use PCA for dimensionality reduction on the given dataset.
%% Define the variance to which the data should be reduced.
%%
foreign(pcaVarianceDimReduction, c, pcaVarianceDimReduction( +integer, +string,
pcaVarianceDimReduction(ScaleData, DecompositionPolicy, DataList, DataRows, VarRetained, TransformedList, TDataCols, Variance) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
pcaVarianceDimReductionI(ScaleData, DecompositionPolicy, X, Xsize, Xrows, VarRetained, TData, TDataCols, TDataRows, Variance),
convert_float_array_to_2d_list(TData, TDataCols, TDataRows, TransformedList).
foreign(pcaVarianceDimReduction, c, pcaVarianceDimReductionI( +integer, +string,
+pointer(float_array), +integer, +integer,
+float32,
-pointer(float_array), -integer, -integer,
......
:- module(pca_tests, [run_pca_tests/0]).
:- use_module(library(plunit)).
:- use_module(pca).
......@@ -17,3 +20,36 @@ test(alpha_after_train) :-
print(TOut).
:- end_tests(lists).
%%
%% TESTING predicate predicate/10
%%
:- begin_tests(predicate).
%% Failure Tests
test(testDescription, [error(domain_error('expectation' , culprit), _)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, culprit, 50, 0.0001, _).
test(testDescription2, [error(_,system_error('The values of the Label have to start at 0 and be >= 0 and < the given numClass!'))]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,2], 2, perceptron, 50, 0.0001, _).
%% Successful Tests
test(testDescription3, [true(Error =:= 1)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, perceptron, 50, 0.0001, Error).
test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
reset_Model_No_Train(perceptron),
open('/home/afkjakhes/eclipse-workspace/prolog-mlpack-libary/src/data_csv/iris2.csv', read, File),
take_csv_row(File, skipFirstRow,10, Data),
train(Data, 4, [0,1,0,1,1,0,1,1,1,0], 2, perceptron, 50, 0.0001, Error).
:- end_tests(predicate).
run_pca_tests :-
run_tests.
......@@ -18,104 +18,151 @@ using namespace mlpack::perceptron;
// Global Variable of the GlobalMethodObject object so it can be accessed from all functions
Perceptron perceptronGlobal;
// TODO:
// input: const size_t numClasses = 0,
// const size_t dimensionality = 0,
// const size_t maxIterations = 1000
// output:
// description:
// Initializes the perceptron model and its weight matrix but doesn't train it.
//
void initModelNoTrain(SP_integer numClasses, SP_integer dimensionality, SP_integer maxIterations)
{
try
{
perceptronGlobal = Perceptron(numClasses, dimensionality, maxIterations);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
}
// TODO:
// input: const MatType & data,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
// const size_t maxIterations = 1000
// output:
// description:
void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize, SP_integer numClasses, SP_integer maxIterations)
// Initializes the perceptron model and its weight matrix and trains it with the given data.
//
void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer numClasses, SP_integer maxIterations)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
try
{
perceptronGlobal = Perceptron(data, labelsVector, numClasses, maxIterations);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
}
// TODO:
// input:
// output: arma::vec& biases
// description:
// Get the biases.
//
void biases(float **biasesArr, SP_integer *biasesArrSize)
{
// create the ReturnVector
vec biasesReturnVector = perceptronGlobal.Biases();
// return the Vector lenght
*biasesArrSize = biasesReturnVector.n_elem;
// return the Vector as Array
*biasesArr = convertToArray(biasesReturnVector);
// return Vector
returnVectorInformation(biasesReturnVector, biasesArr, biasesArrSize);
}
// TODO:
// input: const MatType & test,
// arma::Row< size_t > & predictedLabels <-
// output:
// description:
void classify(float *testMatArr, SP_integer testMatSize, SP_integer testMatRowNum, float **predicLabeslsArr, SP_integer *predicLabeslsArrSize)
// After training, use the weights matrix to classify test, and put the predicted classes in predictedLabels.
//
void classify(float *testMatArr, SP_integer testMatSize, SP_integer testMatRowNum,
float **predictLabelsArr, SP_integer *predictLabelsArrSize)
{
// convert the Prolog arrays to arma::mat
mat test = convertArrayToMat(testMatArr, testMatSize, testMatRowNum);
// create the ReturnVector
Row< size_t > predicLabeslsReturnVector;
Row< size_t > predictLabelsReturnVector;
perceptronGlobal.Classify(test, predicLabeslsReturnVector);
// return the Vector lenght
*predicLabeslsArrSize = predicLabeslsReturnVector.n_elem;
try
{
perceptronGlobal.Classify(test, predictLabelsReturnVector);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
// return the Vector as Array
*predicLabeslsArr = convertToArray(predicLabeslsReturnVector);
// return the Vector
returnVectorInformation(predictLabelsReturnVector, predictLabelsArr, predictLabelsArrSize);
}
// TODO:
// input: const MatType & data,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
// const arma::rowvec & instanceWeights = arma::rowvec()
// output:
// description:
void train(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize, SP_integer numClasses, float *instanceWeightsArr, SP_integer instanceWeightsArrSize)
// Train the perceptron on the given data for up to the maximum number of iterations. This training does not reset the model weights, so train can be called on multiple datasets sequentially.
//
void train(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer numClasses,
float *instanceWeightsArr, SP_integer instanceWeightsArrSize)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
// convert the Prolog arrays to arma::rowvec
rowvec instanceWeightsVector = convertArrayToRowvec(instanceWeightsArr, instanceWeightsArrSize);
// use the model function
try
{
perceptronGlobal.Train(data, labelsVector, numClasses, instanceWeightsVector);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
}
// TODO:
// input:
// output: const arma::mat& weights
// description:
// Get the weight matrix.
//
void weights(float **weightsMatArr, SP_integer *weightsMatColNum, SP_integer *weightsMatRowNum)
{
// create the ReturnMat
mat weightsReturnMat = perceptronGlobal.Weights();
// return the Mat
returnMatrixInformation(weightsReturnMat, weightsMatArr, weightsMatColNum, weightsMatRowNum);
}
:- module(perceptron, [ initModelNoTrain/3,
initModelWithTrain/7,
biases/2,
classify/5,
train/8,
weights/3]).
initModelWithTrain/5,
biases/1,
classify/3,
train/5,
weights/2]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......@@ -31,12 +31,15 @@
%% --Description--
%% Initializes the perceptron model and its weight matrix but doesn't train it.
%%
foreign(initModelNoTrain, c, initModelNoTrain(+integer, +integer, +integer)).
initModelNoTrain(NumClasses, Dimensionality, MaxIterations) :-
initModelNoTrainI(NumClasses, Dimensionality, MaxIterations).
foreign(initModelNoTrain, c, initModelNoTrainI(+integer, +integer, +integer)).
%% --Input--
%% mat data => data(float_array), dataSize, dataRowNum,
%% vec labels => labels(float_array), labelsSize,
%% mat data,
%% vec labels,
%% int numClasses,
%% int maxIterations => 1000
%%
......@@ -44,7 +47,12 @@ foreign(initModelNoTrain, c, initModelNoTrain(+integer, +integer, +integer)).
%% --Description--
%% Initializes the perceptron model and its weight matrix and trains it with the given data.
%%
foreign(initModelWithTrain, c, initModelWithTrain( +pointer(float_array), +integer, +integer,
initModelWithTrain(DataList, DataRows, LabelsList, NumClasses, MaxIterations) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
initModelWithTrainI(X, Xsize, Xrownum, Y, Ysize, NumClasses, MaxIterations).
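%% A minimal usage sketch, assuming the foreign library is linked; four
%% 3-dimensional points with binary labels, mirroring the test data used
%% elsewhere in this commit, and the default-like 1000 iteration cap.
example_perceptron_init :-
    initModelWithTrain([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
                       [0,1,0,1], 2, 1000).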
foreign(initModelWithTrain, c, initModelWithTrainI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer, +integer)).
......@@ -57,32 +65,47 @@ foreign(initModelWithTrain, c, initModelWithTrain( +pointer(float_array), +
%% --Description--
%% Get the biases.
%%
foreign(biases, c, biases(-pointer(float_array), -integer)).
biases(BiasesList) :-
biasesI(Y, Ysize),
convert_float_array_to_list(Y, Ysize, BiasesList).
foreign(biases, c, biasesI(-pointer(float_array), -integer)).
%% --Input--
%% mat test => test(float_array), testSize, testRowNum,
%% mat test
%%
%% --Output--
%% vec predicLabels => predicLabels(float_array), predicLabelsSize
%% vec predicLabels
%%
%% --Description--
%% After training, use the weights matrix to classify test, and put the predicted classes in predictedLabels.
%%
foreign(classify, c, classify( +pointer(float_array), +integer, +integer,
classify(TestList, TestRows, PredictLabelList) :-
convert_list_to_float_array(TestList, TestRows, array(Xsize, Xrows, X)),
classifyI(X, Xsize, Xrows, Y, Ysize),
convert_float_array_to_list(Y, Ysize, PredictLabelList).
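%% Classification sketch, assuming a model was trained first (for example by a
%% call such as the example_perceptron_init sketch above, which is an
%% illustrative helper and not part of this module's exports); test points are made up.
example_perceptron_classify(PredictLabelList) :-
    classify([5.0,3.4,1.5, 4.8,3.1,1.6], 3, PredictLabelList).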
foreign(classify, c, classifyI( +pointer(float_array), +integer, +integer,
-pointer(float_array), -integer)).
%% --Input--
%% mat data => data(float_array), dataSize, dataRowNum,
%% vec labels => labels(float_array), labelsSize,
%% mat data,
%% vec labels,
%% int numClasses,
%% vec instWeights => instWeights(float_array), instWeightsSize = arma::rowvec()
%% vec instWeights
%%
%% --Output--
%% --Description--
%% Train the perceptron on the given data for up to the maximum number of iterations. This training does not reset the model weights, so you can call train/5 on multiple datasets sequentially.
%%
foreign(train, c, train(+pointer(float_array), +integer, +integer,
train(DataList, DataRows, LabelsList, NumClasses, WeightsList) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
convert_list_to_float_array(WeightsList, array(Zsize, Z)),
trainI(X, Xsize, Xrownum, Y, Ysize, NumClasses, Z, Zsize).
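%% Training sketch with per-instance weights, assuming a model was initialized
%% first via initModelNoTrain/3; the uniform weights and data values are illustrative.
example_perceptron_train :-
    initModelNoTrain(2, 3, 1000),
    train([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
          [0,1,0,1], 2,
          [1.0,1.0,1.0,1.0]).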
foreign(train, c, trainI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer,
+pointer(float_array), +integer)).
......@@ -91,12 +114,16 @@ foreign(train, c, train(+pointer(float_array), +integer, +integer,
%% --Input--
%%
%% --Output--
%% mat weights => weights(float_array), weightsColNum, weightsRowNum,
%% mat weights
%%
%% --Description--
%% Get the weight matrix.
%%
foreign(weights, c, weights(-pointer(float_array), -integer, -integer)).
weights(WeightsList, XCols) :-
weightsI(X, XCols, XRows),
convert_float_array_to_2d_list(X, XCols, XRows, WeightsList).
foreign(weights, c, weightsI(-pointer(float_array), -integer, -integer)).
%% Defines the functions that get connected from main.cpp
......
:- module(perceptron_tests, [run_perceptron_tests/0]).
:- use_module(library(plunit)).
:- use_module(perceptron).
......@@ -6,51 +9,36 @@
reset_Model :-
initModel(1,0,50,0.0001).
:- begin_tests(lists).
%% alpha tests
test(alpha_std_init) :-
reset_Model,
alpha(0).
test(alpha_wrong_input, fail) :-
reset_Model,
alpha(1).
test(alpha_after_train, A =:= 9223372036854775808) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize),
alpha(A).
%% train tests
test(correct_train) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train, fail) :-
reset_Model,
convert_list_to_float_array([],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train2, fail) :-
reset_Model,
convert_list_to_float_array([],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train4) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],2, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
:- end_tests(lists).
\ No newline at end of file
%%
%% TESTING predicate predicate/10
%%
:- begin_tests(predicate).
%% Failure Tests
test(testDescription, [error(domain_error('expectation' , culprit), _)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, culprit, 50, 0.0001, _).
test(testDescription2, [error(_,system_error('The values of the Label have to start at 0 and be >= 0 and < the given numClass!'))]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,2], 2, perceptron, 50, 0.0001, _).
%% Successful Tests
test(testDescription3, [true(Error =:= 1)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, perceptron, 50, 0.0001, Error).
test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
reset_Model_No_Train(perceptron),
open('/home/afkjakhes/eclipse-workspace/prolog-mlpack-libary/src/data_csv/iris2.csv', read, File),
take_csv_row(File, skipFirstRow,10, Data),
train(Data, 4, [0,1,0,1,1,0,1,1,1,0], 2, perceptron, 50, 0.0001, Error).
:- end_tests(predicate).
run_perceptron_tests :-
run_tests.
......@@ -18,7 +18,7 @@ using namespace mlpack::radical;
// Global Variable of the Radical object so it can be accessed from all functions
Radical radicalObj;
// TODO:
// input: const double noiseStdDev = 0.175,
// const size_t replicates = 30,
// const size_t angles = 150,
......@@ -26,18 +26,30 @@ Radical radicalObj;
// const size_t m = 0
// output:
// description:
// Initializes the radical model.
//
void initModel(double noiseStdDev, SP_integer replicates, SP_integer angles, SP_integer sweeps, SP_integer m)
{
try
{
radicalObj = Radical(noiseStdDev, replicates, angles, sweeps, m);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
}
}
// TODO:
// input: const arma::mat & matX,
// arma::mat & matY,
// arma::mat & matW,
// util::Timers & timers = IO::GetTimers()
// output:
// description:
// Run RADICAL.
//
void doRadical(float *xMatArr, SP_integer xMatSize, SP_integer xMatRowNum,
float **yMatArr, SP_integer *yMatColNum, SP_integer *yMatRowNum,
float **wMatArr, SP_integer *wMatColNum, SP_integer *wMatRowNum)
......@@ -50,7 +62,14 @@ void doRadical(float *xMatArr, SP_integer xMatSize, SP_integer xMatRowNum,
mat wReturnMat;
try
{
radicalObj.DoRadical(x, yReturnMat, wReturnMat);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
}
// return the Matrices
......@@ -58,16 +77,26 @@ void doRadical(float *xMatArr, SP_integer xMatSize, SP_integer xMatRowNum,
returnMatrixInformation(wReturnMat, wMatArr, wMatColNum, wMatRowNum);
}
// TODO:
// input: const arma::mat & matX,
// util::Timers & timers = IO::GetTimers()
//
// output: double output
// description:
// Two-dimensional version of RADICAL.
//
double doRadical2D(float *xMatArr, SP_integer xMatSize, SP_integer xMatRowNum)
{
// convert the Prolog array to arma::mat
mat x = convertArrayToMat(xMatArr, xMatSize, xMatRowNum);
try
{
return radicalObj.DoRadical2D(x);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
}
}
\ No newline at end of file
:- module(radical, [ initModel/5,
doRadical/9,
doRadical2D/4]).
doRadical/6,
doRadical2D/3]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......@@ -31,7 +31,10 @@
%% --Description--
%% Initializes the radical model.
%%
foreign(initModel, c, initModel(+float32, +integer, +integer, +integer, +integer)).
initModel(NoiseStdDev, Replicates, Angles, Sweeps, M) :-
initModelI(NoiseStdDev, Replicates, Angles, Sweeps, M).
foreign(initModel, c, initModelI(+float32, +integer, +integer, +integer, +integer)).
%% --Input--
......@@ -44,7 +47,13 @@ foreign(initModel, c, initModel(+float32, +integer, +integer, +integer, +integer
%% --Description--
%% Run RADICAL.
%%
foreign(doRadical, c, doRadical(+pointer(float_array), +integer, +integer,
doRadical(DataList, DataRows, YList, YCols, WList, ZCols) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
doRadicalI(X, Xsize, Xrows, Y, YCols, YRows, Z, ZCols, ZRows),
convert_float_array_to_2d_list(Y, YCols, YRows, YList),
convert_float_array_to_2d_list(Z, ZCols, ZRows, WList).
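%% A minimal usage sketch; the initModel/5 arguments follow the defaults listed
%% in the C++ comments (the sweeps value 0 is an assumption here), and the data
%% values are illustrative.
example_radical(YList, WList) :-
    initModel(0.175, 30, 150, 0, 0),
    doRadical([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
              YList, _YCols, WList, _WCols).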
foreign(doRadical, c, doRadicalI(+pointer(float_array), +integer, +integer,
-pointer(float_array), -integer, -integer,
-pointer(float_array), -integer, -integer)).
......@@ -58,7 +67,11 @@ foreign(doRadical, c, doRadical(+pointer(float_array), +integer, +integer,
%% --Description--
%% Two-dimensional version of RADICAL.
%%
foreign(doRadical2D, c, doRadical2D( +pointer(float_array), +integer, +integer,
doRadical2D(DataList, DataRows, Result) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
doRadical2DI(X, Xsize, Xrows, Result).
foreign(doRadical2D, c, doRadical2DI( +pointer(float_array), +integer, +integer,
[-float32])).
......
:- module(radical_tests, [run_radical_tests/0]).
:- use_module(library(plunit)).
:- use_module(radical).
......@@ -6,51 +9,36 @@
reset_Model :-
initModel(1,0,50,0.0001).
:- begin_tests(lists).
%% alpha tests
test(alpha_std_init) :-
reset_Model,
alpha(0).
test(alpha_wrong_input, fail) :-
reset_Model,
alpha(1).
test(alpha_after_train, A =:= 9223372036854775808) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize),
alpha(A).
%% train tests
test(correct_train) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train, fail) :-
reset_Model,
convert_list_to_float_array([],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train2, fail) :-
reset_Model,
convert_list_to_float_array([],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train4) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],2, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
:- end_tests(lists).
\ No newline at end of file
%%
%% TESTING predicate predicate/10
%%
:- begin_tests(predicate).
%% Failure Tests
test(testDescription, [error(domain_error('expectation' , culprit), _)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, culprit, 50, 0.0001, _).
test(testDescription2, [error(_,system_error('The values of the Label have to start at 0 and be >= 0 and < the given numClass!'))]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,2], 2, perceptron, 50, 0.0001, _).
%% Successful Tests
test(testDescription3, [true(Error =:= 1)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, perceptron, 50, 0.0001, Error).
test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
reset_Model_No_Train(perceptron),
open('/home/afkjakhes/eclipse-workspace/prolog-mlpack-libary/src/data_csv/iris2.csv', read, File),
take_csv_row(File, skipFirstRow,10, Data),
train(Data, 4, [0,1,0,1,1,0,1,1,1,0], 2, perceptron, 50, 0.0001, Error).
:- end_tests(predicate).
run_radical_tests :-
run_tests.
......@@ -18,16 +18,18 @@ using namespace mlpack::tree;
// Global Variable of the RandomForest object so it can be accessed from all functions
RandomForest randomForest;
// TODO:
// input:
// output:
// description:
// Initializes the model without training it.
//
void initModelNoTrain(SP_integer)
{
randomForest = RandomForest();
}
// TODO:
// input: const MatType & dataset,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
......@@ -38,19 +40,31 @@ void initModelNoTrain(SP_integer)
// DimensionSelectionType dimensionSelector = DimensionSelectionType()
// output:
// description:
void initModelWithTrainNoWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize,
// Initializes the model and trains it but does not apply weights to it.
//
void initModelWithTrainNoWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer numClasses, SP_integer numTrees, SP_integer minimumLeafSize, double minimumGainSplit, SP_integer maximumDepth)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
try
{
randomForest = RandomForest(data, labelsVector, numClasses, numTrees, minimumLeafSize, minimumGainSplit, maximumDepth);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
}
// TODO:
// input: const MatType & dataset,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
......@@ -62,29 +76,44 @@ void initModelWithTrainNoWeights(float *dataMatArr, SP_integer dataMatSize, SP_i
// DimensionSelectionType dimensionSelector = DimensionSelectionType()
// output:
// description:
void initModelWithTrainWithWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize, SP_integer numClasses,
float *weightsArr, SP_integer weightsArrSize, SP_integer numTrees, SP_integer minimumLeafSize, double minimumGainSplit, SP_integer maximumDepth)
// Initializes the model, trains it and applies weights to it.
//
void initModelWithTrainWithWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer numClasses,
float *weightsArr, SP_integer weightsArrSize,
SP_integer numTrees, SP_integer minimumLeafSize, double minimumGainSplit, SP_integer maximumDepth)
{
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog array to arma::rowvec
// convert the Prolog array to arma::Row< size_t >
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
// convert the Prolog array to arma::rowvec
rowvec weightsVector = convertArrayToRowvec(weightsArr, weightsArrSize);
try
{
randomForest = RandomForest(data, labelsVector, numClasses, weightsVector, numTrees, minimumLeafSize, minimumGainSplit, maximumDepth);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
}
// TODO:
// input: const VecType & point,
// size_t & prediction <-,
// arma::vec & probabilities <-
// output:
// description:
void classifyPoint(float *pointArr, SP_integer pointArrSize, SP_integer *prediction, float **probsArr, SP_integer *probsArrSize)
// Predict the class of the given point and return the predicted class probabilities for each class.
//
void classifyPoint(float *pointArr, SP_integer pointArrSize,
SP_integer *prediction,
float **probsArr, SP_integer *probsArrSize)
{
// convert the Prolog array to arma::rowvec
rowvec pointVector = convertArrayToRowvec(pointArr, pointArrSize);
......@@ -94,61 +123,75 @@ void classifyPoint(float *pointArr, SP_integer pointArrSize, SP_integer *predict
size_t predicReturn;
try
{
randomForest.Classify(pointVector, predicReturn, probsReturnVector);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
// return the prediction value
*prediction = predicReturn;
// return the Vector lenght
*probsArrSize = probsReturnVector.n_elem;
// return the Vector as Array
*probsArr = convertToArray(probsReturnVector);
// return the Vector
returnVectorInformation(probsReturnVector, probsArr, probsArrSize);
}
// TODO:
// input: const MatType & data,
// arma::Row< size_t > & predictions <-,
// arma::mat & probabilities <-
// output:
// description:
void classifyMatrix(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float **predicArr, SP_integer *predicArrSize, float **probsMatArr, SP_integer *probsMatColNum, SP_integer *probsMatRowNum)
// Predict the classes of each point in the given dataset, also returning the predicted class probabilities for each point.
//
void classifyMatrix(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float **predicArr, SP_integer *predicArrSize,
float **probsMatArr, SP_integer *probsMatColNum, SP_integer *probsMatRowNum)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// create the ReturnVector
Row< size_t > predicReturnVector;
// create the ReturnMat
mat probsReturnMat;
randomForest.Classify(data, predicReturnVector, probsReturnMat);
// return the Vector lenght
*predicArrSize = predicReturnVector.n_elem;
// return the Vector as Array
*predicArr = convertToArray(predicReturnVector);
try
{
randomForest.Classify(data, predicReturnVector, probsReturnMat);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
// return the Matrix dimensions
*probsMatColNum = probsReturnMat.n_cols;
*probsMatRowNum = probsReturnMat.n_rows;
// return the Matrix as one long Array
*probsMatArr = convertToArray(probsReturnMat);
// return the Vector
returnVectorInformation(predicReturnVector, predicArr, predicArrSize);
// return the Matrix
returnMatrixInformation(probsReturnMat, probsMatArr, probsMatColNum, probsMatRowNum);
}
// TODO:
// input:
// output: const size_t number of trees
// description:
// Get the number of trees in the forest.
//
SP_integer numTrees()
{
return randomForest.NumTrees();
}
// TODO: doesn't accept warmStart parameter
//
// input: const MatType & data,
......@@ -163,17 +206,29 @@ SP_integer numTrees()
//
// output: double average entropy of all trees trained
// description:
double trainNoWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize,
// Train the random forest on the given labeled training data with the given number of trees.
//
double trainNoWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer numClasses, SP_integer numTrees, SP_integer minimumLeafSize, double minimumGainSplit, SP_integer maximumDepth)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
try
{
return randomForest.Train(data, labelsVector, numClasses, numTrees, minimumLeafSize, minimumGainSplit, maximumDepth);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0.0;
}
}
// TODO: doesn't accept warmStart parameter
//
......@@ -190,18 +245,26 @@ double trainNoWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer data
//
// output: double average entropy of all trees trained
// description:
// Train the random forest on the given weighted labeled training data with the given number of trees.
//
double trainWithWeights(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize, SP_integer numClasses,
float *weightsArr, SP_integer weightsArrSize, SP_integer numTrees, SP_integer minimumLeafSize, double minimumGainSplit, SP_integer maximumDepth)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
// convert the Prolog arrays to arma::rowvec
rowvec weightsVector = convertArrayToRowvec(weightsArr, weightsArrSize);
try
{
return randomForest.Train(data, labelsVector, numClasses, weightsVector, numTrees, minimumLeafSize, minimumGainSplit, maximumDepth);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0.0;
}
}
:- module(random_forest, [ initModelNoTrain/1,
initModelWithTrainNoWeights/10,
initModelWithTrainWithWeights/12,
classifyPoint/5,
classifyMatrix/8,
:- module(random_forest, [ initModelNoTrain/0,
initModelWithTrainNoWeights/8,
initModelWithTrainWithWeights/9,
classifyPoint/3,
classifyMatrix/5,
numTrees/1,
trainNoWeights/11,
trainWithWeights/13]).
trainNoWeights/9,
trainWithWeights/10]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......@@ -32,7 +32,10 @@
%% --Description--
%% Initializes the model without training it.
%%
foreign(initModelNoTrain, c, initModelNoTrain(+integer)).
initModelNoTrain :-
initModelNoTrainI(0).
foreign(initModelNoTrain, c, initModelNoTrainI(+integer)).
%% --Input--
......@@ -49,7 +52,12 @@ foreign(initModelNoTrain, c, initModelNoTrain(+integer)).
%% --Description--
%% Initializes the model and trains it but does not apply weights to it.
%%
foreign(initModelWithTrainNoWeights, c, initModelWithTrainNoWeights(
initModelWithTrainNoWeights(DataList, DataRows, LabelsList, NumClasses, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
initModelWithTrainNoWeightsI(X, Xsize, Xrownum, Y, Ysize, NumClasses, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth).
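%% A minimal usage sketch, assuming the foreign library is linked; 10 trees,
%% minimum leaf size 1, minimum gain split 0.0 and maximum depth 0 (assumed here
%% to mean "no depth limit", as in mlpack) on illustrative data.
example_forest_init :-
    initModelWithTrainNoWeights([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
                                [0,1,0,1], 2,
                                10, 1, 0.0, 0).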
foreign(initModelWithTrainNoWeights, c, initModelWithTrainNoWeightsI(
+pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer, +integer, +integer, +float32, +integer)).
......@@ -70,7 +78,13 @@ foreign(initModelWithTrainNoWeights, c, initModelWithTrainNoWeights(
%% --Description--
%% Initializes the model, trains it and applies weights to it.
%%
foreign(initModelWithTrainWithWeights, c, initModelWithTrainWithWeights(
initModelWithTrainWithWeights(DataList, DataRows, LabelsList, NumClasses, WeightsList, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
convert_list_to_float_array(WeightsList, array(Zsize, Z)),
initModelWithTrainWithWeightsI(X, Xsize, Xrownum, Y, Ysize, NumClasses, Z, Zsize, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth).
foreign(initModelWithTrainWithWeights, c, initModelWithTrainWithWeightsI(
+pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer,
......@@ -89,7 +103,12 @@ foreign(initModelWithTrainWithWeights, c, initModelWithTrainWithWeights(
%% Predict the class of the given point and return the predicted class probabilities for each class.
%% Random forest has to be trained before using this.
%%
foreign(classifyPoint, c, classifyPoint(+pointer(float_array), +integer,
classifyPoint(DataList, Prediction, AssignList) :-
convert_list_to_float_array(DataList, array(Xsize, X)),
classifyPointI(X, Xsize, Prediction, Y, Ysize),
convert_float_array_to_list(Y, Ysize, AssignList).
foreign(classifyPoint, c, classifyPointI( +pointer(float_array), +integer,
-integer,
-pointer(float_array), -integer)).
......@@ -105,7 +124,13 @@ foreign(classifyPoint, c, classifyPoint(+pointer(float_array), +integer,
%% Predict the classes of each point in the given dataset, also returning the predicted class probabilities for each point.
%% Random forest has to be trained before using this.
%%
foreign(classifyMatrix, c, classifyMatrix( +pointer(float_array), +integer, +integer,
classifyMatrix(DataList, DataRows, PredictionList, ProbsList, ZCols) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
classifyMatrixI(X, Xsize, Xrows, Y, Ysize, Z, ZCols, ZRows),
convert_float_array_to_list(Y, Ysize, PredictionList),
convert_float_array_to_2d_list(Z, ZCols, ZRows, ProbsList).
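%% Classification sketch for a whole matrix, assuming the forest was trained
%% beforehand (for example by the example_forest_init sketch above); the two
%% test points are made up.
example_forest_classify(PredictionList, ProbsList) :-
    classifyMatrix([5.0,3.4,1.5, 4.8,3.1,1.6], 3, PredictionList, ProbsList, _ProbCols).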
foreign(classifyMatrix, c, classifyMatrixI( +pointer(float_array), +integer, +integer,
-pointer(float_array), -integer,
-pointer(float_array), -integer, -integer)).
......@@ -118,7 +143,9 @@ foreign(classifyMatrix, c, classifyMatrix( +pointer(float_array), +integer,
%% --Description--
%% Get the number of trees in the forest.
%%
foreign(numTrees, c, numTrees([-integer])).
numTrees(NumTrees) :-
numTreesI(NumTrees).
foreign(numTrees, c, numTreesI([-integer])).
%% --Input--
......@@ -137,7 +164,12 @@ foreign(numTrees, c, numTrees([-integer])).
%% Train the random forest on the given labeled training data with the given number of trees.
%% The minimumLeafSize and minimumGainSplit parameters are given to each individual decision tree during tree building.
%%
foreign(trainNoWeights, c, trainNoWeights( +pointer(float_array), +integer, +integer,
trainNoWeights(DataList, DataRows, LabelsList, NumClasses, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth, Entropy) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
trainNoWeightsI(X, Xsize, Xrownum, Y, Ysize, NumClasses, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth, Entropy).
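%% Training sketch without instance weights; Entropy is bound to the value the
%% C++ side returns (the average entropy of the trained trees). All data values
%% are illustrative.
example_forest_train(Entropy) :-
    initModelNoTrain,
    trainNoWeights([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
                   [0,1,0,1], 2, 10, 1, 0.0, 0, Entropy).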
foreign(trainNoWeights, c, trainNoWeightsI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer, +integer, +integer, +float32, +integer,
[-float32])).
......@@ -160,7 +192,13 @@ foreign(trainNoWeights, c, trainNoWeights( +pointer(float_array), +integer,
%% Train the random forest on the given weighted labeled training data with the given number of trees.
%% The minimumLeafSize and minimumGainSplit parameters are given to each individual decision tree during tree building.
%%
foreign(trainWithWeights, c, trainWithWeights( +pointer(float_array), +integer, +integer,
trainWithWeights(DataList, DataRows, LabelsList, NumClasses, WeightsList, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth, Entropy) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
convert_list_to_float_array(WeightsList, array(Zsize, Z)),
trainWithWeightsI(X, Xsize, Xrownum, Y, Ysize, NumClasses, Z, Zsize, NumTrees, MinimumLeafSize, MinimumGainSplit, MaximumDepth, Entropy).
foreign(trainWithWeights, c, trainWithWeightsI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer,
+pointer(float_array), +integer,
......
:- module(random_forest_tests, [run_random_forest_tests/0]).
:- use_module(library(plunit)).
:- use_module(random_forest).
......@@ -6,51 +9,36 @@
reset_Model :-
initModel(1,0,50,0.0001).
:- begin_tests(lists).
%% alpha tests
test(alpha_std_init) :-
reset_Model,
alpha(0).
test(alpha_wrong_input, fail) :-
reset_Model,
alpha(1).
test(alpha_after_train, A =:= 9223372036854775808) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize),
alpha(A).
%% train tests
test(correct_train) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train, fail) :-
reset_Model,
convert_list_to_float_array([],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train2, fail) :-
reset_Model,
convert_list_to_float_array([],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2],0, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train3, fail) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
test(false_train4) :-
reset_Model,
convert_list_to_float_array([1,2,44,3],2, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize).
:- end_tests(lists).
\ No newline at end of file
%%
%% TESTING predicate predicate/10
%%
:- begin_tests(predicate).
%% Failure Tests
test(testDescription, [error(domain_error('expectation' , culprit), _)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, culprit, 50, 0.0001, _).
test(testDescription2, [error(_,system_error('The values of the Label have to start at 0 and be >= 0 and < the given numClass!'))]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,1,0,2], 2, perceptron, 50, 0.0001, _).
%% Successful Tests
test(testDescription3, [true(Error =:= 1)]) :-
reset_Model_No_Train(perceptron),
train([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5], 3, [0,0,0,0], 2, perceptron, 50, 0.0001, Error).
test(testDescription4, [true(Error =:= 0.9797958971132711)]) :-
reset_Model_No_Train(perceptron),
open('/home/afkjakhes/eclipse-workspace/prolog-mlpack-libary/src/data_csv/iris2.csv', read, File),
take_csv_row(File, skipFirstRow,10, Data),
train(Data, 4, [0,1,0,1,1,0,1,1,1,0], 2, perceptron, 50, 0.0001, Error).
:- end_tests(predicate).
run_random_forest_tests :-
run_tests.
......@@ -18,19 +18,21 @@ using namespace mlpack::regression;
// Global Variable of the SoftmaxRegression object so it can be accessed from all functions
SoftmaxRegression softmaxRegression;
// TODO:
// input: const size_t inputSize = 0,
// const size_t numClasses = 0,
// const bool fitIntercept = false//
// output:
// description:
// Initializes the softmax_regression model without training.
//
void initModelNoTrain(SP_integer inputSize, SP_integer numClasses,
SP_integer fitIntercept)
{
softmaxRegression = SoftmaxRegression(inputSize, numClasses, (fitIntercept == 1));
}
// TODO:
// input: const arma::mat & data,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
......@@ -39,36 +41,61 @@ void initModelNoTrain(SP_integer inputSize, SP_integer numClasses,
// OptimizerType optimizer = OptimizerType()
// output:
// description:
void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize, SP_integer numClasses, double lambda, SP_integer fitIntercept)
// Initializes the softmax_regression model and trains it.
//
void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer numClasses, double lambda,
SP_integer fitIntercept)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
try
{
softmaxRegression = SoftmaxRegression(data, labelsVector, numClasses, lambda, (fitIntercept == 1));
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
}
}
// TODO:
// input: const VecType & point
//
// output: size_t predicted label of point
// description:
// Classify the given point.
//
SP_integer classifyPoint(float *pointArr, SP_integer pointArrSize)
{
// convert the Prolog arrays to arma::rowvec
rowvec pointVector = convertArrayToRowvec(pointArr, pointArrSize);
try
{
return softmaxRegression.Classify(pointVector);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0;
}
}
// TODO:
// input: const arma::mat & dataset,
// arma::Row< size_t > & labels <-,
// arma::mat & probabilities <-
// output:
// description:
// Classify the given points, returning class probabilities and predicted class label for each point.
//
void classifyMatrix(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float **labelsArr, SP_integer *labelsArrSize, float **probsMatArr, SP_integer *probsMatColNum, SP_integer *probsMatRowNum)
{
// convert the Prolog arrays to arma::mat
......@@ -76,70 +103,82 @@ void classifyMatrix(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMa
// create the ReturnVector
Row< size_t > labelsReturnVector;
// create the ReturnMat
mat probsReturnMat;
softmaxRegression.Classify(data, labelsReturnVector, probsReturnMat);
// return the Vector lenght
*labelsArrSize = labelsReturnVector.n_elem;
// return the Vector as Array
*labelsArr = convertToArray(labelsReturnVector);
try
{
softmaxRegression.Classify(data, labelsReturnVector, probsReturnMat);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return;
}
// return the Matrix dimensions
*probsMatColNum = probsReturnMat.n_cols;
*probsMatRowNum = probsReturnMat.n_rows;
// return the Matrix as one long Array
*probsMatArr = convertToArray(probsReturnMat);
// return the Vector
returnVectorInformation(labelsReturnVector, labelsArr, labelsArrSize);
// return the Matrix
returnMatrixInformation(probsReturnMat, probsMatArr, probsMatColNum, probsMatRowNum);
}
// TODO:
// input: const arma::mat & testData,
// const arma::Row< size_t > & labels
//
// output: double accuracy
// description:
// Computes accuracy of the learned model given the feature data and the labels associated with each data point.
//
double computeAccuracy(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
try
{
return softmaxRegression.ComputeAccuracy(data, labelsVector);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0.0;
}
}
// TODO:
// input:
// output: size_t
// description:
// Gets the feature size of the training data.
//
SP_integer featureSize()
{
return softmaxRegression.FeatureSize();
}
// TODO:
// input:
// output: arma::mat&
// description:
// Get the model parameters.
//
void parameters(float **parametersMatArr, SP_integer *parametersMatColNum, SP_integer *parametersMatRowNum)
{
// create the ReturnMat
mat parametersReturnMat = softmaxRegression.Parameters();
// return the Matrix dimensions
*parametersMatColNum = parametersReturnMat.n_cols;
*parametersMatRowNum = parametersReturnMat.n_rows;
// return the Matrix as one long Array
*parametersMatArr = convertToArray(parametersReturnMat);
// return the Matrix
returnMatrixInformation(parametersReturnMat, parametersMatArr, parametersMatColNum, parametersMatRowNum);
}
// TODO:
// input: const arma::mat & data,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
......@@ -147,13 +186,23 @@ void parameters(float **parametersMatArr, SP_integer *parametersMatColNum, SP_in
//
// output: double objective value of final point
// description:
// Trains the softmax regression model with the given training data.
//
double train(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize, SP_integer numClasses)
{
// convert the Prolog arrays to arma::mat
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog arrays to arma::rowvec
// convert the Prolog array to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
try
{
return softmaxRegression.Train(data, labelsVector, numClasses);
}
catch(const std::exception& e)
{
raisePrologSystemExeption(e.what());
return 0.0;
}
}
:- module(softmax_regression, [ initModelNoTrain/3,
initModelWithTrain/8,
classifyPoint/3,
classifyMatrix/8,
computeAccuracy/6,
initModelWithTrain/6,
classifyPoint/2,
classifyMatrix/5,
computeAccuracy/4,
featureSize/1,
parameters/3,
train/7]).
parameters/2,
train/5]).
%% requirements of library(struct)
:- load_files(library(str_decl),
......@@ -35,7 +35,10 @@
%% Initializes the softmax_regression model without training.
%% Be sure to use Train before calling Classify or ComputeAccuracy, otherwise the results may be meaningless.
%%
foreign(initModelNoTrain, c, initModelNoTrain( +integer, +integer,
initModelNoTrain(InputSize, NumClasses, FitIntercept) :-
initModelNoTrainI(InputSize, NumClasses, FitIntercept).
foreign(initModelNoTrain, c, initModelNoTrainI( +integer, +integer,
+integer)).
......@@ -51,7 +54,12 @@ foreign(initModelNoTrain, c, initModelNoTrain( +integer, +integer,
%% --Description--
%% Initializes the softmax_regression model and trains it.
%%
foreign(initModelWithTrain, c, initModelWithTrain( +pointer(float_array), +integer, +integer,
initModelWithTrain(DataList, DataRows, LabelsList, NumClasses, Lambda, FitIntercept) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
initModelWithTrainI(X, Xsize, Xrownum, Y, Ysize, NumClasses, Lambda, FitIntercept).
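%% A minimal usage sketch, assuming the foreign library is linked; lambda 0.0001
%% and an intercept term (FitIntercept 1) on illustrative data and labels.
example_softmax_init :-
    initModelWithTrain([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
                       [0,1,0,1], 2, 0.0001, 1).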
foreign(initModelWithTrain, c, initModelWithTrainI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer, +float32,
+integer)).
......@@ -66,7 +74,11 @@ foreign(initModelWithTrain, c, initModelWithTrain( +pointer(float_array), +
%% --Description--
%% Classify the given point.
%%
foreign(classifyPoint, c, classifyPoint(+pointer(float_array), +integer,
classifyPoint(DataList, Prediction) :-
convert_list_to_float_array(DataList, array(Xsize, X)),
classifyPointI(X, Xsize, Prediction).
foreign(classifyPoint, c, classifyPointI(+pointer(float_array), +integer,
[-integer])).
......@@ -80,7 +92,13 @@ foreign(classifyPoint, c, classifyPoint(+pointer(float_array), +integer,
%% --Description--
%% Classify the given points, returning class probabilities and predicted class label for each point.
%%
foreign(classifyMatrix, c, classifyMatrix( +pointer(float_array), +integer, +integer,
classifyMatrix(DataList, DataRows, PredictionList, ProbsList, ZCols) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrows, X)),
classifyMatrixI(X, Xsize, Xrows, Y, Ysize, Z, ZCols, ZRows),
convert_float_array_to_list(Y, Ysize, PredictionList),
convert_float_array_to_2d_list(Z, ZCols, ZRows, ProbsList).
foreign(classifyMatrix, c, classifyMatrixI( +pointer(float_array), +integer, +integer,
-pointer(float_array), -integer,
-pointer(float_array), -integer, -integer)).
......@@ -96,7 +114,11 @@ foreign(classifyMatrix, c, classifyMatrix( +pointer(float_array), +integer,
%% Computes accuracy of the learned model given the feature data and the labels associated with each data point.
%% Predictions are made using the provided data and are compared with the actual labels.
%%
foreign(computeAccuracy, c, computeAccuracy( +pointer(float_array), +integer, +integer,
computeAccuracy(DataList, DataRows, LabelsList, Accuracy) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
computeAccuracyI(X, Xsize, Xrownum, Y, Ysize, Accuracy).
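%% Accuracy sketch on the same (illustrative) training data, assuming the model
%% was trained first, for example by the example_softmax_init sketch above.
example_softmax_accuracy(Accuracy) :-
    computeAccuracy([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3,
                    [0,1,0,1], Accuracy).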
foreign(computeAccuracy, c, computeAccuracyI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
[-float32])).
......@@ -109,7 +131,10 @@ foreign(computeAccuracy, c, computeAccuracy( +pointer(float_array), +integer,
%% --Description--
%% Gets the feature size of the training data.
%%
foreign(featureSize, c, featureSize([-integer])).
featureSize(FeatureSize) :-
featureSizeI(FeatureSize).
foreign(featureSize, c, featureSizeI([-integer])).
%% --Input--
%%
......@@ -119,7 +144,11 @@ foreign(featureSize, c, featureSize([-integer])).
%% --Description--
%% Get the model parameters.
%%
foreign(parameters, c, parameters(-pointer(float_array), -integer, -integer)).
parameters(ParametersList, XCols) :-
parametersI(X, XCols, XRows),
convert_float_array_to_2d_list(X, XCols, XRows, ParametersList).
foreign(parameters, c, parametersI(-pointer(float_array), -integer, -integer)).
%% --Input--
......@@ -133,7 +162,12 @@ foreign(parameters, c, parameters(-pointer(float_array), -integer, -integer)).
%% --Description--
%% Trains the softmax regression model with the given training data.
%%
foreign(train, c, train(+pointer(float_array), +integer, +integer,
train(DataList, DataRows, LabelsList, NumClasses, FinalValue) :-
convert_list_to_float_array(DataList, DataRows, array(Xsize, Xrownum, X)),
convert_list_to_float_array(LabelsList, array(Ysize, Y)),
trainI(X, Xsize, Xrownum, Y, Ysize, NumClasses, FinalValue).
foreign(train, c, trainI( +pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer,
[-float32])).
......