Commit 52e5272b authored by Jakhes

Adding arrayToColvec function

parent 8017e514
@@ -5,6 +5,8 @@
// Extra functions to reduce some code for the conversion between arma and float *array
// arma --> array
float *convertToArray(colvec vec)
{
vector<float> vectorData = conv_to<vector<float>>::from(vec);
@@ -80,6 +82,9 @@ float *convertToArray(vector<vec> matrix)
return convertToArray(newVec);
}
// array --> arma
rowvec convertArrayToRowvec(float *arr, int vecSize)
{
rowvec rVector(vecSize);
@@ -90,6 +95,16 @@ rowvec convertArrayToRowvec(float *arr, int vecSize)
return rVector;
}
colvec convertArrayToColvec(float *arr, int vecSize)
{
colvec rVector(vecSize);
for(int i = 0; i < vecSize; i++)
{
rVector[i] = arr[i];
}
return rVector;
}
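A minimal usage sketch for the new helper, assuming it is compiled and linked together with the existing convertToArray(colvec) shown above (the forward declarations and test values here are illustrative only):

#include <armadillo>
using namespace arma;

// declared in this file (see the hunks above)
colvec convertArrayToColvec(float *arr, int vecSize);
float *convertToArray(colvec vec);

int main()
{
    float raw[3] = {1.0f, 2.0f, 3.0f};
    // array --> arma: build a 3-element column vector from the C array
    colvec v = convertArrayToColvec(raw, 3);
    v.print("colvec:");
    // arma --> array: convert back; ownership of the returned buffer
    // depends on convertToArray's implementation (not shown here)
    float *back = convertToArray(v);
    return 0;
}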
Row<size_t> convertArrayToVec(float *arr, int vecSize)
{
Row<size_t> rVector(vecSize);
@@ -110,6 +125,9 @@ mat convertArrayToMat(float *arr, int vecSize, int rowCount)
return matrix;
}
void returnMatrixInformation(mat matrix, float **mat, SP_integer *matColNum, SP_integer *matRowNum)
{
// return the Matrix dimensions
......
@@ -31,6 +31,8 @@ float *convertToArray(vector<vec> vec);
rowvec convertArrayToRowvec(float *arr, int vecSize);
colvec convertArrayToColvec(float *arr, int vecSize);
Row<size_t> convertArrayToVec(float *arr, int vecSize);
mat convertArrayToMat(float *arr, int vecSize, int rowCount);
......
@@ -18,6 +18,7 @@ using namespace mlpack::svm;
// Global Variable of the LinearSVM object so it can be accessed from all functions
LinearSVM<> linearSVM;
bool isModelTrained = false;
// input: const MatType & data,
@@ -39,6 +40,13 @@ void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer da
{
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// check if labels fit the data
if (data.n_cols != labelsArrSize)
{
raisePrologSystemExeption("The number of data points does not match the number of labels!");
return;
}
// convert the Prolog array to arma::rowvec
arma::Row<size_t> labels = convertArrayToVec(labelsArr, labelsArrSize);
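mlpack and Armadillo, as used here, store one data point per column, so the new check compares data.n_cols against the number of labels; a small self-contained sketch of the same consistency test (toy values, illustrative only):

#include <armadillo>
using namespace arma;

int main()
{
    // 2 features x 4 points: each column is one data point
    mat data(2, 4, fill::randu);
    Row<size_t> labels = {0, 1, 0, 1};

    // the check added in this commit; the wrapper raises a Prolog
    // system exception instead of returning an error code
    if (data.n_cols != labels.n_elem)
    {
        return 1;
    }
    return 0;
}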
@@ -47,10 +55,12 @@ void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer da
if (strcmp(optimizer, "lbfgs") == 0)
{
linearSVM.Train<ens::L_BFGS>(data, labels, numClasses);
isModelTrained = true;
}
else if (strcmp(optimizer, "psgd") == 0)
{
linearSVM.Train<ens::ParallelSGD<>>(data, labels, numClasses, ens::ParallelSGD(100, 4));
isModelTrained = true;
}
else
{
@@ -71,6 +81,8 @@ void initModelWithTrain(float *dataMatArr, SP_integer dataMatSize, SP_integer da
void initModelNoTrain(SP_integer numClasses, double lambda, double delta, SP_integer fitIntercept)
{
linearSVM = LinearSVM<>(numClasses, lambda, delta, true);
isModelTrained = false;
}
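For reference, a minimal sketch of the plain mlpack calls that initModelWithTrain and initModelNoTrain wrap (the header path is an assumption; hyper-parameter and data values are illustrative only):

#include <mlpack/methods/linear_svm/linear_svm.hpp>   // assumed header location
#include <armadillo>
using namespace arma;
using namespace mlpack::svm;

int main()
{
    mat data(3, 10, fill::randu);            // 3 features, 10 points (one per column)
    Row<size_t> labels(10, fill::zeros);
    labels.tail(5).fill(1);                  // two classes: 0 and 1

    // initModelWithTrain path: train with the L-BFGS optimizer
    LinearSVM<> svm;
    svm.Train<ens::L_BFGS>(data, labels, 2 /*numClasses*/);

    // initModelNoTrain path: only set the hyper-parameters
    LinearSVM<> untrained(2 /*numClasses*/, 0.0001 /*lambda*/, 1.0 /*delta*/, true /*fitIntercept*/);
    return 0;
}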
@@ -85,8 +97,20 @@ void classify(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNu
float **labelsArr, SP_integer *labelsArrSize,
float **scoresMatArr, SP_integer *scoresMatColNum, SP_integer *scoresMatRowNum)
{
if (!isModelTrained)
{
raisePrologSystemExeption("The Model is not trained!");
return;
}
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// check if data has the correct feature size
if (data.n_rows != linearSVM.FeatureSize())
{
raisePrologSystemExeption("The given Datapoints Dimension is != the trained Datapoints Dimension!");
return;
}
// get the ReturnVector
arma::Row<size_t> labelsReturnVector;
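After these guards, the batch classification presumably fills the declared label row plus a score matrix through mlpack's Classify overload; a short continuation of the training sketch above (the exact overload used by this wrapper is not visible in the hunk, so treat it as an assumption):

// reusing svm and data from the training sketch above
Row<size_t> predictedLabels;
mat scores;                              // one column of decision scores per point
svm.Classify(data, predictedLabels, scores);
// predictedLabels.n_elem == data.n_cols; scores has one row per class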
@@ -119,12 +143,23 @@ void classify(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNu
//
SP_integer classifyPoint(float *pointArr, SP_integer pointArrSize)
{
if (!isModelTrained)
{
raisePrologSystemExeption("The Model is not trained!");
return 0;
}
// convert the Prolog array to arma::rowvec
-rowvec pointVector = convertArrayToRowvec(pointArr, pointArrSize);
+vec pointVector = convertArrayToColvec(pointArr, pointArrSize);
// check if data has the correct feature size
if (pointArrSize != linearSVM.FeatureSize())
{
raisePrologSystemExeption("The given Datapoint Dimension is != the trained Datapoints Dimension!");
return 0;
}
try
{
-return linearSVM.Classify<rowvec>(pointVector);
+return linearSVM.Classify<vec>(pointVector);
}
catch(const std::exception& e)
{
@@ -143,8 +178,26 @@ SP_integer classifyPoint(float *pointArr, SP_integer pointArrSize)
//
double computeAccuracy(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum, float *labelsArr, SP_integer labelsArrSize)
{
if (!isModelTrained)
{
raisePrologSystemExeption("The Model is not trained!");
return 0.0;
}
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// check if data has the correct feature size
if (data.n_rows != linearSVM.FeatureSize())
{
raisePrologSystemExeption("The given Datapoints Dimension is != the trained Datapoints Dimension!");
return 0.0;
}
// check if labels fit the data
if (data.n_cols != labelsArrSize)
{
raisePrologSystemExeption("The number of data points does not match the number of labels!");
return 0.0;
}
// convert the Prolog array to arma::rowvec
arma::Row<size_t> labels = convertArrayToVec(labelsArr, labelsArrSize);
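Whatever accuracy routine computeAccuracy delegates to internally, the quantity can be reproduced directly from predicted and true label rows; a small self-contained helper for reference (whether the wrapper reports a fraction or a percentage is not visible in this hunk):

#include <armadillo>
using namespace arma;

// fraction of positions where the predicted and true labels agree
double labelAccuracy(const Row<size_t> &predicted, const Row<size_t> &truth)
{
    // both rows must have the same length (the checks added above guard this)
    return static_cast<double>(accu(predicted == truth)) / truth.n_elem;
}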
@@ -174,6 +227,13 @@ double train(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum
{
// convert the Prolog array to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// check if labels fit the data
if (data.n_cols != labelsArrSize)
{
raisePrologSystemExeption("The number of data points does not match the number of labels!");
return 0.0;
}
// convert the Prolog array to arma::rowvec
arma::Row<size_t> labels = convertArrayToVec(labelsArr, labelsArrSize);
@@ -181,10 +241,12 @@ double train(float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum
{
if (strcmp(optimizer, "lbfgs") == 0)
{
isModelTrained = true;
return linearSVM.Train<ens::L_BFGS>(data, labels, numClasses);
}
else if (strcmp(optimizer, "psgd") == 0)
{
isModelTrained = true;
return linearSVM.Train<ens::ParallelSGD<>>(data, labels, numClasses, ens::ParallelSGD(100, 4));
}
else
......
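The classifyPoint hunk above switches from rowvec to colvec (and Classify<vec>), matching the one-point-per-column convention; a hedged fragment showing that call, assuming a trained LinearSVM<> named svm with FeatureSize() == 3 and the conversion helper from the first file:

float raw[3] = {0.1f, 0.2f, 0.3f};
arma::vec point = convertArrayToColvec(raw, 3);   // a single point is a column vector

if (point.n_elem != svm.FeatureSize())
{
    // dimension mismatch: the wrapper raises a Prolog system exception here
}
else
{
    size_t predictedClass = svm.Classify<arma::vec>(point);
}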
@@ -37,7 +37,12 @@ void lmnn(char const *optimizer,
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// check if labels fit the data
if (data.n_cols != labelsArrSize)
{
raisePrologSystemExeption("The number of data points does not match the number of labels!");
return;
}
// convert the Prolog arrays to arma::rowvec
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
@@ -84,7 +89,7 @@ void lmnn(char const *optimizer,
lmnn.LearnDistance(outputReturnMat);
}
-else if (strcmp(optimizer, "amsgrad") == 0)
+else if (strcmp(optimizer, "bbsgd") == 0)
{
LMNN<LMetric<2>, ens::BBS_BB> lmnn(data, labelsVector, k);
lmnn.Regularization() = regularization;
@@ -97,7 +102,7 @@ void lmnn(char const *optimizer,
lmnn.LearnDistance(outputReturnMat);
}
-else if (strcmp(optimizer, "amsgrad") == 0)
+else if (strcmp(optimizer, "sgd") == 0)
{
// copied advise from the lmnn.main file
// Using SGD is not recommended as the learning matrix can
@@ -113,7 +118,7 @@ void lmnn(char const *optimizer,
lmnn.LearnDistance(outputReturnMat);
}
-else if (strcmp(optimizer, "amsgrad") == 0)
+else if (strcmp(optimizer, "lbfgs") == 0)
{
LMNN<LMetric<2>, ens::L_BFGS> lmnn(data, labelsVector, k);
lmnn.Regularization() = regularization;
......
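For reference, a minimal sketch of the mlpack LMNN usage that the lmnn glue function dispatches to, shown here with the L-BFGS branch from the last hunk (the header path is an assumption; k, the regularization value and the data are illustrative only):

#include <mlpack/methods/lmnn/lmnn.hpp>   // assumed header location
#include <armadillo>
using namespace arma;
using namespace mlpack::lmnn;
using namespace mlpack::metric;

int main()
{
    mat data(2, 8, fill::randu);                  // 2 features, 8 points
    Row<size_t> labels = {0, 0, 0, 0, 1, 1, 1, 1};
    size_t k = 3;                                 // number of target neighbours per point

    LMNN<LMetric<2>, ens::L_BFGS> lmnn(data, labels, k);
    lmnn.Regularization() = 0.5;

    mat learnedTransform;
    lmnn.LearnDistance(learnedTransform);         // learned linear transformation
    return 0;
}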