Skip to content
Snippets Groups Projects
Commit f35cbdc1 authored by Jakhes's avatar Jakhes
Browse files

Adding AdaBoost to the project

parent 3726a83c
No related branches found
No related tags found
No related merge requests found
......@@ -5,8 +5,10 @@ all:
# Build every wrapped mlpack method, forwarding the splfr linker path.
make -C src/methods/bayesian_linear_regression splfr=$(SPLFR_PATH)
make -C src/methods/linear_regression splfr=$(SPLFR_PATH)
make -C src/methods/lars splfr=$(SPLFR_PATH)
make -C src/methods/ada_boost splfr=$(SPLFR_PATH)
# Remove build artefacts from every method directory.
clean:
make -C src/methods/bayesian_linear_regression clean
make -C src/methods/linear_regression clean
make -C src/methods/lars clean
make -C src/methods/ada_boost clean
# Path to the SICStus foreign-resource linker (splfr).
splfr=/usr/local/sicstus4.7.1/bin/splfr
METHOD_NAME=ada_boost
# Link the Prolog declarations and the C++ glue code into a shared library,
# pulling in armadillo, OpenMP and mlpack; --struct enables library(structs).
$(METHOD_NAME).so: $(METHOD_NAME).pl $(METHOD_NAME).cpp
$(splfr) -larmadillo -fopenmp -lmlpack -lstdc++ -cxx --struct $(METHOD_NAME).pl $(METHOD_NAME).cpp ../../helper_files/helper.cpp
clean:
rm $(METHOD_NAME).so
#include <sicstus/sicstus.h>
/* ex_glue.h is generated by splfr from the foreign/[2,3] facts.
Always include the glue header in your foreign resource code.
*/
#include "ada_boost_glue.h"
#include <mlpack/methods/adaboost/adaboost.hpp>
#include <mlpack/core.hpp>
// including helper functions for converting between arma structures and arrays
#include "../../helper_files/helper.hpp"
// some of the most used namespaces
using namespace arma;
using namespace mlpack;
using namespace std;
using namespace mlpack::adaboost;
using namespace mlpack::tree;
using namespace mlpack::perceptron;
using namespace mlpack::util;
// Global model objects so they can be accessed from all glue functions.
// Exactly one of the two is the "active" model, selected by usingPerceptron.
AdaBoost<> adaBoostPerceptron;
// NOTE(review): the weak-learner type must match the objects assigned in
// initModelWithTraining / initModelNoTraining / train, which all construct
// AdaBoost<DecisionStump<>>; the previous AdaBoost<DecisionTree<>> declaration
// did not match those assignments.
AdaBoost<DecisionStump<>> adaBoostDecisionStump;
// true -> adaBoostPerceptron is active, false -> adaBoostDecisionStump.
bool usingPerceptron = true;
// TODO:
// input: const MatType & data,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
// const WeakLearnerType & other,
// const size_t iterations = 100,
// const double tolerance = 1e-6
// output:
// description:
void initModelWithTraining(float *dataArr, SP_integer dataSize, SP_integer dataRowNum, float *labelsArr, SP_integer labelSize, SP_integer numClasses, char const *learner,SP_integer iterations, double tolerance)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(dataArr, dataSize, dataRowNum);
// convert the Prolog arrays to arma::rowvec
rowvec labels = convertArrayToRowvec(labelsArr, labelSize);
Row<size_t> convLabels = conv_to<Row<size_t>>::from(labels);
if(strcmp(learner, "perceptron") == 0)
{
cout << "perceptron" << endl;
usingPerceptron = true;
adaBoostPerceptron = AdaBoost<Perceptron<>>(data, convLabels, numClasses, iterations, tolerance);
}
else if (strcmp(learner, "decision_stump") == 0)
{
cout << "decision_stump" << endl;
usingPerceptron = false;
adaBoostDecisionStump = AdaBoost<DecisionStump<>>(data, convLabels, numClasses, iterations, tolerance);
}
else cout << "wrong input" << endl;
}
// input: const double tolerance = 1e-6
// output: (none)
// description: Creates an untrained global AdaBoost model with the given
//              tolerance; the weak learner is chosen via the "learner"
//              string ("perceptron" or "decision_stump").
void initModelNoTraining(double tol = 1e-6, char const *learner = "perceptron")
{
    if (strcmp(learner, "perceptron") == 0)
    {
        cout << "perceptron" << endl;
        usingPerceptron = true;
        adaBoostPerceptron = AdaBoost<Perceptron<>>(tol);
    }
    else if (strcmp(learner, "decision_stump") == 0)
    {
        cout << "decision_stump" << endl;
        usingPerceptron = false;
        adaBoostDecisionStump = AdaBoost<DecisionStump<>>(tol);
    }
    else
    {
        // unknown learner string: leave the current model untouched
        cout << "wrong input" << endl;
    }
}
// TODO:
// input: const MatType & test,
// arma::Row< size_t > & predictedLabels,
// arma::mat & probabilities
// output:
// description:
void classifyWithProb(float *testMatArr, SP_integer testMatSize, SP_integer testMatRowNum, float **predLabelsArr, SP_integer *predLabelsArrSize, float **probMatArr, SP_integer *probMatColNum, SP_integer *probMatRowNum)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(testMatArr, testMatSize, testMatRowNum);
// get the ReturnVector get the ReturnMat
Row< size_t > predLabelsVector;
mat probMat;
if(usingPerceptron)
{
adaBoostPerceptron.Classify(data, predLabelsVector, probMat);
}
else
{
adaBoostDecisionStump.Classify(data, predLabelsVector, probMat);
}
vec convPredLabels = conv_to<vec>::from(predLabelsVector);
// return the Vector lenght
*predLabelsArrSize = convPredLabels.n_elem;
// return the Vector as Array
*predLabelsArr = convertToArray(convPredLabels);
// return the Matrix dimensions
*probMatColNum = probMat.n_cols;
*probMatRowNum = probMat.n_rows;
// return the Matrix as one long Array
*probMatArr = convertToArray(probMat);
}
// TODO:
// input: const MatType & test,
// arma::Row< size_t > & predictedLabels
// output:
// description:
void classifyNoProb(float *testMatArr, SP_integer testMatSize, SP_integer testMatRowNum, float **predLabelsArr, SP_integer *predLabelsArrSize)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(testMatArr, testMatSize, testMatRowNum);
// get the ReturnVector
Row< size_t > predLabelsVector;
if(usingPerceptron)
{
adaBoostPerceptron.Classify(data, predLabelsVector);
}
else
{
adaBoostDecisionStump.Classify(data, predLabelsVector);
}
vec convPredLabels = conv_to<vec>::from(predLabelsVector);
// return the Vector lenght
*predLabelsArrSize = convPredLabels.n_elem;
// return the Vector as Array
*predLabelsArr = convertToArray(convPredLabels);
}
// input: (none)
// output: size_t classesNum
// description: Returns the number of classes of the active global model.
SP_integer numClasses()
{
    return usingPerceptron ? adaBoostPerceptron.NumClasses()
                           : adaBoostDecisionStump.NumClasses();
}
// input: (none)
// output: double tol
// description: Returns the tolerance of the active global model.
double getTolerance()
{
    return usingPerceptron ? adaBoostPerceptron.Tolerance()
                           : adaBoostDecisionStump.Tolerance();
}
// input: double newTol
// output: (none)
// description: Overwrites the tolerance of the active global model.
//              Tolerance() returns a mutable reference, so we assign
//              through it directly.
void modifyTolerance(double newTol)
{
    if (usingPerceptron)
        adaBoostPerceptron.Tolerance() = newTol;
    else
        adaBoostDecisionStump.Tolerance() = newTol;
}
// TODO:
// input: const MatType & data,
// const arma::Row< size_t > & labels,
// const size_t numClasses,
// const WeakLearnerType & learner,
// const size_t iterations = 100,
// const double tolerance = 1e-6
//
// output: double upper bound training error
// description:
double train(float *dataArr, SP_integer dataSize, SP_integer dataRowNum, float *labelsArr, SP_integer labelSize, SP_integer numClasses, char const *learner,SP_integer iterations, double tolerance)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(dataArr, dataSize, dataRowNum);
// convert the Prolog arrays to arma::rowvec
rowvec labels = convertArrayToRowvec(labelsArr, labelSize);
Row<size_t> convLabels = conv_to<Row<size_t>>::from(labels);
if(strcmp(learner, "perceptron") == 0 && usingPerceptron)
{
cout << "perceptron" << endl;
return adaBoostPerceptron.Train(data, convLabels, numClasses, Perceptron(), iterations, tolerance);
}
else if (strcmp(learner, "decision_stump") == 0 && !usingPerceptron)
{
cout << "decision_stump" << endl;
return adaBoostDecisionStump.Train(data, convLabels, numClasses, DecisionStump<>(), iterations, tolerance);
}
else cout << "wrong input" << endl;
return 0;
}
%% Prolog interface for mlpack's AdaBoost classifier; each exported
%% predicate maps onto a glue function in ada_boost.cpp.
:- module(ada_boost, [
initModelWithTraining/9,
initModelNoTraining/2,
classifyWithProb/8,
classifyNoProb/5,
numClasses/1,
getTolerance/1,
modifyTolerance/1,
train/10]).
%% requirements of library(struct)
:- load_files(library(str_decl),
[when(compile_time), if(changed)]).
%% needed for using the array type
:- use_module(library(structs)).
%% shared list <-> float-array conversion helpers
:- use_module('../../helper_files/helper.pl').
%% type definitions for the float array
:- foreign_type
float32 = float_32,
float_array = array(float32).
%% definitions for the connected functions
%% initModelWithTraining(+DataArr, +DataSize, +DataRowNum, +LabelsArr,
%%                       +LabelsSize, +NumClasses, +Learner, +Iterations,
%%                       +Tolerance)
%% Creates the global AdaBoost model and trains it immediately; Learner is
%% "perceptron" or "decision_stump".
foreign(initModelWithTraining, c, initModelWithTraining(+pointer(float_array), +integer, +integer, +pointer(float_array), +integer, +integer, +string, +integer , +float32)).
%% initModelNoTraining(+Tolerance, +Learner)
%% Creates an untrained global AdaBoost model with the given tolerance.
foreign(initModelNoTraining, c, initModelNoTraining(+float32, +string)).
%% classifyWithProb(+TestArr, +TestSize, +TestRowNum,
%%                  -PredLabelsArr, -PredLabelsSize,
%%                  -ProbMatArr, -ProbMatColNum, -ProbMatRowNum)
%% Classifies the test points and additionally returns the class-probability
%% matrix as one flat array plus its dimensions.
foreign(classifyWithProb, c, classifyWithProb(+pointer(float_array), +integer, +integer, -pointer(float_array), -integer, -pointer(float_array), -integer, -integer)).
%% classifyNoProb(+TestArr, +TestSize, +TestRowNum,
%%                -PredLabelsArr, -PredLabelsSize)
%% Classifies the test points, returning only the predicted labels.
foreign(classifyNoProb, c, classifyNoProb(+pointer(float_array), +integer, +integer, -pointer(float_array), -integer)).
%% numClasses(-ClassesNum)
%% Returns the number of classes of the current model.
foreign(numClasses, c, numClasses([-integer])).
%% getTolerance(-Tol)
%% Returns the tolerance of the current model.
foreign(getTolerance, c, getTolerance([-float32])).
%% modifyTolerance(+NewTol)
%% Overwrites the tolerance of the current model.
foreign(modifyTolerance, c, modifyTolerance(+float32)).
%% train(+DataArr, +DataSize, +DataRowNum, +LabelsArr, +LabelsSize,
%%       +NumClasses, +Learner, +Iterations, +Tolerance, -ErrorBound)
%% Retrains the current model; ErrorBound is the upper bound of the
%% training error returned by mlpack's Train().
foreign(train, c, train(+pointer(float_array), +integer, +integer, +pointer(float_array), +integer, +integer, +string, +integer , +float32, [-float32])).
%% Defines the functions that get connected from main.cpp; the list must
%% name every foreign/3 fact declared above.
foreign_resource(ada_boost, [
initModelWithTraining,
initModelNoTraining,
classifyWithProb,
classifyNoProb,
numClasses,
getTolerance,
modifyTolerance,
train
]).
%% load the compiled ada_boost.so glue library at consult time
:- load_foreign_resource(ada_boost).
\ No newline at end of file
%% plunit test suite for the ada_boost foreign interface.
:- use_module(library(plunit)).
:- use_module(ada_boost).
:- use_module('../../helper_files/helper.pl').
%% Re-initialise the global model to an untrained perceptron-based
%% AdaBoost with tolerance 0.0001 before each test.
reset_Model :-
initModelNoTraining(0.0001, perceptron).
%% NOTE(review): the suite name "lists" looks copied from a template —
%% consider renaming to ada_boost.
:- begin_tests(lists).
%% alpha tests
%% Train on a 3x4 matrix (3 rows per column-point) and check that the
%% tolerance survives training and the error bound B is 1.
test(tol_after_train, [true(A =:= 0.0001), true(B =:= 1)]) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
convert_list_to_float_array([0.2,0.2,0.2,0.2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize, 2, perceptron, 50, 0.0001, B),
getTolerance(A).
%% Same check with the data reshaped to 6 rows and two labelled points.
test(tol_after_train2, [true(A =:= 0.0001), true(B =:= 1)]) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],6, array(Xsize, Xrownum, X)),
convert_list_to_float_array([1,2], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize, 2, perceptron, 50, 0.0001, B),
getTolerance(A).
%% Train then classify the training data, printing expected vs. predicted
%% labels for manual inspection.
test(tol_after_train_fail, [true(B =:= 1)]) :-
reset_Model,
convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],2, array(Xsize, Xrownum, X)),
convert_list_to_float_array([1,2,2,2,1,1], array(Ysize, Y)),
train(X,Xsize, Xrownum,Y, Ysize, 2, perceptron, 50, 0.0001, B),
classifyNoProb(X,Xsize, Xrownum, NewY, NewYsize),
print(Ysize),
print(NewYsize),
convert_float_array_to_list(NewY, NewYsize, Results),
print([1,2,2,2,1,1]),
print(Results).
:- end_tests(lists).
\ No newline at end of file
......@@ -27,7 +27,7 @@ void sampleFunction()
}
void takeNumberFunction(SP_integer integerNumber, double doubleNumber)
void takeNumberFunction(SP_integer integerNumber, double doubleNumber, char const *string)
{
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment