Commit 1883837c authored by Jakhes

Adding lmnn

parent 2ffc23d5
splfr=/usr/local/sicstus4.7.1/bin/splfr
METHOD_NAME=lmnn

$(METHOD_NAME).so: $(METHOD_NAME).pl $(METHOD_NAME).cpp
	$(splfr) -larmadillo -fopenmp -lmlpack -lstdc++ -cxx --struct $(METHOD_NAME).pl $(METHOD_NAME).cpp ../../helper_files/helper.cpp

clean:
	rm $(METHOD_NAME).so
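# Note (illustrative sketch): `make lmnn.so` runs the splfr rule above and produces
# the foreign resource; lmnn.pl loads it through its `:- load_foreign_resource(lmnn).`
# directive, so an interactive SICStus session afterwards only needs
# `?- use_module(lmnn).` to make lmnn/20 available.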
#include <sicstus/sicstus.h>
/* lmnn_glue.h is generated by splfr from the foreign/[2,3] facts.
Always include the glue header in your foreign resource code.
*/
#include "lmnn_glue.h"
#include <mlpack/methods/lmnn/lmnn.hpp>
#include <mlpack/core.hpp>
// including helper functions for converting between arma structures and arrays
#include "../../helper_files/helper.hpp"
// some of the most used namespaces
using namespace arma;
using namespace mlpack;
using namespace std;
using namespace lmnn;
using namespace mlpack::metric;
// input: const arma::mat & dataset,
// const arma::Row< size_t > & labels,
// const size_t k,
// const MetricType metric = MetricType()
// output: arma::mat & outputMatrix
// description: learns a distance metric for the given dataset and labels with
// Large Margin Nearest Neighbors (LMNN) and writes the learned matrix to outputMatrix.
void lmnn(char const *optimizer,
float *dataMatArr, SP_integer dataMatSize, SP_integer dataMatRowNum,
float *labelsArr, SP_integer labelsArrSize,
SP_integer k,
double regularization, double stepSize, SP_integer passes, SP_integer maxIterations, double tolerance,
SP_integer center, SP_integer shuffle,
SP_integer batchSize, SP_integer range, SP_integer rank,
float **outputMatArr, SP_integer *outputMatColNum, SP_integer *outputMatRowNum)
{
// convert the Prolog arrays to arma::mat
mat data = convertArrayToMat(dataMatArr, dataMatSize, dataMatRowNum);
// convert the Prolog array to an arma::Row<size_t>
Row< size_t > labelsVector = convertArrayToVec(labelsArr, labelsArrSize);
// Carry out mean-centering on the dataset, if necessary.
if (center == 1)
{
for (size_t i = 0; i < data.n_rows; ++i)
{
data.row(i) -= arma::mean(data.row(i));
}
}
// Now, normalize the labels.
arma::Col<size_t> mappings;
arma::Row<size_t> labels;
data::NormalizeLabels(labelsVector, labels, mappings);
// create the matrix that the learned distance is returned in
mat outputReturnMat;
if (rank > 0)
{
// low-rank case: start from a random (rank x dimensionality) matrix
outputReturnMat = arma::randu<arma::mat>(rank, data.n_rows);
}
else
{
// full-rank case: start from the identity matrix
outputReturnMat.eye(data.n_rows, data.n_rows);
}
if (strcmp(optimizer, "amsgrad") == 0)
{
// AMSGrad is the default optimizer type of mlpack's LMNN class
LMNN<LMetric<2>> lmnn(data, labels, k);
lmnn.Regularization() = regularization;
lmnn.Range() = range;
lmnn.Optimizer().StepSize() = stepSize;
lmnn.Optimizer().MaxIterations() = passes * data.n_cols;
lmnn.Optimizer().Tolerance() = tolerance;
lmnn.Optimizer().Shuffle() = (shuffle == 1);
lmnn.Optimizer().BatchSize() = batchSize;
lmnn.LearnDistance(outputReturnMat);
}
else if (strcmp(optimizer, "bbsgd") == 0)
{
LMNN<LMetric<2>, ens::BBS_BB> lmnn(data, labels, k);
lmnn.Regularization() = regularization;
lmnn.Range() = range;
lmnn.Optimizer().StepSize() = stepSize;
lmnn.Optimizer().MaxIterations() = passes * data.n_cols;
lmnn.Optimizer().Tolerance() = tolerance;
lmnn.Optimizer().Shuffle() = (shuffle == 1);
lmnn.Optimizer().BatchSize() = batchSize;
lmnn.LearnDistance(outputReturnMat);
}
else if (strcmp(optimizer, "sgd") == 0)
{
// advice taken from mlpack's lmnn_main file:
// using SGD is not recommended, as the learning matrix can
// diverge to inf, causing serious memory problems.
LMNN<LMetric<2>, ens::StandardSGD> lmnn(data, labels, k);
lmnn.Regularization() = regularization;
lmnn.Range() = range;
lmnn.Optimizer().StepSize() = stepSize;
lmnn.Optimizer().MaxIterations() = passes * data.n_cols;
lmnn.Optimizer().Tolerance() = tolerance;
lmnn.Optimizer().Shuffle() = (shuffle == 1);
lmnn.Optimizer().BatchSize() = batchSize;
lmnn.LearnDistance(outputReturnMat);
}
else if (strcmp(optimizer, "lbfgs") == 0)
{
LMNN<LMetric<2>, ens::L_BFGS> lmnn(data, labels, k);
lmnn.Regularization() = regularization;
lmnn.Range() = range;
lmnn.Optimizer().MaxIterations() = maxIterations;
lmnn.Optimizer().MinGradientNorm() = tolerance;
lmnn.LearnDistance(outputReturnMat);
}
else
{
cerr << "wrong optimizer input: expected amsgrad, bbsgd, sgd or lbfgs" << endl;
}
// return the Matrix
returnMatrixInformation(outputReturnMat, outputMatArr, outputMatColNum, outputMatRowNum);
}
:- module(lmnn, [lmnn/20]).
%% requirements of library(struct)
:- load_files(library(str_decl),
[when(compile_time), if(changed)]).
%% needed for using the array type
:- use_module(library(structs)).
:- use_module('../../helper_files/helper.pl').
%% type definitions for the float array
:- foreign_type
float32 = float_32,
float_array = array(float32).
%% definitions for the connected function
%% --Input--
%% string optimizer,
%% mat data,
%% vec labels,
%% int k,
%% float32 regularization,
%% float32 stepSize,
%% int passes,
%% int maxIterations,
%% float32 tolerance,
%% bool center => (1)true / (0)false,
%% bool shuffle => (1)true / (0)false,
%% int batchSize,
%% int range,
%% int rank,
%%
%% --Output--
%% mat distance
%%
%% --Description--
%% Learns a distance metric for the given dataset and labels with Large
%% Margin Nearest Neighbors (LMNN) and returns the learned matrix.
foreign(lmnn, c, lmnn( +string,
+pointer(float_array), +integer, +integer,
+pointer(float_array), +integer,
+integer,
+float32, +float32, +integer, +integer, +float32,
+integer, +integer,
+integer, +integer, +integer,
-pointer(float_array), -integer, -integer)).
%% Defines the functions that get connected from lmnn.cpp
foreign_resource(lmnn, [lmnn]).
:- load_foreign_resource(lmnn).
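%% Example (illustrative sketch): a possible call of lmnn/20 from SICStus once
%% the module is loaded. The data values, the label encoding and all
%% hyperparameter values below are illustrative assumptions; the argument order
%% follows the foreign/3 declaration above, and the convert_list_to_float_array
%% helpers are the ones from helper.pl that the tests below also use.
%%
%% ?- convert_list_to_float_array([5.1,3.5,1.4, 4.9,3.0,1.4, 4.7,3.2,1.3, 4.6,3.1,1.5], 3, array(Xsize, Xrownum, X)),
%%    convert_list_to_float_array([0.0,1.0,0.0,1.0], array(Ysize, Y)),
%%    lmnn(amsgrad,                      %% optimizer
%%         X, Xsize, Xrownum,            %% data matrix
%%         Y, Ysize,                     %% labels
%%         1,                            %% k
%%         0.5, 0.01, 50, 1000, 0.0001,  %% regularization, stepSize, passes, maxIterations, tolerance
%%         1, 1,                         %% center, shuffle
%%         50, 1, 0,                     %% batchSize, range, rank (0 = full rank)
%%         OutMat, OutCols, OutRows).    %% learned distance matrix and its dimensions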
:- use_module(library(plunit)).
:- use_module(lmnn).
:- use_module('../../helper_files/helper.pl').
%% helper: one call of lmnn/20 with the given optimizer and otherwise fixed,
%% illustrative hyperparameters (k = 1, regularization = 0.5, stepSize = 0.01,
%% passes = 50, maxIterations = 1000, tolerance = 0.0001, centering and
%% shuffling enabled, batchSize = 50, range = 1, full rank)
run_lmnn(Optimizer) :-
        convert_list_to_float_array([5.1,3.5,1.4,4.9,3.0,1.4,4.7,3.2,1.3,4.6,3.1,1.5],3, array(Xsize, Xrownum, X)),
        convert_list_to_float_array([0.0,1.0,0.0,1.0], array(Ysize, Y)),
        lmnn(Optimizer, X, Xsize, Xrownum, Y, Ysize, 1, 0.5, 0.01, 50, 1000, 0.0001, 1, 1, 50, 1, 0, _OutMat, _OutCols, _OutRows).
:- begin_tests(lmnn).
%% every supported optimizer should learn a distance matrix
test(lmnn_amsgrad) :-
        run_lmnn(amsgrad).
test(lmnn_bbsgd) :-
        run_lmnn(bbsgd).
test(lmnn_sgd) :-
        run_lmnn(sgd).
test(lmnn_lbfgs) :-
        run_lmnn(lbfgs).
:- end_tests(lmnn).
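%% Illustrative sketch: after consulting this test file in SICStus, the whole
%% suite can be run from the top level with library(plunit)'s runner:
%%
%% ?- run_tests.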