Commit 4dd41b9c authored by rtalbi

non-privacy-preserving neural networks (finished code, started debug)

parent f93579cf
Branch: Vstable
@@ -24,10 +24,9 @@ NN::NN(double alpha, int epochs, int batchSize, float th, DatasetReader *dt, str
this->mainpath = mainpath;
network.resize(3);
// initialize the weights of the network
for(int i=1; i < network.size()+1; i++)
{
@@ -35,7 +34,7 @@ NN::NN(double alpha, int epochs, int batchSize, float th, DatasetReader *dt, str
{
vector<float> weights;
for (int k=0; k < network_dimensions[i-1]+1; k++)
weights.push_back(0.0);
weights.push_back(0.0); // account for the bias term
neuron *n = new neuron(weights, alpha,epochs, batchSize, th, dt,debug);
network[i-1].push_back(n);
@@ -58,23 +57,52 @@ NN::NN(double alpha, int epochs, int batchSize, float th, DatasetReader *dt, str
}
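
For context, network_dimensions = {14,12,6,2} (declared in the header below) means 14 inputs, hidden layers of 12 and 6 neurons, and 2 outputs, i.e. the three weight layers behind network.resize(3). A minimal self-contained sketch of the same set-up; note it deliberately swaps the all-zero initialisation above for small random values, since identical zero weights leave every neuron in a layer computing the same gradient:

#include <vector>
#include <random>

int main() {
    std::vector<int> network_dimensions = {14, 12, 6, 2};
    std::mt19937 gen(42);
    std::uniform_real_distribution<float> dist(-0.05f, 0.05f);

    // weights[l][j][k]: layer l, neuron j, coefficient k; index 0 is the
    // bias slot, hence the network_dimensions[i-1]+1 count, as in the
    // constructor above.
    std::vector<std::vector<std::vector<float>>> weights(network_dimensions.size() - 1);
    for (std::size_t i = 1; i < network_dimensions.size(); ++i) {
        weights[i - 1].resize(network_dimensions[i]);
        for (auto &w : weights[i - 1])
            for (int k = 0; k < network_dimensions[i - 1] + 1; ++k)
                w.push_back(dist(gen));
    }
    return 0;
}
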
vector<vector<float>> NN::forward_layer(vector<neuron*> layer, vector<vector<float>> x, bool test ){
vector<vector<float>> NN::forward_layer(vector<neuron*> layer, vector<vector<float>> x, bool test, bool first ){
vector<vector<float>> res;
if(!first)
{
for (int i=0; i<x.size(); i++)
{
x[i].insert(x[i].begin(), 1);
}
}
for (int j=0; j < layer.size(); j++)
{
neuron *n = layer[j];
res.push_back(n->predict_batch(x,test));
}
return res;
// todo: add an additional step to invert the batch/layer axes
vector<vector<float>> res_final(x.size(), vector<float>(layer.size(), 0.0));
for (int i=0; i<x.size(); i++)
{
for (int k=0; k< layer.size(); k++)
{
float e = res[k][i];
res_final[i][k] = e;
}
}
return res_final;
}
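
The block added above fixes the shape of the layer output: predict_batch fills res with one row per neuron ([neurons][samples]), while the next layer expects one row per sample ([samples][neurons]). The same transpose in isolation, as a minimal sketch assuming a rectangular matrix:

#include <vector>

using Matrix = std::vector<std::vector<float>>;

// Swap the batch and layer axes: res[k][i] (neuron k, sample i)
// becomes out[i][k] (sample i, neuron k).
Matrix swap_batch_layer(const Matrix &res) {
    if (res.empty()) return {};
    Matrix out(res[0].size(), std::vector<float>(res.size(), 0.0f));
    for (std::size_t k = 0; k < res.size(); ++k)
        for (std::size_t i = 0; i < res[k].size(); ++i)
            out[i][k] = res[k][i];
    return out;
}
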
vector<int> NN::predict(vector<Record *>R, bool test ) {
// todo: edit so that the final output is only a class label, and make sure it's the same thing as ytrue
bool first = true;
vector<vector<float>> XB;
for (int i=0; i < R.size(); i++)
{
@@ -85,7 +113,9 @@ vector<int> NN::predict(vector<Record *>R, bool test ) {
for (int i=0; i < network.size(); i++)
{
XB = forward_layer(network[i], XB, test );
XB = forward_layer(network[i], XB, test, first);
first = false;
}
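
The first flag added to this loop exists because forward_layer prepends a constant-1 bias entry to every sample except at the input layer; that entry then lines up with the extra bias weight counted in the constructor. That step in isolation, as a sketch assuming x is laid out as [samples][features]:

#include <vector>

using Matrix = std::vector<std::vector<float>>;

// Prepend the constant bias input to each sample so it matches the
// bias weight stored at index 0 of every neuron's weight vector.
void add_bias_column(Matrix &x) {
    for (auto &row : x)
        row.insert(row.begin(), 1.0f);
}

Prepending rather than appending matters only in that it must agree with where the constructor places the bias coefficient.
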
@@ -135,20 +165,28 @@ void NN::backpropagate(vector<Record *> XB){
vector<int> prediction = predict(XB, false);
vector<vector<float>> R;
vector<vector<float>>ytrue;
vector<vector<float>> ytrue(2, vector<float>(XB.size(),0));
int dim = XB[0]->values.size()-1;
for(int i=0; i<XB.size(); i++)
{
int numberClasses = 2;
for(int i=0; i<XB.size(); i++)
{
// todo: understand why ytrue ends up with twice as many values per class
vector<float> r = vector<float> (XB[i]->values.begin(), XB[i]->values.end());
r.pop_back();
R.push_back(r);
std::vector<float> hot_label(2); // number of classes is hard-coded
for (int s=0; s<numberClasses; s++) {
if(s!= XB[i]->values[dim])
ytrue[s].push_back(0);
else
ytrue[s].push_back(1);
}
}
vector<float> r = vector<float> (XB[i]->values.begin(), XB[i]->values.end());
r.pop_back();
R.push_back(r);
std::vector<float> hot_label(2); // number of classes is hard-coded
hot_label[XB[i]->values[dim]] = 1;
hot_label[1-XB[i]->values[dim]] = 0;
ytrue.push_back(hot_label);
}
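
The rewritten loop builds one one-hot row per record and appends it to ytrue, replacing the per-class push_back scheme above; that older version pre-sized each class vector with XB.size() zeros and then pushed XB.size() more entries onto it, which is the doubling flagged in the todo. The encoding as a small sketch, assuming integer labels in [0, num_classes):

#include <vector>

// One-hot encode a class label: a vector of zeros with a single 1 at
// the label's index. num_classes is 2 in the code above.
std::vector<float> one_hot(int label, int num_classes) {
    std::vector<float> v(num_classes, 0.0f);
    v[label] = 1.0f;
    return v;
}
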
@@ -25,6 +25,7 @@ public :
double alpha;
int batchSize;
vector<vector<neuron*>> network;
vector<int> network_dimensions = {14,12,6,2};
float th;
@@ -60,7 +61,7 @@ public :
void backpropagate(vector<Record *> XB);
public :
vector<vector<float>> forward_layer(vector<neuron*> layer, vector<vector<float>> x, bool test );
vector<vector<float>> forward_layer(vector<neuron*> layer, vector<vector<float>> x, bool test, bool first = false);
public :
void train ();
@@ -94,8 +94,8 @@ vector<float> neuron::miniBatchGrad( vector<float> ypred, vector<float> ytrue
vector <float > diff;
vector <float> r;
float inter = 0.0;
int dim = this->previous_input.size();
vector<vector<float>> XB;
int dim = this->previous_input[0].size();
vector<vector<float>> XB = this->previous_input;
// Compute XB transpose
float transpose[dim][XB.size()];
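
Note that float transpose[dim][XB.size()] is a variable-length array: XB.size() is not a compile-time constant, so this only builds as a GCC/Clang extension and puts the whole matrix on the stack, which is risky for large batches. A standard-C++ drop-in sketch, reusing the function's dim and XB and keeping the same [features][samples] layout:

// Heap-allocated replacement for the VLA, same indexing convention.
std::vector<std::vector<float>> transposeXB(dim, std::vector<float>(XB.size(), 0.0f));
for (std::size_t i = 0; i < XB.size(); ++i)
    for (int j = 0; j < dim; ++j)
        transposeXB[j][i] = XB[i][j];
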