Commit 4dd41b9c
Authored 3 years ago by rtalbi

non-privacy-preserving neural networks (finished code, started debug)

Parent: f93579cf
Branch: Vstable
Changes: 3 changed files, with 60 additions and 21 deletions

ML/NN/NN.cpp      56 additions, 18 deletions
ML/NN/NN.h         2 additions, 1 deletion
ML/NN/neuron.cpp   2 additions, 2 deletions
ML/NN/NN.cpp  +56 −18

@@ -24,10 +24,9 @@ NN::NN(double alpha, int epochs, int batchSize, float th, DatasetReader *dt, str
     this->mainpath = mainpath;
     network.resize(3);
     // initialize the weights of the network
     for (int i = 1; i < network.size() + 1; i++)
     {
@@ -35,7 +34,7 @@ NN::NN(double alpha, int epochs, int batchSize, float th, DatasetReader *dt, str
     {
         vector<float> weights;
         for (int k = 0; k < network_dimensions[i - 1] + 1; k++)
-            weights.push_back(0.0);
+            weights.push_back(0.0); // counting the bias
         neuron *n = new neuron(weights, alpha, epochs, batchSize, th, dt, debug);
         network[i - 1].push_back(n);
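For orientation: each neuron is created with network_dimensions[i - 1] + 1 zero weights, the extra slot being the bias, and forward_layer later prepends a constant 1 to each input row so the plain dot product absorbs the bias term. A minimal, self-contained sketch of that convention (dot_with_bias is a hypothetical helper, not part of the repository):

    #include <vector>
    using std::vector;

    // Hypothetical sketch: a weight vector of fan_in + 1 entries whose first
    // entry is the bias; prepending a constant 1 to the input lets the plain
    // dot product fold the bias in, mirroring forward_layer's insert of 1.
    float dot_with_bias(const vector<float> &weights, vector<float> x)
    {
        x.insert(x.begin(), 1.0f);
        float acc = 0.0f;
        for (size_t k = 0; k < weights.size(); k++)
            acc += weights[k] * x[k];
        return acc;
    }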
@@ -58,23 +57,52 @@ NN::NN(double alpha, int epochs, int batchSize, float th, DatasetReader *dt, str
 }
-vector<vector<float>> NN::forward_layer(vector<neuron *> layer, vector<vector<float>> x, bool test){
+vector<vector<float>> NN::forward_layer(vector<neuron *> layer, vector<vector<float>> x, bool test, bool first){
     vector<vector<float>> res;
     if (!first)
     {
         for (int i = 0; i < x.size(); i++)
         {
             x[i].insert(x[i].begin(), 1);
         }
     }
     for (int j = 0; j < layer.size(); j++)
     {
         neuron *n = layer[j];
         res.push_back(n->predict_batch(x, test));
     }
     return res; // todo: add an additional step to invert: batch_layers
     vector<vector<float>> res_final(x.size(), vector<float>(layer.size(), 0.0));
     for (int i = 0; i < x.size(); i++)
     {
         for (int k = 0; k < layer.size(); k++)
         {
             float e = res[k][i];
             res_final[i][k] = e;
         }
     }
     return res_final;
 }
 vector<int> NN::predict(vector<Record *> R, bool test)
 {
     // todo: edit so that the final output is only a class label and make sure it's the same thing as ytrue
     bool first = true;
     vector<vector<float>> XB;
     for (int i = 0; i < R.size(); i++)
     {
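Worth noting: the early `return res;` makes everything after it unreachable, so forward_layer currently hands back activations in neuron-major layout (res[j][i] is neuron j's output for sample i) rather than the sample-major res_final the dead block would build. A standalone sketch of the inversion that block appears to intend (to_sample_major is a hypothetical helper name):

    #include <vector>
    using std::vector;

    // Sketch: convert a neuron-major activation batch (res[neuron][sample])
    // into the sample-major layout (out[sample][neuron]) targeted by the
    // unreachable res_final block above.
    vector<vector<float>> to_sample_major(const vector<vector<float>> &res)
    {
        if (res.empty()) return {};
        vector<vector<float>> out(res[0].size(), vector<float>(res.size(), 0.0f));
        for (size_t j = 0; j < res.size(); j++)        // neuron index
            for (size_t i = 0; i < res[j].size(); i++) // sample index
                out[i][j] = res[j][i];
        return out;
    }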
@@ -85,7 +113,9 @@ vector<int> NN::predict(vector<Record *>R, bool test ) {
     for (int i = 0; i < network.size(); i++)
     {
-        XB = forward_layer(network[i], XB, test);
+        XB = forward_layer(network[i], XB, test, first);
+        first = false;
     }
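So the input layer is invoked with first = true (no constant-1 insertion, the raw batch apparently already carrying its bias column) and every later layer with first = false. An equivalent formulation, purely as a sketch, derives the flag from the loop index instead of threading a mutable bool through the iterations:

    // Sketch: same behavior without the mutable 'first' variable.
    for (int i = 0; i < network.size(); i++)
        XB = forward_layer(network[i], XB, test, /*first=*/ i == 0);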
@@ -135,20 +165,28 @@ void NN::backpropagate(vector<Record *> XB){
     vector<int> prediction = predict(XB, false);
     vector<vector<float>> R;
-    vector<vector<float>> ytrue;
+    vector<vector<float>> ytrue(2, vector<float>(XB.size(), 0));
     int dim = XB[0]->values.size() - 1;
-    for (int i = 0; i < XB.size(); i++)
-    {
-        vector<float> r = vector<float>(XB[i]->values.begin(), XB[i]->values.end());
-        r.pop_back();
-        R.push_back(r);
-        std::vector<float> hot_label(2); // hard-coded the number of classes
-        hot_label[XB[i]->values[dim]] = 1;
-        hot_label[1 - XB[i]->values[dim]] = 0;
-        ytrue.push_back(hot_label);
-    }
+    int numberClasses = 2;
+    for (int i = 0; i < XB.size(); i++)
+    {
+        // todo: try to understand why there is a double amount of values per class in ytrue
+        vector<float> r = vector<float>(XB[i]->values.begin(), XB[i]->values.end());
+        r.pop_back();
+        R.push_back(r);
+        std::vector<float> hot_label(2); // hard-coded the number of classes
+        for (int s = 0; s < numberClasses; s++)
+        {
+            if (s != XB[i]->values[dim])
+                ytrue[s].push_back(0);
+            else
+                ytrue[s].push_back(1);
+        }
+    }
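The rewritten loop stores labels class-major rather than sample-major: ytrue[s][i] is 1 exactly when sample i belongs to class s, which appears to line up with the neuron-major batches forward_layer returns. A small worked sketch of that encoding (encode_class_major is a hypothetical helper):

    #include <vector>
    using std::vector;

    // Worked sketch: class-major one-hot encoding for integer labels.
    // For binary labels {1, 0, 1} this yields:
    //   ytrue[0] = {0, 1, 0}   (class-0 indicator per sample)
    //   ytrue[1] = {1, 0, 1}   (class-1 indicator per sample)
    vector<vector<float>> encode_class_major(const vector<int> &labels, int numberClasses)
    {
        vector<vector<float>> ytrue(numberClasses);
        for (size_t i = 0; i < labels.size(); i++)
            for (int s = 0; s < numberClasses; s++)
                ytrue[s].push_back(s == labels[i] ? 1.0f : 0.0f);
        return ytrue;
    }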
ML/NN/NN.h  +2 −1

@@ -25,6 +25,7 @@ public :
     double alpha;
     int batchSize;
     vector<vector<neuron *>> network;
+    vector<int> network_dimensions = {14, 12, 6, 2};
     float th;
@@ -60,7 +61,7 @@ public :
     void backpropagate(vector<Record *> XB);
 public:
-    vector<vector<float>> forward_layer(vector<neuron *> layer, vector<vector<float>> x, bool test);
+    vector<vector<float>> forward_layer(vector<neuron *> layer, vector<vector<float>> x, bool test, bool first = false);
 public:
     void train();
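Because the new parameter defaults to false, the old three-argument overload is gone but existing call sites still compile, silently taking the bias-inserting path; only NN::predict passes the flag explicitly. A self-contained sketch of that source compatibility (the free function here is a stand-in with the same shape, not the real member):

    #include <vector>
    using std::vector;

    // Stand-in mirroring the declaration above: callers that omit the last
    // argument get first = false, i.e. the constant-1 bias insert.
    vector<vector<float>> forward_layer(vector<vector<float>> x, bool test, bool first = false)
    {
        if (!first)
            for (auto &row : x)
                row.insert(row.begin(), 1.0f);
        return x;
    }

    int main()
    {
        vector<vector<float>> x = {{0.5f, 2.0f}};
        auto a = forward_layer(x, true);       // old-style call: bias prepended
        auto b = forward_layer(x, true, true); // input layer: row left untouched
        return (a[0].size() == 3 && b[0].size() == 2) ? 0 : 1;
    }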
ML/NN/neuron.cpp  +2 −2

@@ -94,8 +94,8 @@ vector<float> neuron::miniBatchGrad( vector<float> ypred, vector<float> ytrue
     vector<float> diff;
     vector<float> r;
     float inter = 0.0;
-    int dim = this->previous_input.size();
-    vector<vector<float>> XB;
+    int dim = this->previous_input[0].size();
+    vector<vector<float>> XB = this->previous_input;
     // Compute XB transpose
     float transpose[dim][XB.size()];
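One caveat on the context line after the change: `float transpose[dim][XB.size()]` is a variable-length array, a GCC/Clang extension rather than standard C++, and it lives on the stack, which can overflow for large batches. A portable, heap-backed sketch of the same transpose step:

    #include <vector>
    using std::vector;

    // Sketch: build the transpose of the stored batch with nested vectors
    // instead of a variable-length array (a compiler extension, not ISO C++).
    vector<vector<float>> transpose_batch(const vector<vector<float>> &XB)
    {
        if (XB.empty()) return {};
        size_t dim = XB[0].size(); // matches the corrected previous_input[0].size()
        vector<vector<float>> t(dim, vector<float>(XB.size(), 0.0f));
        for (size_t i = 0; i < XB.size(); i++)
            for (size_t k = 0; k < dim; k++)
                t[k][i] = XB[i][k];
        return t;
    }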