diff --git a/code/binary_bpr/main.py b/code/binary_bpr/main.py
index b920d8d5f74191bd106b30f6ad3113375c54f665..479cedff19743bad9076af5eba80991c6e7ed2c3 100644
--- a/code/binary_bpr/main.py
+++ b/code/binary_bpr/main.py
@@ -289,6 +289,9 @@ if __name__ == '__main__':
     acc_list = []
     auc_list = []
     rmse_list = []
+    precision_list = []
+    recall_list = []
+    f1_list = []
     doa_train = []
     #doa_test = []
     if mode == 1 :
@@ -308,6 +311,7 @@ if __name__ == '__main__':
             dico_kc, dico_users, dico_items = read_file(dataTrain, dataTest)
             embedding_size = len(dico_kc)
             dico_items, t_train, ty_train, item_users = parse_dataframe(dataTrain, dico_kc, dico_users, dico_items, True)
+
             train, y_train = generate_quad(dico_items, t_train, ty_train, item_users, alpha)
             dico_items, test, y_test = parse_dataframe(dataTest, dico_kc, dico_users, dico_items, False)
             num_users = len(dico_users)
@@ -328,10 +332,13 @@ if __name__ == '__main__':
             write_file_doa(FileNameTrain_temp, emb[0], train, dico_kc, dico_users, dico_items)
             doa = compute_doa(FileNameTrain_temp)
             # Test
-            correctness, acc, users, auc, rmse = bpr_model.evaluate_model(test, len(dico_kc), y_test)
+            correctness, acc, users, auc, rmse, precision, recall, f1 = bpr_model.evaluate_model(test, len(dico_kc), y_test)
             acc_list.append(acc)
             auc_list.append(auc)
             rmse_list.append(rmse)
+            precision_list.append(precision)
+            recall_list.append(recall)
+            f1_list.append(f1)
             doa_train.append(doa)
             print("Doa:", doa)
             print("AUC and RMSE:", auc, rmse)
@@ -345,11 +352,17 @@ if __name__ == '__main__':
         print(acc_list)
         print(auc_list)
         print(rmse_list)
+        print(precision_list)
+        print(recall_list)
+        print(f1_list)
         print(doa_train)
         #print(doa_test)
         print("acc :",np.mean(acc_list),"+-",np.std(acc_list))
         print("auc :", np.mean(auc_list), "+-", np.std(auc_list))
         print("rmse :", np.mean(rmse_list), "+-", np.std(rmse_list))
+        print("precision :", np.mean(precision_list), "+-", np.std(precision_list))
+        print("recall :", np.mean(recall_list), "+-", np.std(recall_list))
+        print("f1 :", np.mean(f1_list), "+-", np.std(f1_list))
         print("doa_train :", np.mean(doa_train), "+-", np.std(doa_train))
         #print("doa_test :", np.mean(doa_test), "+-", np.std(doa_test))
         #print("reo :",1- np.mean(doa_test)/np.mean(doa_train))
@@ -381,7 +394,7 @@ if __name__ == '__main__':
         doa = compute_doa(trainFileName)
         print("Doa:", doa)
         # Test
-        correctness, acc, users, auc, rmse = bpr_model.evaluate_model(test, len(dico_kc), y_test)
+        correctness, acc, users, auc, rmse, precision, recall, f1 = bpr_model.evaluate_model(test, len(dico_kc), y_test)
         print(f'Accuracy: {acc}')
         #new_embedding_value = bpr_model.user_embeddings.weight.clone().detach().cpu().numpy()
         #write_file_doa_test(testFileName, new_embedding_value, test, y_test, dico_kc, dico_users, dico_items)
diff --git a/code/binary_bpr/script.py b/code/binary_bpr/script.py
index f87bf4fd7be4ec040413fd82b36db2eded9548c7..2a77babc503f6b4a78c730e2c35b3057c5617668 100644
--- a/code/binary_bpr/script.py
+++ b/code/binary_bpr/script.py
@@ -1,8 +1,8 @@
 import os
-dPath = "../../data/"
+dPath = "../../data/cdbpr_format/"
 embDirPath = "../../results/table_2/"
 datasets = ['assist0910_tkde', 'assist17_tkde', 'algebra','math_1', 'math_2']
-epochs = [75 ,75, 95, 5, 90, 90]
+epochs = [70, 95, 75, 75, 90]
 batchSize =[ 512, 512,512, 512,512]
 learningRate = [0.01,0.01,0.01,0.01,0.01]
 mode = [1,1,1,1,1]
diff --git a/code/binary_bpr_ablation/BPR_model.py b/code/binary_bpr_ablation/BPR_model.py
index 6746938213182c83c589044107826db01f8a4caf..441416e0a6be1272d3cbb055097e89b872997eee 100644
--- a/code/binary_bpr_ablation/BPR_model.py
+++ b/code/binary_bpr_ablation/BPR_model.py
@@ -5,7 +5,7 @@ from torch.utils.data import DataLoader, TensorDataset
 import numpy as np
 import torch.nn.functional as F
 import pandas as pd
-from sklearn.metrics import roc_auc_score
+from sklearn.metrics import roc_auc_score, recall_score, f1_score
 from sklearn.metrics import mean_squared_error
 from sklearn.metrics import accuracy_score
 from sklearn.metrics import precision_score
@@ -151,12 +151,12 @@ class BPRModel(nn.Module):
                     all_decisions = np.concatenate((all_decisions, comp), axis=0)
                                
         mse1 = mean_squared_error(all_labels, all_predictions)
-        print("RMSE", np.sqrt(mse1))
         auc = roc_auc_score(all_labels, all_predictions)
-        print("AUC:", auc)
         acc = accuracy_score(all_labels, all_decisions)
-        print("ACC:", acc)
         precision = precision_score(all_labels, all_decisions)
-        print(f'Prec: {precision}')
-        return acc, precision
+        recall = recall_score(all_labels, all_decisions)
+        f1 = f1_score(all_labels, all_decisions)
+
+        print(f"acc, precision, recall, f1, auc, rmse : {acc},{precision},{recall},{f1},{auc},{np.sqrt(mse1)}")
+        return acc, precision, recall, f1, auc, np.sqrt(mse1)
 
diff --git a/code/binary_bpr_ablation/main.py b/code/binary_bpr_ablation/main.py
index 3fa28a5b549bb298bf38638bb1366086300e3ec0..0f8217a25a3c6d6a5df47629e822a2bd2c5bbf26 100644
--- a/code/binary_bpr_ablation/main.py
+++ b/code/binary_bpr_ablation/main.py
@@ -187,8 +187,7 @@ def write_file_doa(FileName, embed, train, dico_kc, dico_users, dico_items):
 #############################
 #############################
 # HP
-epochs = 20
-batch_size = 1024
+batch_size = 512
 ablation = 0
 # 0 no ablation, 1 ablation L2, 2 ablation init, 3 both
 if __name__ == '__main__':
@@ -196,9 +195,11 @@ if __name__ == '__main__':
     parser.add_argument("-dtrain", "--dataTrain", help="data file")
     parser.add_argument("-dtest", "--dataTest", help="data file")
     parser.add_argument("-ab", "--ablation", help="int")
+    parser.add_argument("-e", "--epochs", help="number of training epochs (int)", type=int, default=20)
     args = parser.parse_args()
     dataTrain = args.dataTrain
     dataTest = args.dataTest
+    epochs = args.epochs
     if args.ablation:
         ablation = int(args.ablation)
     file = dataTrain[:-4]
@@ -223,4 +224,5 @@ if __name__ == '__main__':
     doa = compute_doa(file)
     print("DOA:", doa)
     # Test
-    acc, precision = bpr_model.evaluate_model(test, len(dico_kc), y_test) 
+    acc, precision, recall,f1, auc, rmse = bpr_model.evaluate_model(test, len(dico_kc), y_test)
+
diff --git a/code/binary_bpr_ablation/script_ablation.py b/code/binary_bpr_ablation/script_ablation.py
index 7825c4b543b207d1ca0214d7e68a3eb72c2427cc..b7fb6e3deb0d3bb98f7f71c1d320deeebeaedb35 100644
--- a/code/binary_bpr_ablation/script_ablation.py
+++ b/code/binary_bpr_ablation/script_ablation.py
@@ -1,9 +1,13 @@
 import os
 
+epochs = [70, 95, 75, 75, 90]
 name = ["assist0910_tkde","assist17_tkde","algebra", "math_1","math_2"]
+# NOTE(review): `i` indexes both the ablation mode AND the dataset/epochs
+# lists, so each ablation mode runs on exactly one dataset, and math_2
+# (index 4) is never used because of range(4) — confirm this is intentional.
 for i in range(4):
     print("Ablation (0 no ablation, 1 ablation L2, 2 ablation init, 3 both) ",i)
     for a in range(5):
         print(name[i])
-        cmd = "python main.py --dataTrain ../../data/cdbpr_format/"+name[i]+"/train_0.csv --dataTest ../../data/cdbpr_format/"+name[i]+"/test_0.csv --ablation "+str(i)
+        cmd = "python main.py --epochs "+str(epochs[i])+" --dataTrain ../../data/cdbpr_format/"+name[i]+"/train_valid_"+str(a)+".csv --dataTest ../../data/cdbpr_format/"+name[i]+"/test_"+str(a)+".csv --ablation "+str(i)
         os.system(cmd)