diff --git a/data_viz.py b/data_viz.py
index 385a19f86da782372a70c6f01c6e56d874f015a0..b655f79c63a48307ffa2a30d5decb2791c4df3c2 100644
--- a/data_viz.py
+++ b/data_viz.py
@@ -254,7 +254,7 @@ def add_length(dataframe):
     dataframe['length']=dataframe['seq'].map(fonc)
 
 
-# df = pd.read_csv('output/out_common_ISA_ISA_eval.csv')
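+# Load evaluation results for the (currently commented-out) plotting calls below.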
+df = pd.read_csv('output/out_common_ISA_ISA_eval_2.csv')
 # add_length(df)
 # df['abs_error'] =  np.abs(df['rt pred']-df['true rt'])
 # histo_abs_error(df, display=False, save=True, path='fig/custom model res/histo_ISA_ISA_eval.png')
@@ -275,11 +275,11 @@ def add_length(dataframe):
 # scatter_rt(df, display=False, save=True, path='fig/custom model res/RT_pred_prosit_ISA_eval.png', color=True)
 # histo_length_by_error(df, bins=10, display=False, save=True, path='fig/custom model res/histo_length_prosit_ISA_eval.png')
 
-# df = pd.read_csv('output/out_common_ISA_ISA_eval_2.csv')
+# df = pd.read_csv('output/out_common_ISA_ISA_eval_3.csv')
 # add_length(df)
 # df['abs_error'] =  np.abs(df['rt pred']-df['true rt'])
-# histo_abs_error(df, display=False, save=True, path='fig/custom model res/histo_ISA_ISA_eval_2.png')
-# scatter_rt(df, display=False, save=True, path='fig/custom model res/RT_pred_ISA_ISA_eval_2_seq.png', color=True, col = 'seq')
-# histo_length_by_error(df, bins=10, display=False, save=True, path='fig/custom model res/histo_length_ISA_ISA_eval_2.png')
+# histo_abs_error(df, display=False, save=True, path='fig/custom model res/histo_ISA_ISA_eval_3.png')
+# scatter_rt(df, display=False, save=True, path='fig/custom model res/RT_pred_ISA_ISA_eval_3_file.png', color=True, col = 'file')
+# histo_length_by_error(df, bins=10, display=False, save=True, path='fig/custom model res/histo_length_ISA_ISA_eval_3.png')
 
 
diff --git a/main_custom.py b/main_custom.py
index 7de21bdad583b8cfe578e8526ce550d15b2cc3b3..59f34e834557b4d8b41844ee600e9133b2d5320c 100644
--- a/main_custom.py
+++ b/main_custom.py
@@ -186,6 +186,16 @@ def run(epochs, eval_inter, save_inter, model, data_train, data_val, data_test,
             if e % save_inter == 0:
                 save(model, 'model_common_' + str(e) + '.pt')
         save_pred(model, data_val, 'both', output)
+    elif forward == 'reverse':
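+        # Reverse setup: train on both the RT and intensity objectives, but evaluate and export predictions for RT only.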
+        for e in range(1, epochs + 1):
+            train(model, data_train, e, optimizer, criterion_rt, criterion_intensity, metric_rt, metric_intensity, 'both',
+                  wandb=wandb)
+            if e % eval_inter == 0:
+                eval(model, data_val, e, criterion_rt, criterion_intensity, metric_rt, metric_intensity, 'rt',
+                     wandb=wandb)
+            if e % save_inter == 0:
+                save(model, 'model_common_' + str(e) + '.pt')
+        save_pred(model, data_val, 'rt', output)
 
     else :
         for e in range(1, epochs + 1):
@@ -213,7 +223,7 @@ def main(args):
         data_train, data_val, data_test = common_dataset.load_data(path_train=args.dataset_train,
                                                                    path_val=args.dataset_val,
                                                                    path_test=args.dataset_test,
-                                                                   batch_size=args.batch_size, length=25, pad = True, convert=True, vocab='iapuc')
+                                                                   batch_size=args.batch_size, length=25, pad = True, convert=True, vocab='unmod')
     elif args.forward == 'rt':
         data_train, data_val, data_test = dataloader.load_data(data_sources=[args.dataset_train,args.dataset_val,args.dataset_test],
                                                                batch_size=args.batch_size, length=25)
@@ -225,7 +235,16 @@ def main(args):
         _, data_val, data_test = common_dataset.load_data(path_train=args.dataset_val,
                                                                    path_val=args.dataset_val,
                                                                    path_test=args.dataset_test,
-                                                                   batch_size=args.batch_size, length=25, pad = True, convert=True, vocab='iapuc')
+                                                                   batch_size=args.batch_size, length=25, pad = True, convert=True, vocab='unmod')
+
+    elif args.forward == 'reverse':
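+        # Validation/test batches come from the RT-only dataloader; the hard-coded holdout file only fills the unused train slot.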
+        _, data_val, data_test = dataloader.load_data(data_sources=['database/data_holdout.csv',args.dataset_val,args.dataset_test],
+                                                               batch_size=args.batch_size, length=25)
+
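+        # Training batches come from the common (RT + intensity) dataset; the same path fills all three slots since only the train split is kept.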
+        data_train, _, _ = common_dataset.load_data(path_train=args.dataset_train,
+                                                                   path_val=args.dataset_train,
+                                                                   path_test=args.dataset_train,
+                                                                   batch_size=args.batch_size, length=25, pad = True, convert=True, vocab='unmod')
 
     print('\nData loaded')