diff --git a/draw_visu.py b/draw_visu.py
index 04e34c362f665c22ef8572415cac6e771dcf3974..69d4738aaa368b40f2bf1ddd7b6432695e0d35de 100644
--- a/draw_visu.py
+++ b/draw_visu.py
@@ -34,7 +34,7 @@ def load_data(fn, graph_dir):
     df = pd.read_csv(fn, sep="\t")
     df["type_graph"] = df.filename.apply(lambda x: x[6:]).apply(lambda x: re.sub("_[\d]+.gml", "", x).replace("_", " "))
     df["parameters"] = df.filename.apply(lambda x: get_graph_attr(x, graph_dir))
-    df["sample"] = df.filename.apply(get_sample_id_old)
+    df["sample"] = df.filename.apply(get_sample_id)
     non_ne = {'random_prediction', 'common_neighbours', 'jaccard_coefficient', 'adamic_adar_index',
               'preferential_attachment', 'resource_allocation_index', 'stochastic_block_model',
               'stochastic_block_model_degree_corrected', 'spatial_link_prediction'}
diff --git a/evalNE_script.py b/evalNE_script.py
index 4aa2d0872fe5d84c6aedb3bae08d3224290368d1..d9e1d12a543b0317aaadcfb3d797cd6f16e6e446 100644
--- a/evalNE_script.py
+++ b/evalNE_script.py
@@ -42,7 +42,8 @@ log("Building link prediction dataset...")
 # Create an evaluator and generate train/test edge split
 traintest_split = LPEvalSplit()
 try:
-    traintest_split.compute_splits(G, split_alg="random", train_frac=args.train_frac, fe_ratio=1)
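+    # "spanning_tree" keeps the sampled training graph connected; on ValueError the "fast" split below is used instead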
+    traintest_split.compute_splits(G, split_alg="spanning_tree", train_frac=args.train_frac, fe_ratio=1)
 except ValueError:
     traintest_split.compute_splits(G, split_alg="fast", train_frac=args.train_frac, fe_ratio=1)
 print("BEFORE", len(traintest_split.test_edges))
@@ -88,7 +89,7 @@ if args.network_embedding:
             "python -m openne --method grarep --epochs 100"
             # "python -m openne --method lap --epochs 100",
             ]
-        edge_emb = ['average', 'hadamard']
+        edge_emb = ['hadamard']  # 'average'
 
         # Evaluate embedding methods
         pbar = tqdm(enumerate(methods), disable=(not args.verbose))
diff --git a/run_eval_par.py b/run_eval_par.py
index ba738dce678a1dfabfd21841dfbb17fa3ede79e2..5c46de0fd4a4fd8254da62ed96011d30dd536a82 100644
--- a/run_eval_par.py
+++ b/run_eval_par.py
@@ -16,12 +16,18 @@ parser = argparse.ArgumentParser()
 parser.add_argument("dataset_dir")
 parser.add_argument("output_filename")
 parser.add_argument("-f", "--format", default="gexf", choices=["gexf", "gml", "txt"])
+parser.add_argument("-t","--train-frac",default=0.9,type=float)
+parser.add_argument("-n","--n-jobs",default=2,type=int)
 
 args = parser.parse_args()
 fns = sorted(glob.glob(args.dataset_dir + "/*." + args.format))
 
 def run_eval(fn):
-    command = "python evalNE_script.py {0} -f {1}".format(fn, args.format).split()
+    verbose_cmd = "-v" if args.verbose else ""
+    command = "python evalNE_script.py {0} -f {1} -n --train-frac {2} {3}".format(fn, args.format,args.train_frac,verbose_cmd).split()
     output = subprocess.run(command)
     if not output.returncode == 0:
         print("Error! for the command :", " ".join(command))
@@ -29,7 +35,7 @@ def run_eval(fn):
 all_res = []
 
 # Run link prediction
-Parallel(n_jobs=4,backend="multiprocessing")(delayed(run_eval)(fn) for fn in tqdm(fns))
+Parallel(n_jobs=args.n_jobs, backend="multiprocessing")(delayed(run_eval)(fn) for fn in tqdm(fns))
 
 pbar = tqdm(fns)
 for fn in pbar: