Commit 8d9ea597 authored by Schneider Leo

fix : print train

add : ref image count threshold
change : ray config
parent 9f54597d
@@ -77,7 +77,7 @@ def val_duo(model, data_test, loss_function, epoch, wandb):
     losses = losses / (label.shape[0] * len(data_test.dataset))
     acc = acc / (len(data_test.dataset))
     acc_contrastive = acc_contrastive / (label.shape[0] * len(data_test.dataset))
     print('Test epoch {}, loss : {:.3f} acc : {:.3f} acc contrastive : {:.3f}'.format(epoch, losses, acc,
                                                                                       acc_contrastive))
     if wandb is not None:
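For readers skimming the hunk above: the pattern is to accumulate per-batch sums during evaluation and normalise them by the dataset size once at the end, before printing and optionally logging to wandb. Below is a minimal sketch of that pattern, assuming a standard PyTorch DataLoader and a plain classification head; the repository's val_duo additionally tracks a contrastive accuracy, which is omitted here.

import torch

def validate(model, data_test, loss_function, epoch, wandb=None):
    # Sketch only: accumulate sums over the loader, then normalise once.
    model.eval()
    losses, acc = 0.0, 0.0
    with torch.no_grad():
        for image, label in data_test:
            output = model(image)
            losses += loss_function(output, label).item()
            acc += (output.argmax(dim=1) == label).sum().item()
    # Divide by the number of samples so values are comparable across epochs.
    losses = losses / len(data_test.dataset)
    acc = acc / len(data_test.dataset)
    print('Test epoch {}, loss : {:.3f} acc : {:.3f}'.format(epoch, losses, acc))
    if wandb is not None:
        wandb.log({'val_loss': losses, 'val_acc': acc, 'epoch': epoch})
    return losses, acc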
@@ -213,9 +213,9 @@ def test_model(best_result, args):
 def main(args, gpus_per_trial=1):
     config = {
         "lr": tune.loguniform(1e-4, 1e-2),
-        "noise": tune.loguniform(1, 1000),
+        "noise": tune.loguniform(1, 10000),
         "p_prop": tune.uniform(5, 95),
-        "optimizer": tune.choice(['Adam', 'SGD']),
+        "optimizer": tune.choice(['Adam', 'SGD']),  # Adam more effective?
         "sampler": tune.choice(['random', 'balanced']),
     }
     scheduler = ASHAScheduler(
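The config change above widens the search range for "noise" to 1-10000 and keeps Adam vs. SGD as a tunable choice. As a point of reference, here is a minimal sketch of how such a search space and an ASHAScheduler are typically wired together with Ray Tune's classic tune.run API; the placeholder trainable, metric name, and trial budget below are assumptions, not the repository's actual main().

from ray import tune
from ray.tune.schedulers import ASHAScheduler

def train_duo(config):
    # Placeholder trainable: a real one would build the model from config
    # ("lr", "noise", "p_prop", "optimizer", "sampler") and report real metrics.
    tune.report(val_loss=config["lr"] * config["noise"])

def main(gpus_per_trial=1):
    config = {
        "lr": tune.loguniform(1e-4, 1e-2),
        "noise": tune.loguniform(1, 10000),
        "p_prop": tune.uniform(5, 95),
        "optimizer": tune.choice(['Adam', 'SGD']),
        "sampler": tune.choice(['random', 'balanced']),
    }
    # ASHA stops under-performing trials early; the budget values are assumed.
    scheduler = ASHAScheduler(max_t=50, grace_period=5, reduction_factor=2)
    analysis = tune.run(
        train_duo,
        resources_per_trial={"cpu": 4, "gpu": gpus_per_trial},
        config=config,
        metric="val_loss",
        mode="min",
        num_samples=20,
        scheduler=scheduler,
    )
    return analysis.get_best_config(metric="val_loss", mode="min")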
@@ -173,7 +173,7 @@ if __name__ == '__main__':
     # print(pep)
     # f.write(pep+'\n')
     #
-    # df_count = compute_common_peptide("dataset_species_ref_peptides.csv", SPECIES)
+    df_count = compute_common_peptide("dataset_species_ref_peptides.csv", SPECIES)
     #
     # Create ref img
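Un-commenting this call makes df_count available to the reference-image step in the next hunk (see the sketch after it). From the way df_count is used there, compute_common_peptide appears to return one row per peptide sequence with a 'Count' of how many species share it; the following is a hypothetical re-implementation under that assumption (the 'Specie' and 'Sequence' column names come from the CSV usage elsewhere in the diff).

import pandas as pd

def compute_common_peptide(csv_path, species):
    # Hypothetical sketch: count, for each peptide sequence,
    # in how many of the listed species it appears.
    df = pd.read_csv(csv_path)
    df = df[df['Specie'].isin(species)]
    df_count = (df.drop_duplicates(['Specie', 'Sequence'])
                  .groupby('Sequence').size()
                  .reset_index(name='Count'))
    return df_count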
@@ -181,15 +181,16 @@ if __name__ == '__main__':
         'fasta/full proteom/steigerwaltii variants/uniparc_proteome_UP000033376_2025_03_14.predicted.parquet')
     min_rt = df_full['RT'].min()
     max_rt = df_full['RT'].max()
     #
     df = pd.read_csv("dataset_species_ref_peptides.csv")
     #
     for spe in SPECIES:
         print(spe)
         df_spe = df[df['Specie'] == spe]
+        df_spec_no_common = df_spe[df_spe['Sequence'].isin(df_count[df_count['Count']<5]['Sequence'])]
         im = build_ref_image_from_diann_global(
-            'fasta/global_peptide_list.parquet', target_seq=df_spe['Sequence'].to_list(), ms1_end_mz=1250,
+            'fasta/global_peptide_list.parquet', target_seq=df_spec_no_common['Sequence'].to_list(), ms1_end_mz=1250,
             ms1_start_mz=350, bin_mz=1, max_cycle=663, min_rt=min_rt, max_rt=max_rt)
         plt.clf()
-        mpimg.imsave('img_ref/' + spe + '.png', im)
-        np.save('img_ref/' + spe + '.npy', im)
+        mpimg.imsave('img_ref_common_th_5/' + spe + '.png', im)
+        np.save('img_ref_common_th_5/' + spe + '.npy', im)
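The net effect of the new lines is that each species' reference image is built only from peptides found in fewer than 5 of the reference species, so widely shared peptides no longer contribute, and the outputs go to the new img_ref_common_th_5/ directory. A small self-contained illustration of the threshold filter follows; the threshold of 5 comes from the diff, while the toy peptide data is invented.

import pandas as pd

THRESHOLD = 5  # peptides present in 5 or more species are considered common and dropped

df = pd.DataFrame({'Specie': ['A', 'A', 'B'],
                   'Sequence': ['PEPTIDEK', 'SHAREDPEPK', 'PEPTIDER']})
df_count = pd.DataFrame({'Sequence': ['PEPTIDEK', 'SHAREDPEPK', 'PEPTIDER'],
                         'Count': [1, 6, 2]})

df_spe = df[df['Specie'] == 'A']
rare = df_count[df_count['Count'] < THRESHOLD]['Sequence']
df_spe_no_common = df_spe[df_spe['Sequence'].isin(rare)]
print(df_spe_no_common['Sequence'].to_list())  # ['PEPTIDEK']; SHAREDPEPK is filtered out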