Commit 0cda4a37 authored by Léo Schneider

test

import numpy as np
import pandas as pd
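# loess_1d comes from the third-party `loess` package (pip install loess); it fits a
# one-dimensional local regression and can evaluate the fit on new x values via `xnew`.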
from loess.loess_1d import loess_1d
from dataloader import RT_Dataset
from common_dataset import Common_Dataset
import matplotlib.pyplot as plt

ALPHABET_UNMOD = {
    "_": 0,
    "A": 1,
    "C": 2,
    "D": 3,
    "E": 4,
    "F": 5,
    "G": 6,
    "H": 7,
    "I": 8,
    "K": 9,
    "L": 10,
    "M": 11,
    "N": 12,
    "P": 13,
    "Q": 14,
    "R": 15,
    "S": 16,
    "T": 17,
    "V": 18,
    "W": 19,
    "Y": 20,
    "CaC": 21,
    "OxM": 22
}
ALPHABET_UNMOD_REV = {v: k for k, v in ALPHABET_UNMOD.items()}


def numerical_to_alphabetical(arr):
    """Decode an integer-encoded peptide back into its amino-acid string."""
    seq = ''
    for i in range(len(arr)):
        seq += ALPHABET_UNMOD_REV[arr[i]]
    return seq
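
# A minimal decoding example (hypothetical input, not taken from the datasets):
# the encoded peptide [1, 9, 20] decodes to the string 'AKY'.
# numerical_to_alphabetical([1, 9, 20])  # -> 'AKY'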


def align(dataset, reference):
    """Map the retention times of `dataset` onto the iRT scale of `reference`
    using a LOESS fit over the peptides shared by both runs."""
    seq_ref = reference['sequence'].tolist()
    seq_common = dataset['Sequence'].tolist()
    # Hashable keys so the shared peptides can be found via a set intersection.
    seq_ref = [tuple(l) for l in seq_ref]
    seq_common = [tuple(l) for l in seq_common]
    ind_dict_ref = dict((k, i) for i, k in enumerate(seq_ref))
    inter = set(ind_dict_ref).intersection(seq_common)
    ind_dict_ref = [ind_dict_ref[x] for x in inter]
    indices_common = dict((k, i) for i, k in enumerate(seq_common))
    indices_common = [indices_common[x] for x in inter]
    # Retention time / iRT pairs of the shared peptides, in matching order.
    rt_ref = reference['irt'][ind_dict_ref].reset_index()
    rt_data = dataset['Retention time'][indices_common].reset_index()
    # Fit retention time -> iRT on the shared peptides, then evaluate the fit
    # on every retention time in `dataset`.
    xout, yout, wout = loess_1d(np.array(rt_data['Retention time'].tolist()),
                                np.array(rt_ref['irt'].tolist()),
                                xnew=np.array(dataset['Retention time'].tolist()),
                                degree=1, frac=0.5,
                                npoints=None, rotate=False, sigy=None)
    # Work on a copy so the caller keeps its original retention times
    # (needed for the before/after scatter plot below).
    dataset = dataset.copy()
    dataset['Retention time'] = yout
    return dataset


# Reference run: numerically encoded sequences decoded back to strings, with iRT values.
data_ori = RT_Dataset(None, 'database/data.csv', 'train', 25).data
data_ori['sequence'] = data_ori['sequence'].map(numerical_to_alphabetical)
# data_ori = pd.read_pickle('database/data_01_16_DIA_ISA_55.pkl')
# data_ori = Common_Dataset(data_ori, 30).data

# Run whose retention times are mapped onto the reference iRT scale.
data_train = pd.read_pickle('database/data_DIA_ISA_55_train.pkl')
# data_train = Common_Dataset(data_train, 30).data

data_align = align(data_train, data_ori)

# Original retention times against their LOESS-aligned values.
plt.scatter(data_train['Retention time'], data_align['Retention time'], s=1)
plt.savefig('test_align_2.png')
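
# Optional sanity check (a sketch, not part of the original commit): after alignment the
# retention-time column should span roughly the same range as the reference iRT scale.
# print(data_align['Retention time'].min(), data_align['Retention time'].max())
# print(data_ori['irt'].min(), data_ori['irt'].max())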
#
#
# dataset_ref = pd.read_pickle('database/data_01_16_DIA_ISA_55.pkl')
# data_ref = Common_Dataset(dataset_ref, 25).data
# dataset_2 = pd.read_pickle('database/data_01_20_DIA_ISA_55.pkl')
# data_2 = Common_Dataset(dataset_2, 25).data
# dataset_3 = pd.read_pickle('database/data_01_17_DIA_ISA_55.pkl')
# data_3 = Common_Dataset(dataset_3, 25).data
# dataset_4 = pd.read_pickle('database/data_01_23_DIA_ISA_55.pkl')
# data_4 = Common_Dataset(dataset_4, 25).data
# data_align_3 = align(data_3, data_ref)
# data_align_4 = align(data_4, data_ref)
#
# data = pd.concat([data_ref, data_2, data_align_3, data_align_4], ignore_index=True)
# data = data.drop(columns='index')
# data['Sequence'] = data['Sequence'].map(numerical_to_alphabetical)
# num_data = data.shape[0]
# train_num = np.floor(num_data * 0.8)
# train_size = 0
# list_train = []
# list_test = []
# groups = data.groupby('Sequence')
# for seq, gr in groups:
#
#     train_size += gr.shape[0]
#
#     if train_size > train_num:
#         list_test.append(gr)
#     else:
#         list_train.append(gr)
#
#
# dataset_train = pd.concat(list_train, ignore_index=True)
# dataset_test = pd.concat(list_test, ignore_index=True)
# dataset_train.to_pickle('database/data_DIA_ISA_55_train.pkl')
# dataset_test.to_pickle('database/data_DIA_ISA_55_test.pkl')