import numpy as np
import pandas as pd
from loess.loess_1d import loess_1d
from dataloader import RT_Dataset
from common_dataset import Common_Dataset
import matplotlib.pyplot as plt
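# Token alphabet used to encode peptide sequences as integers. "CaC" and "OxM"
# presumably denote carbamidomethylated cysteine and oxidized methionine.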
ALPHABET_UNMOD = {
    "_": 0,
    "A": 1,
    "C": 2,
    "D": 3,
    "E": 4,
    "F": 5,
    "G": 6,
    "H": 7,
    "I": 8,
    "K": 9,
    "L": 10,
    "M": 11,
    "N": 12,
    "P": 13,
    "Q": 14,
    "R": 15,
    "S": 16,
    "T": 17,
    "V": 18,
    "W": 19,
    "Y": 20,
    "CaC": 21,
    "OxM": 22
}
ALPHABET_UNMOD_REV = {v: k for k, v in ALPHABET_UNMOD.items()}
def numerical_to_alphabetical(arr):
    """Decode a sequence of integer tokens back into a peptide string."""
    seq = ''
    for i in range(len(arr)):
        seq += ALPHABET_UNMOD_REV[arr[i]]
    return seq
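# Example (hypothetical input): numerical_to_alphabetical([13, 4, 13]) == 'PEP'.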
def align(dataset, reference):
    """Map `dataset`'s retention times onto the reference iRT scale.

    Peptides present in both frames anchor a LOESS fit from measured retention
    time to reference iRT; the fit is then evaluated at every retention time in
    `dataset`. Note that `dataset['Retention time']` is overwritten in place.
    """
    seq_ref = reference['sequence'].tolist()
    seq_common = dataset['Sequence'].tolist()
    seq_ref = [tuple(l) for l in seq_ref]
    seq_common = [tuple(l) for l in seq_common]
    # Positions of the shared peptides in each frame.
    ind_dict_ref = dict((k, i) for i, k in enumerate(seq_ref))
    inter = set(ind_dict_ref).intersection(seq_common)
    indices_ref = [ind_dict_ref[x] for x in inter]
    indices_common = dict((k, i) for i, k in enumerate(seq_common))
    indices_common = [indices_common[x] for x in inter]
    rt_ref = reference['irt'].iloc[indices_ref].reset_index()
    rt_data = dataset['Retention time'].iloc[indices_common].reset_index()
    # LOESS fit on the shared peptides, evaluated at all retention times of `dataset`.
    xout, yout, wout = loess_1d(np.array(rt_data['Retention time'].tolist()),
                                np.array(rt_ref['irt'].tolist()),
                                xnew=np.array(dataset['Retention time'].tolist()),
                                degree=1, frac=0.5,
                                npoints=None, rotate=False, sigy=None)
    dataset['Retention time'] = yout
    return dataset
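# Minimal sketch of the expected schema (synthetic values, not from the real
# datasets): `reference` needs 'sequence'/'irt' columns, `dataset` needs
# 'Sequence'/'Retention time'.
# _peps = ['PEPTIDE%d' % i for i in range(40)]
# _irt = np.linspace(0.0, 100.0, 40)
# _ref = pd.DataFrame({'sequence': _peps, 'irt': _irt})
# _run = pd.DataFrame({'Sequence': _peps, 'Retention time': 0.3 * _irt + 5.0})
# _run = align(_run, _ref)  # 'Retention time' is now on the iRT scale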
data_ori = RT_Dataset(None, 'database/data.csv', 'train', 25).data
data_ori['sequence'] = data_ori['sequence'].map(numerical_to_alphabetical)
# data_ori = pd.read_pickle('database/data_01_16_DIA_ISA_55.pkl')
# data_ori = Common_Dataset(data_ori, 30).data

data_train = pd.read_pickle('database/data_DIA_ISA_55_train.pkl')
# data_train = Common_Dataset(data_train, 30).data

# align() overwrites 'Retention time' in place, so keep a copy of the original
# values for the diagnostic plot of original vs. aligned retention times.
rt_original = data_train['Retention time'].copy()
data_align = align(data_train, data_ori)

plt.scatter(rt_original, data_align['Retention time'], s=1)
plt.savefig('test_align_2.png')

# Earlier pipeline (kept for reference): align several DIA runs to a reference
# run, pool them, and split ~80/20 into train/test while keeping all rows of a
# given peptide sequence in the same split.
# dataset_ref = pd.read_pickle('database/data_01_16_DIA_ISA_55.pkl')
# data_ref = Common_Dataset(dataset_ref, 25).data
# dataset_2 = pd.read_pickle('database/data_01_20_DIA_ISA_55.pkl')
# data_2 = Common_Dataset(dataset_2, 25).data
# dataset_3 = pd.read_pickle('database/data_01_17_DIA_ISA_55.pkl')
# data_3 = Common_Dataset(dataset_3, 25).data
# dataset_4 = pd.read_pickle('database/data_01_23_DIA_ISA_55.pkl')
# data_4 = Common_Dataset(dataset_4, 25).data
# data_align_3 = align(data_3, data_ref)
# data_align_4 = align(data_4, data_ref)
#
# data = pd.concat([data_ref, data_2, data_align_3, data_align_4], ignore_index=True)
# data = data.drop(columns='index')
# data['Sequence'] = data['Sequence'].map(numerical_to_alphabetical)
# num_data = data.shape[0]
# train_num = np.floor(num_data * 0.8)
# train_size = 0
# list_train = []
# list_test = []
# groups = data.groupby('Sequence')
# for seq, gr in groups:
#     train_size += gr.shape[0]
#     if train_size > train_num:
#         list_test.append(gr)
#     else:
#         list_train.append(gr)
#
# dataset_train = pd.concat(list_train, ignore_index=True)
# dataset_test = pd.concat(list_test, ignore_index=True)
# dataset_train.to_pickle('database/data_DIA_ISA_55_train.pkl')
# dataset_test.to_pickle('database/data_DIA_ISA_55_test.pkl')