diff --git a/main_custom.py b/main_custom.py
index 8ddbdea155ac7fd4e3306666ef8c6db235f7733c..7c7421b3fef9cc87dd40b42b8d0b12bc1af7673f 100644
--- a/main_custom.py
+++ b/main_custom.py
@@ -26,7 +26,6 @@ def train(model, data_train, epoch, optimizer, criterion_rt, criterion_intensity
         param.requires_grad = True
     if forward == 'both':
         for seq, charge, rt, intensity in data_train:
-            print(seq, charge, rt, intensity )
             rt, intensity = rt.float(), intensity.float()
             if torch.cuda.is_available():
                 seq, charge, rt, intensity = seq.cuda(), charge.cuda(), rt.cuda(), intensity.cuda()
@@ -288,7 +287,6 @@ def get_n_params(model):
         for s in list(p.size()):
             nn = nn * s
-        print(n, nn)
         pp += nn
     return pp
diff --git a/model_custom.py b/model_custom.py
index eddc158c4f1dffcedaa3ae029222ab3862bc4193..6745da35457b1a067ac1e067ba1743f6f180f136 100644
--- a/model_custom.py
+++ b/model_custom.py
@@ -102,6 +102,8 @@ class Model_Common_Transformer(nn.Module):
 
     def forward(self, seq, charge):
         meta_ohe = torch.nn.functional.one_hot(charge - 1, self.charge_max).float()
         seq_emb = torch.nn.functional.one_hot(seq, self.nb_aa).float()
+        print(seq_emb.shape)
+        print(self.nb_aa, self.embedding_dim)
         emb = self.pos_embedding(self.emb(seq_emb))
         meta_enc = self.meta_enc(meta_ohe)
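
Note on the added debug prints in model_custom.py: in PyTorch, Tensor.shape is an attribute (a torch.Size), not a method, so the originally submitted print(seq_emb.shape()) would raise TypeError at the first forward pass; the hunk above uses the attribute form instead. A minimal, self-contained sketch of what those prints report, with made-up dimensions (nb_aa, charge_max, batch size, and sequence length here are illustrative assumptions, not values from the repo):

import torch

# Illustrative sizes only -- the real values come from the model config.
nb_aa, charge_max = 22, 6        # amino-acid vocabulary size, max charge state
batch_size, seq_len = 4, 30

seq = torch.randint(0, nb_aa, (batch_size, seq_len))       # integer-encoded peptides
charge = torch.randint(1, charge_max + 1, (batch_size,))   # charges in 1..charge_max

# Same encodings as in Model_Common_Transformer.forward():
seq_emb = torch.nn.functional.one_hot(seq, nb_aa).float()
meta_ohe = torch.nn.functional.one_hot(charge - 1, charge_max).float()

print(seq_emb.shape)   # torch.Size([4, 30, 22]) -- attribute access, no call
print(meta_ohe.shape)  # torch.Size([4, 6])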
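Separately, the get_n_params helper touched in the second hunk counts parameters with nested loops over p.size(). Not part of the PR, but for comparison, the same total can be computed with Tensor.numel(); this is a sketch assuming only the aggregate count is needed (no per-parameter breakdown):

import torch.nn as nn

def get_n_params(model: nn.Module) -> int:
    # Equivalent to the loop in main_custom.py: product of each
    # parameter's dimensions, summed over all parameters.
    return sum(p.numel() for p in model.parameters())

print(get_n_params(nn.Linear(10, 5)))  # 10*5 weights + 5 biases = 55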