Commit 74beea9a authored by Schneider Leo

datasets

parent e7715f8d
@@ -42,7 +42,7 @@ class Model_Common_Transformer(nn.Module):
     def __init__(self, drop_rate=0.1, embedding_dim=128, nb_aa=23,
                  regressor_layer_size_rt=512, regressor_layer_size_int=512, decoder_rt_ff=512, decoder_int_ff=512,
                  n_head=1, seq_length=25,
-                 charge_max=4, charge_frag_max=3, encoder_ff=512, encoder_num_layer=1, decoder_rt_num_layer=1,
+                 charge_max=5, charge_frag_max=3, encoder_ff=512, encoder_num_layer=1, decoder_rt_num_layer=1,
                  decoder_int_num_layer=1, acti='relu', norm=False):
         self.charge_max = charge_max
         self.seq_length = seq_length
@@ -101,14 +101,9 @@ class Model_Common_Transformer(nn.Module):
                                              d_model=self.embedding_dim)

     def forward(self, seq, charge):
-        print(seq.size(),charge.size())
-        print(seq, charge)
         print(torch.cuda.mem_get_info())
         meta_ohe = torch.nn.functional.one_hot(charge - 1, self.charge_max).float()
         seq_emb = torch.nn.functional.one_hot(seq, self.nb_aa).float()
-        print(seq_emb.shape)
-        print(self.nb_aa, self.embedding_dim)
-        print(seq_emb)
         emb = self.pos_embedding(self.emb(seq_emb))
         meta_enc = self.meta_enc(meta_ohe)
...
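The charge_max default is raised from 4 to 5 because forward() encodes the precursor charge as one_hot(charge - 1, self.charge_max): any charge state larger than charge_max would index past the one-hot vector and raise an error. A minimal sketch of that behaviour (the charge values below are illustrative assumptions, not taken from the repository):

import torch

# Hypothetical batch of precursor charges, including a charge-5 peptide.
charge = torch.tensor([2, 3, 5])

# With the old default charge_max=4, index 4 (charge 5) is out of range for
# num_classes=4 and torch raises a RuntimeError; charge_max=5 covers it.
charge_max = 5
meta_ohe = torch.nn.functional.one_hot(charge - 1, num_classes=charge_max).float()
print(meta_ohe.shape)  # torch.Size([3, 5])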