diff --git a/model_custom.py b/model_custom.py
index e4601b37bf9298561fd7d2606d93c2d9f9185e78..7f8a7d107d9091b5250354416087737799061f97 100644
--- a/model_custom.py
+++ b/model_custom.py
@@ -101,22 +101,13 @@ class Model_Common_Transformer(nn.Module):
                                                 d_model=self.embedding_dim)
 
     def forward(self, seq, charge):
-        print('seq', seq.size())
-        print('charge', charge.size())
         meta_ohe = torch.nn.functional.one_hot(charge - 1, self.charge_max).float()
-        print('meta_ohe', meta_ohe.size())
         seq_emb = torch.nn.functional.one_hot(seq, self.nb_aa).float()
-        print('seq_emb', seq_emb.size())
         emb = self.pos_embedding(self.emb(seq_emb))
-        print('emb', emb.size())
         meta_enc = self.meta_enc(meta_ohe)
-        print('meta_enc', meta_enc.size())
         enc = self.encoder(emb)
-        print('enc', enc.size())
         out_rt = self.decoder_RT(enc)
-        print('out_rt', out_rt.size())
         int_enc = torch.mul(enc, meta_enc)
-        print('out_rt', out_rt.size())
         out_int = self.decoder_int(int_enc)
 
         return out_rt.flatten(), out_int
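
For reference, a minimal smoke test of the cleaned-up forward pass. The default construction of `Model_Common_Transformer` below is an assumption (the real constructor arguments are not shown in this hunk); the input contract is inferred from the attributes used in `forward()`: integer-encoded sequences for `seq`, and charges in `[1, charge_max]` so that `charge - 1` one-hot encodes without going out of range.

```python
import torch

# Hypothetical default construction; substitute the real constructor
# arguments of Model_Common_Transformer.
model = Model_Common_Transformer()

batch_size, seq_len = 4, 30
nb_aa, charge_max = model.nb_aa, model.charge_max

# Integer-encoded peptide sequences in [0, nb_aa) and charges in [1, charge_max].
seq = torch.randint(0, nb_aa, (batch_size, seq_len))
charge = torch.randint(1, charge_max + 1, (batch_size,))

# forward() returns the flattened retention-time head and the intensity head.
out_rt, out_int = model(seq, charge)
print(out_rt.shape, out_int.shape)
```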