Commit e4d23e3e authored by Schneider Leo

dataset pred for diann

parent 32d76e1d
@@ -65,7 +65,7 @@ if __name__ =='__main__':
     model = ModelTransformer(encoder_ff=args.encoder_ff, decoder_rt_ff=args.decoder_rt_ff,
                              n_head=args.n_head, encoder_num_layer=args.encoder_num_layer,
                              decoder_rt_num_layer=args.decoder_rt_num_layer, drop_rate=args.drop_rate,
-                             embedding_dim=args.embedding_dim, acti=args.activation, norm=args.norm_first)
+                             embedding_dim=args.embedding_dim, acti=args.activation, norm=args.norm_first, seq_length=30)
     if torch.cuda.is_available():
         model = model.cuda()
@@ -103,7 +103,8 @@ def main(args):
     model = ModelTransformer(encoder_ff=args.encoder_ff, decoder_rt_ff=args.decoder_rt_ff,
                              n_head=args.n_head, encoder_num_layer=args.encoder_num_layer,
                              decoder_rt_num_layer=args.decoder_rt_num_layer, drop_rate=args.drop_rate,
-                             embedding_dim=args.embedding_dim, acti=args.activation, norm=args.norm_first)
+                             embedding_dim=args.embedding_dim, acti=args.activation, norm=args.norm_first,
+                             seq_length=30)
     if args.model_weigh is not None :
         model.load_state_dict(torch.load(args.model_weigh, weights_only=True))
@@ -38,7 +38,7 @@ class PositionalEncoding(nn.Module):
 class ModelTransformer(nn.Module):
     def __init__(self, drop_rate=0.1, embedding_dim=128, nb_aa=22, regressor_layer_size_rt=512, decoder_rt_ff=512,
-                 n_head=1, seq_length=25, encoder_ff=512, encoder_num_layer=1, decoder_rt_num_layer=1, acti='relu',
+                 n_head=1, seq_length=30, encoder_ff=512, encoder_num_layer=1, decoder_rt_num_layer=1, acti='relu',
                  norm=False):
         self.seq_length = seq_length
         self.nb_aa = nb_aa
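
The net effect of this commit is that the constructor default and both script call sites now agree on a fixed sequence length of 30 residues instead of 25. As a minimal sketch of what a fixed seq_length implies on the dataset side (the padding helper, padding index, and integer encoding below are illustrative assumptions, not code from this repository):

import torch

def pad_or_truncate(residue_ids, seq_length=30, pad_idx=0):
    # Clip an integer-encoded peptide to seq_length residues, then
    # right-pad with pad_idx so every sample has the same length.
    residue_ids = residue_ids[:seq_length]
    return residue_ids + [pad_idx] * (seq_length - len(residue_ids))

# A toy batch of two peptides padded to the new fixed length of 30.
batch = torch.tensor([pad_or_truncate([4, 12, 7, 19]),
                      pad_or_truncate([2, 2, 8, 15, 21, 6])])
print(batch.shape)  # torch.Size([2, 30])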