#!/bin/bash
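
# Single-node SLURM batch script: requests 2 GPUs and launches distributed
# training with torchrun (one worker process per GPU).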

#SBATCH --job-name=train_msn
#SBATCH --output=logs/job.%j.out
#SBATCH --error=logs/job.%j.err

#SBATCH --partition=gpu
#SBATCH --gres=gpu:2
#SBATCH --nodes=1
#SBATCH --mem=48G
#SBATCH --ntasks-per-node=2  # number of tasks per node (= number of GPUs per node)
#SBATCH --cpus-per-task=1
#SBATCH --time=16:00:00
#SBATCH --mail-user=alexandre.chapin@ec-lyon.fr
#SBATCH --mail-type=FAIL

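# Print the list of nodes allocated to this job (useful for debugging).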
echo ${SLURM_NODELIST}

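# Launch single-node distributed training: --standalone sets up a local
# rendezvous, and --nproc_per_node 2 spawns one training process per GPU.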
torchrun --standalone --nnodes 1 --nproc_per_node 2 train.py runs/msn/osrt/config.yaml --wandb