diff --git a/osrt/model.py b/osrt/model.py
index f5ed08ac75e6257e78427373ba6bac1d3359ef13..d98940686bc17d7a3dd24cc62708336feae0a011 100644
--- a/osrt/model.py
+++ b/osrt/model.py
@@ -131,8 +131,8 @@ class LitSlotAttentionAutoEncoder(pl.LightningModule):
         return recon_combined, recons, masks, slots, attn_slotwise.unsqueeze(-2).unflatten(-1, x.shape[-2:])
 
     def configure_optimizers(self) -> Any:
-        optimizer = optim.Adam(self.parameters(), lr=1e-3, eps=1e-08)
-        return optimizer
+        self.optimizer = optim.Adam(self.parameters(), lr=1e-3, eps=1e-08)
+        return self.optimizer
 
     def one_step(self, image):
         x = self.encoder_cnn(image).movedim(1, -1)
diff --git a/train_sa.py b/train_sa.py
index 269319caec62329c5ca1c60aada061cc9a9b358e..a35c7c3e22b705e395329b537be91c2c558f9143 100644
--- a/train_sa.py
+++ b/train_sa.py
@@ -60,7 +60,6 @@ def main():
 
     #### Create model
     model = LitSlotAttentionAutoEncoder(resolution, num_slots, num_iterations, cfg=cfg)
-
     checkpoint_callback = ModelCheckpoint(
         save_top_k=10,
         monitor="val_psnr",