From f367b0475b6fae1b7cb89e92f99a058308de383d Mon Sep 17 00:00:00 2001
From: alexcbb <alexchapin@hotmail.fr>
Date: Mon, 24 Jul 2023 11:03:47 +0200
Subject: [PATCH] Fix param name in SlotAttentionAutoEncoder

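The transformer-based slot attention module used here takes a `depth`
argument, so the leftover `iters` keyword from the original Slot Attention
constructor is renamed. Conceptually, one transformer layer plays the role
of one iterative refinement step.

For reference, below is a minimal sketch of the iterative update loop that
`iters` controlled in the original Slot Attention (Locatello et al., 2020).
It is illustrative only: the class name `MinimalSlotAttention`, its
constructor arguments, and the simplifications (no noise on the slot
initialization, no residual MLP) are assumptions for this sketch, not the
actual osrt/model.py implementation.

    import torch
    import torch.nn as nn

    class MinimalSlotAttention(nn.Module):
        """Illustrative sketch of iterative Slot Attention.

        `iters` repeats the same attention + GRU update; a transformer
        variant instead stacks `depth` layers, one layer per refinement
        step (hypothetical simplification, not the osrt code).
        """

        def __init__(self, num_slots=6, dim=64, iters=3):
            super().__init__()
            self.num_slots, self.iters = num_slots, iters
            self.scale = dim ** -0.5
            self.slots_init = nn.Parameter(torch.randn(1, 1, dim))
            self.to_q = nn.Linear(dim, dim)
            self.to_k = nn.Linear(dim, dim)
            self.to_v = nn.Linear(dim, dim)
            self.gru = nn.GRUCell(dim, dim)
            self.norm_in = nn.LayerNorm(dim)
            self.norm_slots = nn.LayerNorm(dim)

        def forward(self, inputs):                # inputs: [B, N, dim]
            b = inputs.shape[0]
            inputs = self.norm_in(inputs)
            k, v = self.to_k(inputs), self.to_v(inputs)
            slots = self.slots_init.expand(b, self.num_slots, -1).contiguous()
            for _ in range(self.iters):           # <- `iters`, replaced by `depth`
                q = self.to_q(self.norm_slots(slots))
                # slots compete for input locations (softmax over slots),
                # then each slot takes a weighted mean of the inputs
                attn = (q @ k.transpose(1, 2) * self.scale).softmax(dim=1)
                attn = attn / attn.sum(dim=-1, keepdim=True)
                updates = attn @ v                # [B, num_slots, dim]
                slots = self.gru(updates.flatten(0, 1), slots.flatten(0, 1))
                slots = slots.reshape(b, self.num_slots, -1)
            return slots

With a transformer-style module, the same refinement is obtained by
stacking `depth` attention layers, hence `depth=self.num_iterations`.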
---
 osrt/model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/osrt/model.py b/osrt/model.py
index 0db162a..0102d61 100644
--- a/osrt/model.py
+++ b/osrt/model.py
@@ -125,7 +125,7 @@ class SlotAttentionAutoEncoder(nn.Module):
                 input_dim=64,
                 slot_dim=64,
                 hidden_dim=128,
-                iters=self.num_iterations)
+                depth=self.num_iterations) # the transformer depth corresponds to the number of Slot Attention iterations in the original model
 
     def forward(self, image):
         # `image` has shape: [batch_size, num_channels, width, height].
-- 
GitLab