diff --git a/image_ref/main.py b/image_ref/main.py
index 1c3299b77c2e33b491d545cbc1ba25fb82d26710..ed7d6d17ab73c1a983b22b3d2f3228b8ef59d351 100644
--- a/image_ref/main.py
+++ b/image_ref/main.py
@@ -54,6 +54,7 @@ def val_duo(model, data_test, loss_function, epoch, wandb):
     losses = 0.
     acc = 0.
     acc_contrastive = 0.
+    softmax = nn.Softmax(dim=1)
     for param in model.parameters():
         param.requires_grad = False
 
@@ -74,7 +75,8 @@ def val_duo(model, data_test, loss_function, epoch, wandb):
             label = label.cuda()
             label_class = torch.argmin(label).data.cpu().numpy()
             pred_logits = model.forward(imaer, imana, img_ref)
-            pred_class = torch.argmax(pred_logits[:, 0]).tolist()
+            confidence = softmax(pred_logits)
+            pred_class = torch.argmax(confidence[:, 0]).tolist()
             acc_contrastive += (
                     torch.argmax(pred_logits, dim=1).data.cpu().numpy() == label.data.cpu().numpy()).sum().item()
             acc += (pred_class == label_class)
@@ -214,6 +216,7 @@ def make_prediction_duo(model, data, f_name, f_name2):
     confidence_pred_list = [[] for i in range(n_class)]
     y_pred = []
    y_true = []
+    soft_max = nn.Softmax(dim=1)
     # iterate over test data
     for imaer, imana, img_ref, label in data:
         imaer = imaer.float()
@@ -233,7 +236,8 @@ def make_prediction_duo(model, data, f_name, f_name2):
             imana = imana.cuda()
             img_ref = img_ref.cuda()
             label = label.cuda()
-            confidence = model(imaer, imana, img_ref)
+            output = model(imaer, imana, img_ref)
+            confidence = soft_max(output)
             print(label)
             print(confidence)
             confidence_pred_list[specie].append(confidence[:, 0].data.cpu().numpy())
diff --git a/image_ref/model.py b/image_ref/model.py
index 0f702fb60cd5c8b96bf580b720b8d77a47f705c9..5302e3b122d79d71372a781aee834f805840f5a4 100644
--- a/image_ref/model.py
+++ b/image_ref/model.py
@@ -287,7 +287,7 @@ class Classification_model_duo_contrastive(nn.Module):
         self.im_encoder = resnet34(num_classes=2, in_channels=2)
         self.predictor = nn.Linear(in_features=2*2,out_features=2)
-        self.soft_max = nn.Softmax(dim=1)
+
 
     def forward(self, input_aer, input_ana, input_ref):
         input_ana = torch.concat([input_ana, input_ref], dim=1)
 
@@ -295,5 +295,4 @@ class Classification_model_duo_contrastive(nn.Module):
         out_aer = self.im_encoder(input_aer)
         out_ana = self.im_encoder(input_ana)
         out = torch.concat([out_aer,out_ana],dim=1)
-        out = self.predictor(out)
-        return self.soft_max(out)
\ No newline at end of file
+        return self.predictor(out)
\ No newline at end of file