From 3c1a2361daf4a831905093bde1807ac95702b5a9 Mon Sep 17 00:00:00 2001
From: Schneider Leo <leo.schneider@etu.ec-lyon.fr>
Date: Mon, 23 Jun 2025 11:02:57 +0200
Subject: [PATCH] fix: freeze backbone gradients

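Add a freeze_backbone flag to train_duo: when set, every im_encoder
parameter except the final fc layer is frozen and only the predictor
head is trained; otherwise all parameters stay trainable. Drop the
unused single-input train/test/run/make_prediction helpers and name
the training plot and confusion matrix outputs after args.outname.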
---
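Usage note (ignored by git am): a minimal call-site sketch, assuming
run_duo keeps the CrossEntropyLoss/SGD setup the removed run() used;
the wiring below is illustrative, not part of this patch. Frozen
parameters receive no gradient, so the optimizer simply skips them:

    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    for e in range(args.epoches):
        # train_duo toggles requires_grad itself; frozen params keep
        # grad=None, so SGD leaves them untouched
        loss, acc = train_duo(model, data_train, optimizer, loss_function, e,
                              freeze_backbone=True)
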
 main.py | 148 ++++++--------------------------------------------------
 1 file changed, 16 insertions(+), 132 deletions(-)

diff --git a/main.py b/main.py
index e7eb740..45e56e8 100644
--- a/main.py
+++ b/main.py
@@ -11,141 +11,25 @@ from sklearn.metrics import confusion_matrix
 import seaborn as sn
 import pandas as pd
 
-
-
-def train(model, data_train, optimizer, loss_function, epoch):
+def train_duo(model, data_train, optimizer, loss_function, epoch, freeze_backbone=False):
     model.train()
-    losses = 0.
-    acc = 0.
-    for param in model.parameters():
-        param.requires_grad = True
 
-    for im, label in data_train:
-        label = label.long()
-        if torch.cuda.is_available():
-            im, label = im.cuda(), label.cuda()
-        pred_logits = model.forward(im)
-        pred_class = torch.argmax(pred_logits,dim=1)
-        acc += (pred_class==label).sum().item()
-        loss = loss_function(pred_logits,label)
-        losses += loss.item()
-        optimizer.zero_grad()
-        loss.backward()
-        optimizer.step()
-    losses = losses/len(data_train.dataset)
-    acc = acc/len(data_train.dataset)
-    print('Train epoch {}, loss : {:.3f} acc : {:.3f}'.format(epoch,losses,acc))
-    return losses, acc
-
-def test(model, data_test, loss_function, epoch):
-    model.eval()
     losses = 0.
     acc = 0.
-    for param in model.parameters():
-        param.requires_grad = False
-
-    for im, label in data_test:
-        label = label.long()
-        if torch.cuda.is_available():
-            im, label = im.cuda(), label.cuda()
-        pred_logits = model.forward(im)
-        pred_class = torch.argmax(pred_logits,dim=1)
-        acc += (pred_class==label).sum().item()
-        loss = loss_function(pred_logits,label)
-        losses += loss.item()
-    losses = losses/len(data_test.dataset)
-    acc = acc/len(data_test.dataset)
-    print('Test epoch {}, loss : {:.3f} acc : {:.3f}'.format(epoch,losses,acc))
-    return losses,acc
-
-def run(args):
-    #load data
-    data_train, data_test = load_data(base_dir=args.dataset_dir, batch_size=args.batch_size)
-    #load model
-    model = Classification_model(model = args.model, n_class=len(data_train.dataset.dataset.classes))
-    #load weights
-    if args.pretrain_path is not None :
-        load_model(model,args.pretrain_path)
-    #move parameters to GPU
-    if torch.cuda.is_available():
-        model = model.cuda()
-    #init accumulator
-    best_acc = 0
-    train_acc=[]
-    train_loss=[]
-    val_acc=[]
-    val_loss=[]
-    #init training
-    loss_function = nn.CrossEntropyLoss()
-    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
-    #traing
-    for e in range(args.epoches):
-        loss, acc = train(model,data_train,optimizer,loss_function,e)
-        train_loss.append(loss)
-        train_acc.append(acc)
-        if e%args.eval_inter==0 :
-            loss, acc = test(model,data_test,loss_function,e)
-            val_loss.append(loss)
-            val_acc.append(acc)
-            if acc > best_acc :
-                save_model(model,args.save_path)
-                best_acc = acc
-    #plot and save training figs
-    plt.plot(train_acc)
-    plt.plot(val_acc)
-    plt.plot(train_acc)
-    plt.plot(train_acc)
-    plt.ylim(0, 1.05)
-    plt.show()
-    plt.savefig('output/training_plot_{}.png'.format(args.outname))
-
-    #load and evaluated best model
-    load_model(model, args.save_path)
-    make_prediction(model,data_test, 'output/confusion_matrix_{}.png'.format(args.outname))
-
-
-def make_prediction(model, data, f_name):
-    y_pred = []
-    y_true = []
-
-    # iterate over test data
-    for im, label in data:
-        label = label.long()
-        if torch.cuda.is_available():
-            im = im.cuda()
-        output = model(im)
-
-        output = (torch.max(torch.exp(output), 1)[1]).data.cpu().numpy()
-        y_pred.extend(output)
-
-        label = label.data.cpu().numpy()
-        y_true.extend(label)  # Save Truth
-    # constant for classes
-
-    classes = data.dataset.dataset.classes
-
-    # Build confusion matrix
-    cf_matrix = confusion_matrix(y_true, y_pred)
-    df_cm = pd.DataFrame(cf_matrix / np.sum(cf_matrix, axis=1)[:, None], index=[i for i in classes],
-                         columns=[i for i in classes])
-    plt.figure(figsize=(12, 7))
-    sn.heatmap(df_cm, annot=cf_matrix)
-    plt.savefig(f_name)
-
-
-def train_duo(model, data_train, optimizer, loss_function, epoch):
-    model.train()
-
-    losses = 0.
-    acc = 0.
-    for n, p in model.im_encoder.named_parameters():
-        if n in ['fc.weight', 'fc.bias']:
+    if freeze_backbone:
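+        # freeze the whole im_encoder backbone except its final fc layer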
+        for n, p in model.im_encoder.named_parameters():
+            if n in ['fc.weight', 'fc.bias']:
+                p.requires_grad = True
+            else:
+                p.requires_grad = False
+
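+        # the predictor head always remains trainable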
+        for p in model.predictor.parameters():
+            p.requires_grad = True
+    else:
+        for p in model.parameters():
             p.requires_grad = True
-        else:
-            p.requires_grad = False
-
-    for n, p in model.predictor.named_parameters():
-        p.requires_grad = True
 
 
     for imaer,imana, label in data_train:
@@ -251,10 +135,10 @@ def run_duo(args):
     plt.ylim(0, 1.05)
     plt.show()
 
-    plt.savefig('output/training_plot_noise_{}_lr_{}_model_{}_{}.png'.format(args.noise_threshold,args.lr,args.model,args.model_type))
+    plt.savefig('output/training_plot_{}.png'.format(args.outname))
     #load and evaluate best model
     load_model(model, args.save_path)
-    make_prediction_duo(model,data_test, 'output/confusion_matrix_noise_{}_lr_{}_model_{}_{}.png'.format(args.noise_threshold,args.lr,args.model,args.model_type))
+    make_prediction_duo(model,data_test, 'output/confusion_matrix_{}.png'.format(args.outname))
 
 
 def make_prediction_duo(model, data, f_name):
-- 
GitLab