From cb919de96a970ca7454c2dac35282926ce5323a3 Mon Sep 17 00:00:00 2001
From: maali <mahmoud-ahmed.ali@liris.cnrs.fr>
Date: Mon, 13 Mar 2023 14:02:34 +0100
Subject: [PATCH] Adapt training pipeline to the fruits dataset: swap trainModel/trainModel_Fruits, adjust step counts and validation generator flags, prune modelsDict, and add an argparse class-name CLI

---
 models.py | 120 ++++++++++++++++++------------------------------------
 1 file changed, 40 insertions(+), 80 deletions(-)

diff --git a/models.py b/models.py
index 4934cbd..5552a29 100644
--- a/models.py
+++ b/models.py
@@ -5,6 +5,7 @@ from tensorflow.keras import backend as K
 from classes import modelSet, modelDictVal
 import random
 from data import *
+import argparse
 
 huberDelta = .5
 
@@ -237,7 +238,8 @@ def stvNetNew(inputShape=(480, 640, 3), outVectors=True, outClasses=True, modelN
     return tf.keras.Model(inputs=xIn, outputs=outputs, name=modelName)
 
 
-def uNet(inputShape=(480, 640, 3), outVectors=True, outClasses=True, modelName="uNet"):  # neural net structure used for image segmentation
+def uNet(inputShape=(480, 640, 3), outVectors=True, outClasses=True,
+         modelName="uNet"):  # neural net structure used for image segmentation
     xIn = tf.keras.Input(inputShape, dtype=np.dtype('uint8'))
 
     x = tf.keras.layers.Lambda(lambda x: x / 255)(xIn)
@@ -305,7 +307,7 @@ def uNet(inputShape=(480, 640, 3), outVectors=True, outClasses=True, modelName="
     return tf.keras.Model(inputs=[xIn], outputs=outputs, name=modelName)
 
 
-def trainModel(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=tf.keras.optimizers.Adam,
+def trainModel_Fruits(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=tf.keras.optimizers.Adam,
                learning_rate=0.01, losses=None, metrics=None, saveModel=True, modelName='stvNet_weights',
                epochs=1, loss_weights=None, outVectors=False, outClasses=False, dataSplit=True, altLabels=True,
                augmentation=True):  # train and save model weights
@@ -321,11 +323,12 @@ def trainModel(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=t
 
     trainData, validData = None, None
     if dataSplit:  # if using datasplit, otherwise all available data is used
-        trainData, validData = data.getDataSplit(modelClass=modelClass)
+        trainData, validData = data.getDataSplit_Fruits(modelClass=modelClass)
 
     logger = tf.keras.callbacks.CSVLogger("models/history/" + modelName + "_" + modelClass + "_history.csv",
                                           append=True)
-    # evalLogger = tf.keras.callbacks.CSVLogger("models/history/" + modelName + "_" + modelClass + "_eval_history.csv", append = True)
+    evalLogger = tf.keras.callbacks.CSVLogger("models/history/" + modelName + "_" + modelClass + "_eval_history.csv",
+                                              append=True)
 
     history, valHistory = [], []
 
@@ -336,14 +339,15 @@ def trainModel(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=t
                 print("Epoch {0} of {1}".format(i + 1, epochs))
                 hist = model.fit(modelGen(modelClass, batchSize, masterList=trainData, out0=outKeys[0], out1=outKeys[1],
                                           altLabels=altLabels, augmentation=augmentation),
-                                 steps_per_epoch=math.ceil(len(trainData) / batchSize), max_queue_size=2,
+                                 steps_per_epoch=math.ceil(len(trainData)), max_queue_size=2,
                                  callbacks=[logger])
                 history.append(hist.history)
                 if dataSplit:
-                    print("Validation:")
+                    print("################ Validation: ############")
                     valHist = model.evaluate(
-                        modelGen(modelClass, batchSize, masterList=validData, out0=outKeys[0], out1=outKeys[1],
-                                 altLabels=altLabels, augmentation=False), steps=math.ceil(len(validData) / batchSize),
+                        modelGen(modelClass, batchSize, val=True, masterList=validData, out0=outKeys[0],
+                                 out1=outKeys[1],
+                                 altLabels=altLabels, augmentation=False), steps=math.ceil(len(validData)),
                         max_queue_size=2)
                     valHistory.append(valHist)
         else:
@@ -353,13 +357,14 @@ def trainModel(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=t
             print("Epoch {0} of {1}".format(i + 1, epochs))
             hist = model.fit(
                 modelGen(modelClass, batchSize, masterList=trainData, altLabels=altLabels, augmentation=augmentation),
-                steps_per_epoch=math.ceil(len(trainData) / batchSize), max_queue_size=2, callbacks=[logger])
+                steps_per_epoch=math.ceil(len(trainData)), max_queue_size=2, callbacks=[logger])
             history.append(hist.history)
             if dataSplit:
-                print("Validation:")
+                print("########## Validation: ############2")
                 valHist = model.evaluate(
-                    modelGen(modelClass, batchSize, masterList=validData, altLabels=altLabels, augmentation=False),
-                    steps=math.ceil(len(validData) / batchSize), max_queue_size=2)
+                    modelGen(modelClass, batchSize, val=True, masterList=validData, altLabels=altLabels,
+                             augmentation=False),
+                    steps=math.ceil(len(validData)), max_queue_size=2)
                 valHistory.append(valHist)
 
     historyLog = {"struct": modelStruct.__name__,
@@ -391,11 +396,10 @@ def trainModel(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=t
     return model
 
 
-
-def trainModel_Fruits(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=tf.keras.optimizers.Adam,
-               learning_rate=0.01, losses=None, metrics=None, saveModel=True, modelName='stvNet_weights',
-               epochs=1, loss_weights=None, outVectors=False, outClasses=False, dataSplit=True, altLabels=True,
-               augmentation=True):  # train and save model weights
+def trainModel(modelStruct, modelGen, modelClass='cat', batchSize=2, optimizer=tf.keras.optimizers.Adam,
+                      learning_rate=0.01, losses=None, metrics=None, saveModel=True, modelName='stvNet_weights',
+                      epochs=1, loss_weights=None, outVectors=False, outClasses=False, dataSplit=True, altLabels=True,
+                      augmentation=True):  # train and save model weights
     if metrics is None:
         metrics = ['accuracy']
     if not (outVectors or outClasses):
@@ -427,7 +431,7 @@ def trainModel_Fruits(modelStruct, modelGen, modelClass='cat', batchSize=2, opti
                                  callbacks=[logger])
                 history.append(hist.history)
                 if dataSplit:
-                    print("Validation:")
+                    print("############# Validation: ##############")
                     valHist = model.evaluate(
                         modelGen(modelClass, batchSize, masterList=validData, out0=outKeys[0], out1=outKeys[1],
                                  altLabels=altLabels, augmentation=False), steps=math.ceil(len(validData) / batchSize),
@@ -443,7 +447,7 @@ def trainModel_Fruits(modelStruct, modelGen, modelClass='cat', batchSize=2, opti
                 steps_per_epoch=math.ceil(len(trainData) / batchSize), max_queue_size=2, callbacks=[logger])
             history.append(hist.history)
             if dataSplit:
-                print("Validation:")
+                print("############ Validation: ##########2")
                 valHist = model.evaluate(
                     modelGen(modelClass, batchSize, masterList=validData, altLabels=altLabels, augmentation=False),
                     steps=math.ceil(len(validData) / batchSize), max_queue_size=2)
@@ -482,7 +486,7 @@ def trainModels(modelSets, shutDown=False):
     for modelSet in modelSets:
         print("Training {0}".format(modelSet.name))
         model = modelsDict[modelSet.name]
-        trainModel(model.structure, model.generator, modelClass=modelSet.modelClass, epochs=model.epochs,
+        trainModel_Fruits(model.structure, model.generator, modelClass=modelSet.modelClass, epochs=model.epochs,
                    losses=model.losses, modelName=modelSet.name, outClasses=model.outClasses,
                    outVectors=model.outVectors, learning_rate=model.lr, metrics=model.metrics,
                    altLabels=model.altLabels, augmentation=model.augmentation)
@@ -501,7 +505,7 @@ def evaluateModel(modelStruct, modelName, evalGen, modelClass='cat', outVectors=
         metrics = ['accuracy']
     model = tf.keras.models.load_model(
         os.path.dirname(os.path.realpath(__file__)) + '/models/' + modelName + '_' + modelClass)
-    model.evaluate(evalGen(modelClass, batchSize), steps=samples // batchSize)
+    model.evaluate(evalGen(modelClass, batchSize), steps=samples, val=True)
 
 
 def evaluateModels(modelSets, batchSize=2, dataSplit=True):
@@ -516,13 +520,13 @@ def evaluateModels(modelSets, batchSize=2, dataSplit=True):
                 model.evaluate(
                     modelEnt.generator(modelSet.modelClass, batchSize=batchSize, masterList=validData, out0=outKeys[0],
                                        out1=outKeys[1], altLabels=modelEnt.altLabels, augmentation=False),
-                    steps=math.ceil(len(validData) / batchSize), max_queue_size=2)
+                    steps=math.ceil(len(validData)), max_queue_size=2)
             else:
                 raise Exception("Probably shouldn't be here ever..")
         else:
-            model.evaluate(modelEnt.generator(modelSet.modelClass, batchSize=batchSize, masterList=validData,
+            model.evaluate(modelEnt.generator(modelSet.modelClass, val=True, batchSize=batchSize, masterList=validData,
                                               altLabels=modelEnt.altLabels, augmentation=False),
-                           steps=math.ceil(len(validData) / batchSize), max_queue_size=2)
+                           steps=math.ceil(len(validData)), max_queue_size=2)
 
 
 def trainModelClassGen(modelStruct, modelName, losses, modelClass='cat', batchSize=2,
@@ -666,70 +670,26 @@ class generatorClass:  # simulates generator behaviour, unused
             output[self.outVecName] = np.array(yCoordBatch)
         if self.outClasses:
             output[self.outClassName] = np.array(yClassBatch)
-        return (False, np.array(xBatch), output)
+        return False, np.array(xBatch), output
 
 
 modelsDict = {
     'uNet_classes': modelDictVal(uNet, data.classTrainingGenerator, tf.keras.losses.BinaryCrossentropy(), False, True,
-                                 epochs=20, lr=0.001, augmentation=False),
-    'uNet_coords': modelDictVal(uNet, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False, epochs=5,
-                                lr=0.001, metrics=['mae', 'mse']),
-    'uNet_coords_smooth': modelDictVal(uNet, data.coordsTrainingGenerator, smoothL1, True, False, epochs=3, lr=0.0001,
-                                       metrics=['mae', 'mse']),
-    'stvNet': modelDictVal(stvNet, data.combinedTrainingGenerator,
-                           {'coordsOut': tf.keras.losses.Huber(), 'classOut': tf.keras.losses.BinaryCrossentropy()},
-                           True, True, epochs=5, lr=0.00005,
-                           metrics={'coordsOut': ['mae', 'mse'], "classOut": ['accuracy']}),
-    'stvNet_coords_slow_learner': modelDictVal(stvNet, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True,
-                                               False, epochs=40, lr=0.00001, metrics=['mae', 'mse'],
-                                               outVecName='coordsOut'),
-    'stvNetAltLabels': modelDictVal(stvNet, data.combinedTrainingGenerator, {'coordsOut': tf.keras.losses.Huber(),
-                                                                             'classOut': tf.keras.losses.BinaryCrossentropy()},
-                                    True, True, epochs=10, lr=0.001,
-                                    metrics={'coordsOut': ['mae', 'mse'], "classOut": ['accuracy']}, altLabels=True,
-                                    augmentation=True),
-    'stvNetNormLabels': modelDictVal(stvNet, data.combinedTrainingGenerator, {'coordsOut': tf.keras.losses.Huber(),
-                                                                              'classOut': tf.keras.losses.BinaryCrossentropy()},
-                                     True, True, epochs=10, lr=0.001,
-                                     metrics={'coordsOut': ['mae', 'mse'], "classOut": ['accuracy']}, altLabels=False,
-                                     augmentation=True),
-    'stvNet_coords': modelDictVal(stvNet, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False, epochs=20,
-                                  lr=0.001, metrics=['mae', 'mse'], altLabels=False, augmentation=True),
-    'stvNet_coords_altLabels': modelDictVal(stvNet, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False,
-                                            epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=True,
-                                            augmentation=True),
-    'stvNet_coords_altLabels_noAug': modelDictVal(stvNet, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True,
-                                                  False, epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=True,
-                                                  augmentation=False),
-    'stvNet_coords_noAug': modelDictVal(stvNet, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False,
-                                        epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=False,
-                                        augmentation=False),
-    'stvNet_classes': modelDictVal(stvNet, data.classTrainingGenerator, tf.keras.losses.BinaryCrossentropy(), False,
-                                   True, epochs=10, lr=0.001, altLabels=False, augmentation=True),
-    'stvNet_classes_noAug': modelDictVal(stvNet, data.classTrainingGenerator, tf.keras.losses.BinaryCrossentropy(),
-                                         False, True, epochs=10, lr=0.001, altLabels=False, augmentation=False),
-    'stvNet_new_coords_alt': modelDictVal(stvNetNew, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False,
-                                          epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=True,
-                                          augmentation=False),
+                                 epochs=50, lr=0.0001, augmentation=True),
+
     'stvNet_new_coords': modelDictVal(stvNetNew, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False,
-                                      epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=False, augmentation=False),
-    'stvNet_new_coords_alt_aug': modelDictVal(stvNetNew, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True,
-                                              False, epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=True,
-                                              augmentation=True),
-    'stvNet_new_coords_aug': modelDictVal(stvNetNew, data.coordsTrainingGenerator, tf.keras.losses.Huber(), True, False,
-                                          epochs=20, lr=0.001, metrics=['mae', 'mse'], altLabels=False,
-                                          augmentation=True),
-    'stvNet_new_classes': modelDictVal(stvNetNew, data.classTrainingGenerator, tf.keras.losses.BinaryCrossentropy(),
-                                       False, True, epochs=20, lr=0.001, augmentation=False),
-    'stvNet_new_combined': modelDictVal(stvNetNew, data.combinedTrainingGenerator,
-                                        {'coordsOut': tf.keras.losses.Huber(),
-                                         'classOut': tf.keras.losses.BinaryCrossentropy()}, True, True, epochs=20,
-                                        lr=0.001, metrics={'coordsOut': ['mae', 'mse'], "classOut": ['accuracy']},
-                                        augmentation=False),
+                                      epochs=50, lr=0.0001, metrics=['mae', 'mse'], altLabels=False, augmentation=False)
 }
 
 if __name__ == "__main__":
-    class_name = 'pear'
+    ap = argparse.ArgumentParser()
+    ap.add_argument("-cls_name", "--class_name", type=str,
+                    default='kiwi1',
+                    help="[kiwi1, pear2, banana1, orange, peach1]")
+
+    args = vars(ap.parse_args())
+
+    class_name = args["class_name"]
     modelSets = [modelSet('uNet_classes', class_name), modelSet('stvNet_new_coords', class_name)]
     trainModels(modelSets)
     evaluateModels(modelSets)
-- 
GitLab