From 40b3e79e78f8de37d459649f7d2313433e3320fa Mon Sep 17 00:00:00 2001
From: Devashish Lohani <devashish.lohani@univ-lyon2.fr>
Date: Fri, 7 Aug 2020 16:04:02 +0200
Subject: [PATCH] Generic variable names, comments & code cleaning

---
 data_management.py |  48 ++++-------
 h5py_init.py       | 202 +++++++++++++++------------------------------
 img_exp.py         |   4 +-
 models.py          |  45 ++--------
 seq_exp.py         |  39 ++++-----
 util.py            |  13 +--
 6 files changed, 112 insertions(+), 239 deletions(-)

diff --git a/data_management.py b/data_management.py
index a401f1e..bb81624 100644
--- a/data_management.py
+++ b/data_management.py
@@ -10,11 +10,7 @@ import sys
 from h5py_init import init_videos, init_data_by_class
 root_drive = '.'
 
-#if not os.path.isdir(root_drive):
-#    print('Using Sharcnet equivalent of root_drive')
-#    root_drive = '/home/jjniatsl/project/jjniatsl/Fall-Data'
-
-def get_windowed_data(dset = 'Thermal', ADL_only = True, win_len = 8, img_width = 64, img_height = 64, avoid_vid =None, save_np=False):
+def get_windowed_data(dset = 'Thermal_Fall', ADL_only = True, win_len = 8, img_width = 64, img_height = 64, avoid_vid =None, save_np=False):
 
     '''
     Creates windowed version of dset data. avoid a video if required!
@@ -40,7 +36,7 @@ def get_windowed_data(dset = 'Thermal', ADL_only = True, win_len = 8, img_width
             print(data_dict)
             if ADL_only == True:
                 data_dict = dict((key, value) for key, value in data_dict.items() if
-                     ('adl' in key or 'ADL' in key or 'NA' in key) and (key != avoid_vid))
+                     ('ADL' in key or 'NA' in key) and (key != avoid_vid))
             print(data_dict)
             vids_win = create_windowed_arr_per_vid(vids_dict = data_dict, \
                         stride = 1, \
@@ -62,11 +58,11 @@ def get_windowed_data(dset = 'Thermal', ADL_only = True, win_len = 8, img_width
 
     return vids_win
 
-def init_windowed_arr(dset = 'Thermal', ADL_only = True, win_len = 8, img_width = 64, img_height = 64):
+def init_windowed_arr(dset = 'Thermal_Fall', ADL_only = True, win_len = 8, img_width = 64, img_height = 64):
 
     '''
     Creates windowed version of dset data. Saves windowed array to
-    'npData/ADL_data-proc-win_{}.npy'.format(train_or_test, dset, win_len), vids_win)
+    npData folder as npy file
 
     Params:
         str dset: dataset to use
@@ -81,29 +77,28 @@ def init_windowed_arr(dset = 'Thermal', ADL_only = True, win_len = 8, img_width
 
     if not os.path.isfile(master_path):
         print('initializing h5py..')
-        init_videos(img_width = img_width, img_height = img_height, raw = False, dset = dset)
+        init_videos(img_width=img_width, img_height=img_height, raw=False, dset=dset)
 
     with h5py.File(master_path, 'r') as hf:
 
             data_dict = hf[dset + '/Processed/Split_by_video']
-
             if ADL_only == True:
-                data_dict = dict((key,value) for\
-                 key, value in data_dict.items() if 'adl' in key or 'ADL' in key or 'NA' in key) #Get only ADL vids
+                # Get only normal behaviour vids
+                data_dict = dict((key,value) for key, value in data_dict.items() if 'ADL' in key or 'NA' in key)
 
-            vids_win = create_windowed_arr_per_vid\
-                        (vids_dict = data_dict, \
-                        stride = 1, \
-                        win_len = win_len,\
-                        img_width= img_width,\
-                        img_height= img_height)
+            vids_win = create_windowed_arr_per_vid(
+                        vids_dict=data_dict,
+                        stride=1,
+                        win_len=win_len,
+                        img_width=img_width,
+                        img_height=img_height)
 
             if ADL_only == True:
                 save_path = root_drive + '/npData/{}/'.format(dset)
 
                 if not os.path.isdir(save_path):
                     os.makedirs(save_path)
-                save_path = save_path + 'ADL_data-proc-win_{}.npy'.format(win_len)
+                save_path = save_path + 'training_data-imgdim_{}x{}-win_{}.npy'.format(img_width, img_height, win_len)
 
                 print('saving data to ', save_path)
                 np.save(save_path, vids_win)
@@ -257,20 +252,15 @@ def create_windowed_arr_per_vid(vids_dict, stride, win_len, img_width, img_heigh
 
     '''
     returns windows made of all videos
-    Assumes vids_dict is h5py structure, ie. vids_dict = hf['Data_2017/UR/Raw/Split_by_video']
+    Assumes vids_dict is h5py structure
     data set must contain atleast win_len frames
     '''
 
     vid_list = [len(vid['Data'][:]) for vid in list(vids_dict.values())]
-
     num_windowed = sum([int( np.floor((val-win_len)/stride) ) + 1 for val in vid_list])
-
     output_shape = (num_windowed, win_len, img_width, img_height, 1)
-
     print(output_shape)
-
     total = np.zeros(output_shape)
-
     i = 0
     for vid, name in zip(vids_dict.values(), vids_dict.keys()):
         print('windowing vid at', name)
@@ -293,14 +283,12 @@ def create_windowed_arr(arr, stride, win_len):
 
     img_width, img_height = arr.shape[1], arr.shape[2]
 
-    output_length = int( np.floor((len(arr) - win_len) / stride) ) + 1
+    output_length = int(np.floor((len(arr) - win_len) / stride)) + 1
     output_shape = (output_length, win_len, img_width, img_height, 1)
-
     total = np.zeros(output_shape)
-
     i = 0
     while i < output_length:
-        next_chunk = np.array( [arr[i+j] for j in range(win_len)] )
+        next_chunk = np.array([arr[i+j] for j in range(win_len)])
         # Can use np.arange if want to use time step i.e. np.arange(0,win_len,dt)
         total[i] = next_chunk
         i = i + stride
@@ -310,7 +298,7 @@ def create_windowed_arr(arr, stride, win_len):
 
 
 def load_data(split_by_vid_or_class = 'Split_by_vid', raw = False, img_width = 64, \
-    img_height = 64, vid_class = 'NonFall', dset = 'Thermal'):
+    img_height = 64, vid_class = 'NonFall', dset = 'Thermal_Dummy'):
 
     """
     Note :to use this function, need to have downloaded h5py for dset,
diff --git a/h5py_init.py b/h5py_init.py
index ed4ec80..dfc7ae5 100644
--- a/h5py_init.py
+++ b/h5py_init.py
@@ -8,80 +8,50 @@ import sys
 
 '''
 Note, these functions will not work without setting up the directories of video frames as shown in get_dir_lists. 
-Alternatively, contact me to get access to the final h5Py datasets, which this code procudes.
 '''
 
-root_drive = '.' #Current dir for now
-
-#if not os.path.isdir(root_drive):
-#    print('Using Sharcnet equivalent of root_drive')
-#    root_drive = '/home/jjniatsl/project/jjniatsl/Datasets'
+root_drive = '.'
 
 def get_dir_lists(dset):
 
     '''
-    Gets videos (ADl, Fall) directory path list
-    
+    Gets videos directory path list
     Params:
         str dset: dataset to be loaded
     Returns:
-        paths to ADL and Fall videos
+        paths to ADL/NA and Fall/Intrusion videos
     '''
 
-    path_Fall = root_drive + '/Datasets/{}/Fall/Fall*'.format(dset)
-    path_ADL = root_drive + '/Datasets/{}/NonFall/ADL*'.format(dset)
-
-    # Update path_Fall & ADL if some dataset in arranged in some particular way in folders!
-    if dset == 'IDD':
-        path_Fall = root_drive + '/Datasets/{}/Intrusion/Intru*'.format(dset)
-        path_ADL = root_drive + '/Datasets/{}/NonIntrusion/NA*'.format(dset)
-
-    elif dset == 'Thermal-Dummy':
-        path_Fall = root_drive + '/Datasets/Thermal-Dummy/Fall/Fall*'
-        path_ADL = root_drive + '/Datasets/Thermal-Dummy/NonFall/ADL*'
-
-    elif dset == 'Thermal':
-        path_Fall = root_drive + '/Datasets/Thermal/Fall/Fall*'
-        path_ADL = root_drive + '/Datasets/Thermal/NonFall/ADL*'
-    
-    elif dset == 'UR':
-        path_Fall = root_drive + '/Datasets/UR_Kinect/Fall/original/Fall*'
-        path_ADL = root_drive + '/Datasets/UR_Kinect/NonFall/original/adl*'
-    
-    elif dset == 'UR-Filled':
-        path_Fall = root_drive + '/Datasets/UR_Kinect/Fall/filled/Fall*'
-        path_ADL = root_drive + '/Datasets/UR_Kinect/NonFall/filled/adl*'
+    path_abnormal_vids = root_drive + '/Datasets/{}/Fall/Fall*'.format(dset)
+    path_normal_vids = root_drive + '/Datasets/{}/NonFall/ADL*'.format(dset)
 
-    elif dset == 'SDU':
-        path_Fall = root_drive + '/Datasets/SDUFall/Fall/Fall*/Depth'
-        path_ADL = root_drive + '/Datasets/SDUFall/NonFall/ADL*/Depth'
-    
-    elif dset == 'SDU-Filled':
-        path_Fall = root_drive + '/Datasets/SDUFall/Fall/Fall*/Filled'
-        path_ADL = root_drive + '/Datasets/SDUFall/NonFall/ADL*/Filled'
+    # Update paths if some dataset is arranged in some particular way in folders!
+    if dset == 'Thermal_Intrusion':
+        path_abnormal_vids = root_drive + '/Datasets/{}/Intrusion/Intru*'.format(dset)
+        path_normal_vids = root_drive + '/Datasets/{}/NonIntrusion/NA*'.format(dset)
         
-    print(path_Fall, path_ADL)
+    print(path_normal_vids, path_abnormal_vids)
 
     # glob returns non-ordered list of all pathnames matching a specified pattern
-    vid_dir_list_Fall = glob.glob(path_Fall)
-    vid_dir_list_ADL = glob.glob(path_ADL)
+    normal_vids_dir = glob.glob(path_normal_vids)
+    abnormal_vids_dir = glob.glob(path_abnormal_vids)
 
-    if len(vid_dir_list_Fall) == 0:
-        print('no Fall vids found')
+    if len(abnormal_vids_dir) == 0:
+        print('no Fall/Intru vids found')
     
-    if len(vid_dir_list_ADL) == 0:
-        print('no ADL vids found')
+    if len(normal_vids_dir) == 0:
+        print('no ADL/NA vids found')
 
-    return vid_dir_list_ADL, vid_dir_list_Fall
+    return normal_vids_dir, abnormal_vids_dir
 
 
-def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
+def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal_Fall'):
 
     '''
 
     Creates or overwrites h5py group corresponding to root_path (in body),
     for the h5py file located at
-    'N:/FallDetection/Datasets/H5Data/Data_set-{}-imgdim{}x{}.h5'.format(dset, img_width, img_height)
+    'H5Data/Data_set-{}-imgdim{}x{}.h5'.format(dset, img_width, img_height)
 
     For info on h5py: http://docs.h5py.org/en/stable/quick.html#quick
 
@@ -89,12 +59,12 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
     
     Processed (or Raw)
         Split_by_video
-            ADL1
+            ADL1 or NA1
                 Data
                     <HDF5 dataset "Data": shape (1397, 4096), type "<f8">
                 Labels
                     <HDF5 dataset "Labels": shape (1397,), type "<i4">
-            ADL2
+            ADL2 or NA2
                 Data
                     <HDF5 dataset "Data": shape (3203, 4096), type "<f8">
                 Labels
@@ -103,13 +73,13 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
                 .
                 .
 
-            ADL{N}
+            ADL{N} or NA{N}
                 Data
                     <HDF5 dataset "Data": shape (3203, 4096), type "<f8">
                 Labels
                     <HDF5 dataset "Labels": shape (3203,), type "<i4">
 
-            Fall1
+            Fall1 or Intru1
                 Data
                     <HDF5 dataset "Data": shape (49, 4096), type "<f8">
                 Labels
@@ -117,14 +87,14 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
                 .
                 .
                 .
-            Fall{M}
+            Fall{M} or Intru{M}
                 Data
                     <HDF5 dataset "Data": shape (49, 4096), type "<f8">
                 Labels
                     <HDF5 dataset "Labels": shape (49,), type "<i4">
 
 
-            where N is number of ADL videos, and M is number of Fall videos.
+            where N is number of ADL/NA videos, and M is number of Fall/Intrusion videos.
 
     Params:
         bool raw: if true, data will be not processed (mean centering and intensity scaling)
@@ -133,11 +103,11 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
         str dset: dataset to be loaded
     '''
 
-    path = root_drive + '/H5Data/Data_set-{}-imgdim{}x{}.h5'.format( dset, img_width, img_height)
+    path = root_drive + '/H5Data/Data_set-{}-imgdim{}x{}.h5'.format(dset, img_width, img_height)
 
-    vid_dir_list_0, vid_dir_list_1 = get_dir_lists(dset) # Dir of ADL, Fall videos
+    normal_vids_dir, abnormal_vids_dir = get_dir_lists(dset) # Directories of ADL/NA, Fall/Intrusion videos
 
-    if len(vid_dir_list_0) == 0 and len(vid_dir_list_1) == 0:
+    if len(normal_vids_dir) == 0 and len(abnormal_vids_dir) == 0:
         print('no videos found, make sure video files are placed in Datasets folder, terminating...')
         sys.exit()
 
@@ -148,19 +118,19 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
 
     print('creating data at root_path', root_path)
 
-    def init_videos_helper(root_path): #Nested to keep scope
+    def init_videos_helper(root_path):
             with h5py.File(path, 'a') as hf:
                 root = hf.create_group(root_path)
 
-                for vid_dir in vid_dir_list_1:
-                    # Create hf group for each Fall video with frames and labels for frame
-                    init_vid(vid_dir = vid_dir, vid_class = 1, img_width = img_width,
-                             img_height = img_height, hf = root, raw = raw, dset = dset)
+                for vid_dir in abnormal_vids_dir:
+                    # Create hf group for each Fall/Intrusion video with frames and labels for frame
+                    init_vid(vid_dir=vid_dir, vid_class=1, img_width=img_width,
+                             img_height=img_height, hf=root, raw=raw, dset=dset)
 
-                for vid_dir in vid_dir_list_0:
-                    # Create hf group for each ADL video with frames and labels for frame
-                    init_vid(vid_dir = vid_dir, vid_class = 0, img_width = img_width,
-                             img_height = img_height, hf = root, raw = raw, dset = dset)
+                for vid_dir in normal_vids_dir:
+                    # Create hf group for each ADL/NA video with frames and labels for frame
+                    init_vid(vid_dir=vid_dir, vid_class=0, img_width=img_width,
+                             img_height=img_height, hf=root, raw=raw, dset=dset)
 
     if os.path.isfile(path):
         # If .h5 already exists
@@ -169,7 +139,6 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
             print('video h5py file exists, deleting old group {}, creating new'.format(root_path))
             del hf[root_path]
             hf.close()
-
             init_videos_helper(root_path)
 
         else:
@@ -183,56 +152,47 @@ def init_videos(img_width = 64, img_height = 64, raw = False, dset = 'Thermal'):
         init_videos_helper(root_path)
 
 
-def init_vid(vid_dir = None, vid_class = None, img_width = 32, img_height = 32,
-             hf = None, raw = False,  dset = 'Thermal'):
+def init_vid(vid_dir = None, vid_class = None, img_width = 64, img_height = 64,
+             hf = None, raw = False,  dset = 'Thermal_Fall'):
     '''
     Creates hf group with vid_dir name to put Data and Labels for each frame
-    Processes all frames in video in ascending order & fetches labels for Fall vids from csv
+    Processes all frames in video in ascending order & fetches labels for Fall/Intrusion vids from csv if asked
 
     Params:
-        str vid_dir: path to vid dir of frames to be initialzied
-        int vid_class: 1 for Fall, 0 for NonFall
+        str vid_dir: path to vid dir of frames to be initialized
+        int vid_class: 1 for Fall/Intrusion, 0 for NonFall/NonIntrusion
         h5py group: group within which new group is nested
-
     '''
 
     print('initializing vid at', vid_dir)
 
     #--Get all frames from a video reading frames in ↑ order & processing each frame
-    # data.shape = (#frames, img_ht, img_wdth, 1)
-    data = create_img_data_set(fpath = vid_dir, ht = img_height, wd = img_width, raw = raw,
-                               sort = True, dset = dset)
-
-    labels = np.zeros(len(data)) # Labels intialized to 0 for each frame
-
-    if dset == 'SDU' or dset == 'SDU-Filled':
-        vid_dir_name = os.path.basename(os.path.dirname(vid_dir))
-    else:
-        vid_dir_name = os.path.basename(vid_dir)
-
-    print('vid_dir_name', vid_dir_name) # eg: Fall31 (vid folder name)
-    grp = hf.create_group(vid_dir_name) # Create folder name group eg. Fall31
-
-    # Fetch fall label start end from csv if Fall vid, assign to labels array
-    if (vid_dir_name in ['Fall' + str(i) for i in range(201)]) or (vid_dir_name in ['Intru' + str(i) for i in range(250)]): # 201 is max fall index across all vids
-        print('setting fall start')
-        Fall_start, Fall_stop = get_fall_indeces(vid_dir_name, dset)
-        labels[Fall_start-1:Fall_stop] = 1 # Fall_start:Fall_stop + 1
+    data = create_img_data_set(fpath=vid_dir, ht=img_height, wd=img_width, raw=raw, sort=True, dset=dset)
+    labels = np.zeros(len(data)) # Labels initialized to 0 for each frame
+
+    vid_dir_name = os.path.basename(vid_dir)
+    print('vid_dir_name', vid_dir_name) # eg: ADL2 or Fall31 (vid folder name)
+    grp = hf.create_group(vid_dir_name) # Create folder name group eg. ADL2 or Fall31
+
+    # If abnormal video, get fall/intrusion labels for each frame
+    if vid_class == 1:
+        print('setting fall/intrusion start & end')
+        abnormality_start, abnormality_stop = get_abnormal_indices(vid_dir_name, dset)
+        labels[abnormality_start-1:abnormality_stop] = 1
     
     grp['Labels'] = labels
     grp['Data'] = data
 
-def get_fall_indeces(Fall_name, dset):
+def get_abnormal_indices(dir_name, dset):
 
-    # Get Fall start, end indices from Labels.csv
+    # Get Fall/Intrusion start, end indices from Labels.csv
 
     root_dir = './Datasets/'
     labels_dir = root_dir + '/{}/Labels.csv'.format(dset)
 
     import pandas as pd
-    my_data = pd.read_csv(labels_dir, sep=',', header = 0, index_col = 0)
-    
-    start, stop = my_data.loc[Fall_name][:2]
+    my_data = pd.read_csv(labels_dir, sep=',', header=0, index_col=0)
+    start, stop = my_data.loc[dir_name][:2]
     print('start, stop', start, stop)
 
     return int(start), int(stop)
@@ -240,41 +200,19 @@ def get_fall_indeces(Fall_name, dset):
 
 def sort_frames(frames, dset):
 
+        # Frames are organized as: FALL_1-0001.jpg, FALL_1-0003.jpg, FALL_1-0002.jpg,...
         # Sorts the list frames in the ascending order acc to dset type
 
-        if dset == 'SDU' or dset == 'SDU-Filled': #TODO remove try except, failing to sort shoudl stop!
-            print('sorting SDU frames...')
-            
-            #try:
-            frames = sorted(frames, key = lambda x: int(os.path.basename(x).split('.')[0])) #SDU
-            # except ValueError:
-            #     print('failed to sort SDU vid frames')
-            #     pass
-        elif dset == 'UR' or dset == 'UR-Filled' or dset == 'Thermal' or dset == 'Thermal-Intrusion' or dset == 'IDD':
-            print('sorting UR or Thermal frames...')
-            try:
-                frames = sorted(frames, key = lambda x: int(x.split('-')[-1].split('.')[0]))
-            except ValueError:
-                print('failed to sort UR vid frames')
-                return
-            
-        elif dset == 'TST': 
-            try:
-                frames = sorted(frames, key = lambda x: int(x.split('_')[-1].split('.')[0]))
-            except ValueError:
-                print('failed to sort vid frames, trying again....')
-                pass
-
-        elif dset == 'FallFree' or dset == 'FallFree-Filled':
-            try:
-                frames = sorted(frames, key = lambda x: int(x.split('_')[2]))
-            except ValueError:
-                print('failed to sort vid frames, trying again....')
-                pass
+        print('sorting frames...')
+        try:
+            frames = sorted(frames, key=lambda x: int(x.split('-')[-1].split('.')[0]))
+        except ValueError:
+            print('failed to sort vid frames')
+            return
 
         return frames
 
-def create_img_data_set(fpath, ht = 64, wd = 64, raw = False, sort = True, dset = 'Thermal'):
+def create_img_data_set(fpath, ht = 64, wd = 64, raw = False, sort = True, dset = 'Thermal_Fall'):
 
         '''
         Creates data set of all images located at fpath. Sorts images if asked!
@@ -292,15 +230,12 @@ def create_img_data_set(fpath, ht = 64, wd = 64, raw = False, sort = True, dset
         '''
 
         fpath = fpath.replace('\\', '/')
-
         # Get all frames inside folder or folders with jpg or png extension
         frames = glob.glob(fpath+'/*.jpg') + glob.glob(fpath+'/*.png')
 
         if sort == True:
             frames = sort_frames(frames, dset)
 
-        #print("\n".join(frames)) #Use this to check if sorted
-
         data = np.zeros((frames.__len__(), ht, wd, 1)) # (frame_length, 64, 64, 1)
 
         for x, i in zip(frames, range(0, frames.__len__())):
@@ -313,16 +248,11 @@ def create_img_data_set(fpath, ht = 64, wd = 64, raw = False, sort = True, dset
 
             if raw == False:
                 # Image Processing
-
                 img = img - np.mean(img) # Mean Centering
                 img = img.astype('float32') / 255. # Rescaling
-
             data[i, :, :, :] = img
 
-       # data = data.reshape((len(data), np.prod(data.shape[1:]))) #Flatten the images
-
         print('data.shape', data.shape)
-
         return data
 
 def init_data_by_class(vid_class = 'NonFall', dset = 'Thermal',\
diff --git a/img_exp.py b/img_exp.py
index fdedd3a..fbf2217 100644
--- a/img_exp.py
+++ b/img_exp.py
@@ -64,11 +64,11 @@ class ImgExp:
 		'''
 	    Saves the model of the experiment to './Models/self.dset/self.model_name'
 	    '''
-		#save_string = self.exp_name #Do this again incase info added to str based on data load(ie after init)
+
 		if self.hor_flip == True:
 			self.model_name = self.model_name + '-hor_flip'
 
-		base = './Models/{}/'.format(self.dset)
+		base = './Models/{}'.format(self.dset)
 
 		if not os.path.isdir(base):
 			os.makedirs(base)
diff --git a/models.py b/models.py
index 551a44e..93044a7 100644
--- a/models.py
+++ b/models.py
@@ -11,36 +11,30 @@ from keras import backend as K
 """
 Defining Keras models as functions which return model object, aswell as model name and mode type strs.
 All models take take img_width and img_height ints, which correpsond to dimensions of images passed to models.
-
 """
+
 def DSTCAE_C3D(img_width, img_height, win_length):
 
     input_shape = (win_length, img_width, img_height, 1)
-    input_window = Input(shape = input_shape)
+    input_window = Input(shape=input_shape)
     temp_pool = 2
 
     x = Conv3D(16, (5, 3, 3), activation='relu', padding='same')(input_window)
     x = MaxPooling3D((1, 2, 2), padding='same')(x)
-
     x = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(x)
-    x = MaxPooling3D((temp_pool, 2, 2), padding='same')(x) #4
-
+    x = MaxPooling3D((temp_pool, 2, 2), padding='same')(x)
     x = Dropout(0.25)(x)
-
     x = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(x)
-    encoded = MaxPooling3D((temp_pool, 2, 2), padding='same')(x) #2
+    encoded = MaxPooling3D((temp_pool, 2, 2), padding='same')(x)
 
     # at this point the representation is (2, 8, 8) i.e. 128-dimensional
 
     x = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(encoded)
-    x = UpSampling3D((temp_pool, 2, 2))(x) #4
-
+    x = UpSampling3D((temp_pool, 2, 2))(x)
     x = Conv3D(8, (5, 3, 3), activation='relu', padding='same')(x)
-    x = UpSampling3D((temp_pool, 2, 2))(x) #8
-
+    x = UpSampling3D((temp_pool, 2, 2))(x)
     x = Conv3D(16, (5, 3, 3), activation='relu', padding = 'same')(x)
     x = UpSampling3D((1, 2, 2))(x)
-
     decoded = Conv3D(1, (5, 3, 3), activation='tanh', padding='same')(x)
 
     autoencoder = Model(input_window, decoded)
@@ -49,7 +43,6 @@ def DSTCAE_C3D(img_width, img_height, win_length):
     model_type = 'conv'
     model_name = 'DSTCAE_C3D'
     model = autoencoder
-
     return model, model_name, model_type
 
 
@@ -66,23 +59,15 @@ def DSTCAE_UpSampling(img_width, img_height, win_length):
 
     x = Conv3D(16, (temp_depth, 3, 3), activation='relu', padding='same')(input_window)
     x = MaxPooling3D((2, 2, 2), padding='same')(x)
-
-    # x = Conv3D(8, (temp_depth, 3, 3), activation='relu', padding='same')(x)
-    # x = MaxPooling3D((temp_pool, 2, 2), padding='same')(x)
-
     x = Dropout(0.25)(x)
-
     x = Conv3D(8, (temp_depth, 3, 3), activation='relu', padding='same')(x)
     encoded = MaxPooling3D((temp_pool, 2, 2), padding='same')(x)
-
     # at this point the representation is (2, 16, 16) i.e. 128-dimensional
 
     x = Conv3D(8, (temp_depth, 3, 3), activation='relu', padding='same')(encoded)
     x = UpSampling3D((temp_pool, 2, 2))(x)
-
     x = Conv3D(16, (temp_depth, 3, 3), activation='relu', padding='same')(x)
     x = UpSampling3D((temp_pool, 2, 2))(x)
-
     decoded = Conv3D(1, (temp_depth, 3, 3), activation='tanh', padding='same')(x)
 
     autoencoder = Model(input_window, decoded)
@@ -91,14 +76,12 @@ def DSTCAE_UpSampling(img_width, img_height, win_length):
     model_type = 'conv'
     model_name = 'DSTCAE_UpSamp'
     model = autoencoder
-
     return model, model_name, model_type
 
 
 def DSTCAE_Deconv(img_width, img_height, win_length):
     """
     int win_length: Length of window of frames
-
     Replace Upsampling with Deconv
     """
 
@@ -110,16 +93,12 @@ def DSTCAE_Deconv(img_width, img_height, win_length):
     
     x = Conv3D(16, (temp_depth, 3,3), activation='relu', padding='same')(input_window)
     x = MaxPooling3D((2, 2, 2), padding='same')(x)
-    #x = Conv3D(8, (temp_depth, 3, 3), activation='relu', padding='same')(x)
-    #x = MaxPooling3D((temp_pool, 2, 2), padding='same')(x)
     x = Dropout(0.25)(x)
-
     x = Conv3D(8, (temp_depth, 3, 3), activation='relu', padding='same')(x)
     encoded = MaxPooling3D((temp_pool, 2, 2), padding='same')(x)
 
     x = Deconvolution3D(8, (temp_depth, 3, 3), strides = (2,2,2), activation='relu', padding='same')(encoded)
     x = Deconvolution3D(16, (temp_depth, 3, 3), strides = (2,2,2), activation='relu', padding='same')(x)
-    
     decoded = Conv3D(1, (temp_depth, 3, 3), activation='tanh', padding='same')(x)
 
     autoencoder = Model(input_window, decoded)
@@ -128,14 +107,4 @@ def DSTCAE_Deconv(img_width, img_height, win_length):
     model_type = 'conv'
     model_name = 'DSTCAE_Deconv'
     model = autoencoder
-
-    return model, model_name, model_type
-
-
-import numpy as np
-if __name__ == "__main__":
-    model = dummy_3d(64,64,2)
-    print(model.summary())
-    # dummy = np.ones((1,8,64,64,1))*255
-    # #dummy = dummy- np.mean(dummy)
-
+    return model, model_name, model_type
\ No newline at end of file
diff --git a/seq_exp.py b/seq_exp.py
index d9ecb0c..324710e 100644
--- a/seq_exp.py
+++ b/seq_exp.py
@@ -44,28 +44,27 @@ class SeqExp(ImgExp):
                  pre_load=None, initial_epoch=0, epochs=1, dset='Thermal',
                  win_len=8, hor_flip=False, img_width=64, img_height=64):
 
-        ImgExp.__init__(self, model=model, img_width=img_width, \
-                        img_height=img_height, model_name=model_name, \
-                        batch_size=batch_size, model_type=model_type, \
-                        pre_load=pre_load, initial_epoch=initial_epoch, \
+        ImgExp.__init__(self, model=model, img_width=img_width,
+                        img_height=img_height, model_name=model_name,
+                        batch_size=batch_size, model_type=model_type,
+                        pre_load=pre_load, initial_epoch=initial_epoch,
                         epochs=epochs, hor_flip=hor_flip, dset=dset)
 
         self.win_len = win_len
 
-    def set_train_data(self, raw=False, mmap_mode=None):  # TODO init windows from h5py if no npData found
+    def set_train_data(self, raw=False, mmap_mode=None):
 
         '''
         loads or initializes windowed train data, and sets self.train_data accordingly
         '''
 
-        to_load = root_drive + '/npData/{}/ADL_data-proc-win_{}.npy'.format(self.dset, self.win_len)
+        to_load = root_drive + '/npData/{}/training_data-imgdim_{}x{}-win_{}.npy'.format(self.dset, self.img_width, self.img_height, self.win_len)
 
         if os.path.isfile(to_load):
-            print('npData found, loading..')
+            print('Training (npData) found, loading..')
             self.train_data = np.load(to_load, mmap_mode=mmap_mode)
         else:
-            print('npData not found, initializing..')
-
+            print('Training (npData) not found, initializing..')
             self.train_data = init_windowed_arr(dset=self.dset, ADL_only=True,
                                                 win_len=self.win_len,
                                                 img_width=self.img_width,
@@ -95,9 +94,9 @@ class SeqExp(ImgExp):
     def train(self, sample_weight=None):
 
         """
-                trains a sequential autoencoder on windowed data(sequences of
-                contiguous frames are reconstucted)
-                """
+        trains a sequential autoencoder on windowed data(sequences of
+        contiguous frames are reconstructed)
+        """
 
         model_name = self.model_name
         base = './Checkpoints/{}'.format(self.dset)
@@ -108,8 +107,7 @@ class SeqExp(ImgExp):
         if not os.path.isdir(base_logs):
             os.mkdir(base_logs)
 
-        checkpointer = ModelCheckpoint(filepath=base + '/' + model_name + '-' + \
-                                                '{epoch:03d}-{loss:.3f}.hdf5', period=100, verbose=1)
+        checkpointer = ModelCheckpoint(filepath=base + '/' + model_name + '-' + '{epoch:03d}-{loss:.3f}.hdf5', period=100, verbose=1)
         timestamp = time.time()
         print('./Checkpoints/' + model_name + '-' + '.{epoch:03d}-{loss:.3f}.hdf5')
 
@@ -118,25 +116,20 @@ class SeqExp(ImgExp):
         print(callbacks_list)
         print(csv_logger)
 
-        self.model.fit(self.train_data, self.train_data, epochs = self.epochs,
-                       batch_size = self.batch_size, verbose = 2,
-                       callbacks = callbacks_list, sample_weight = sample_weight)
+        self.model.fit(self.train_data, self.train_data, epochs=self.epochs,
+                       batch_size=self.batch_size, verbose=2,
+                       callbacks=callbacks_list, sample_weight=sample_weight)
         self.save_exp()
 
     def init_flipped_by_win(self, to_load_flip):
 
         if os.path.isfile(to_load_flip):
             data_flip = np.load(to_load_flip)
-            data_flip = data_flip.reshape(len(data_flip), self.train_data.shape[1],
-                                          self.train_data.shape[2],
-                                          self.train_data.shape[3], 1)
-
+            data_flip = data_flip.reshape(len(data_flip), self.train_data.shape[1], self.train_data.shape[2], self.train_data.shape[3], 1)
             return data_flip
-
         else:
             print('creating flipped by window data..')
             data_flip = flip_windowed_arr(self.train_data)
-
             return data_flip
 
     def get_MSE(self, test_data, agg_type='r_sigma'):
diff --git a/util.py b/util.py
index 30a5ec4..033be7f 100644
--- a/util.py
+++ b/util.py
@@ -37,16 +37,9 @@ def threshold(predictions=None, t=0.5):
 
 
 def get_output(labels, predictions, get_thres=False, to_plot=False, data_option=None, t=0.5, pos_label=1,
-               dset='Thermal', model_name='dst', dir_name='None'):
+               dset='Thermal_Fall', model_name='dst', dir_name='None'):
     # Calculate AUROC, AUPR and optimal threshold if asked for!
 
-    # predicted_classes = threshold(predictions, t)  # not useful t=0.5
-    # true_classes = labels
-    #
-    # # create confusion matrix (2 classes)
-    # conf_mat = confusion_matrix(y_true=true_classes, y_pred=predicted_classes)
-    # # report = classification_report(true_classes, predicted_classes)
-    # g_mean = geometric_mean_score(labels, predicted_classes)
     AUROC = []
     AUPR = []
 
@@ -296,9 +289,9 @@ def play_frames(frames, decoded_frames=[], labels=[]):
 
 def generate_test_vid_names(data_dict, dset):
 
-    ##--generates test folder names
+    '''Generates test folder names'''
 
-    if dset == 'Thermal_Fall':
+    if dset == 'Thermal_Fall' or dset == 'Thermal_Dummy':
         vid_base_name = 'Fall'
     elif dset == 'Thermal_Intrusion':
         vid_base_name = 'Intru'
-- 
GitLab