import sys
import os
import time
import argparse
import configparser
import pickle

import pandas as pd
import numpy as np
import nltk
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import preprocessing

from data_preprocessing import Preprocessor
from features_extractor import feature_extractor
from ClassPreprocessor import remove_weak_classes, resample_classes, create_dict, split_class
from classifiers import classifiers, grid_params
from evaluate_model import evaluate_model

nltk.download('stopwords')
nltk.download('punkt')

parser = argparse.ArgumentParser()
parser.add_argument("dataPath", help="path of the dataframe")
parser.add_argument("columnText", help="name of the column holding the text to preprocess", default='content')
parser.add_argument("columnClass", help="name of the column holding the classes")
parser.add_argument("minOfInstancePerClass", help="minimum number of instances required for each class", type=int)
parser.add_argument("maxOfInstancePerClass", help="maximum number of instances kept per class when resampling", type=int)
args = parser.parse_args()

dataPath = args.dataPath
columnText = args.columnText
columnClass = args.columnClass
minOfInstancePerClass = args.minOfInstancePerClass
maxOfInstancePerClass = args.maxOfInstancePerClass

# Create the directory tree in which the classification reports are saved.
if not os.path.exists('reports'):
    os.makedirs('reports')
if not os.path.exists(os.path.join('reports', columnClass)):
    os.makedirs(os.path.join('reports', columnClass))

dir_name_report = str(minOfInstancePerClass) + '_' + str(maxOfInstancePerClass)
if not os.path.exists(os.path.join('reports', columnClass, dir_name_report)):
    os.makedirs(os.path.join('reports', columnClass, dir_name_report))

# Create the directory in which models are saved and loaded back.
if not os.path.exists(os.path.join('models', columnClass)):
    os.makedirs(os.path.join('models', columnClass))

# Read the data and apply the preprocessing steps.
preprocessor = Preprocessor()
df = pd.read_csv(dataPath, sep="\t")
df = remove_weak_classes(df, columnClass, minOfInstancePerClass)
df = resample_classes(df, columnClass, maxOfInstancePerClass)

# Read the configuration file to retrieve the parameters of the feature extractors.
config = configparser.ConfigParser()
config.read('settings.conf')

vectorization_max_df = config.get('vectorizers', 'vectorization_max_df')
vectorization_max_df = int(vectorization_max_df) if vectorization_max_df.isdigit() else float(vectorization_max_df)
vectorization_min_df = config.get('vectorizers', 'vectorization_min_df')
vectorization_min_df = int(vectorization_min_df) if vectorization_min_df.isdigit() else float(vectorization_min_df)
vectorization_numberOfFeatures = config.get('vectorizers', 'vectorization_numberOfFeatures')
vectorization_numberOfFeatures = int(vectorization_numberOfFeatures) if vectorization_numberOfFeatures.isdigit() else None

doc2vec_vec_size = int(config.get('vectorizers', 'doc2vec_vec_size'))
max_epochs = int(config.get('vectorizers', 'max_epochs'))
doc2vec_min_count = int(config.get('vectorizers', 'doc2vec_min_count'))
# If dm=1, 'distributed memory' (PV-DM) is used; otherwise, distributed bag of words (PV-DBOW) is employed.
doc2vec_dm = int(config.get('vectorizers', 'doc2vec_dm'))
doc2vec_workers = int(config.get('vectorizers', 'doc2vec_workers'))
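# For reference, a minimal `settings.conf` consistent with the reads above.
# This is a sketch only: the values are illustrative assumptions, not taken
# from the source. Any non-numeric value for vectorization_numberOfFeatures
# (e.g. "None") disables the feature cap.
#
#   [vectorizers]
#   vectorization_max_df = 1.0
#   vectorization_min_df = 4
#   vectorization_numberOfFeatures = None
#   doc2vec_vec_size = 300
#   max_epochs = 10
#   doc2vec_min_count = 12
#   doc2vec_dm = 0
#   doc2vec_workers = 4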
print("size after resampling: ", len(df))

# Prepare the data: stratified train/test split, then label encoding.
#df = df[df[columnClass] != 'unclassified']
y = df[columnClass]
print(df.head())
print(df[columnClass].head())
train_x, test_x, train_y, test_y = train_test_split(df, y, test_size=0.33, random_state=42, stratify=y)

encoder = preprocessing.LabelEncoder()
train_y = encoder.fit_transform(train_y)
valid_y = encoder.transform(test_y)  # reuse the encoder fitted on the training labels

print("size of training set: ", len(train_x))
print("size of validation set: ", len(test_x))

for columnInput in [columnText, 'firstParagraph']:
    print('Process: ' + columnInput)

    extractor = feature_extractor(train_x, test_x, columnInput, columnClass)

    features_techniques = [
        ('counter', extractor.count_vect(max_df=vectorization_max_df, min_df=vectorization_min_df, numberOfFeatures=vectorization_numberOfFeatures)),
        ('tf_idf', extractor.tf_idf(max_df=vectorization_max_df, min_df=vectorization_min_df, numberOfFeatures=vectorization_numberOfFeatures)),
        ('doc2vec', extractor.doc2vec(max_epochs, doc2vec_vec_size, doc2vec_min_count, doc2vec_dm))]

    for feature_technique_name, features in features_techniques:
        print("**** Feature extractor:", feature_technique_name)
        # `features` holds the vectorized versions of train_x and test_x; keep
        # them under new names so the raw dataframes survive the next iteration.
        train_features, test_features = features

        for (clf_name, clf), (grid_param_name, grid_param) in zip(classifiers, grid_params):
            print(clf_name, clf, grid_param_name, grid_param)
            model_file_name = columnInput + '_' + feature_technique_name + '_' + clf_name + str(minOfInstancePerClass) + '_' + str(maxOfInstancePerClass) + ".pkl"

            if clf_name != 'bayes':
                clf = GridSearchCV(clf, grid_param, refit=True, verbose=3, n_jobs=-1)
            elif feature_technique_name == 'doc2vec':
                # Naive Bayes cannot handle the negative feature values doc2vec produces.
                continue

            # Load the model if it was already trained, otherwise fit and save it.
            model_path = os.path.join('models', columnClass, model_file_name)
            t_begin = time.time()
            if os.path.isfile(model_path):
                with open(model_path, 'rb') as file:
                    clf = pickle.load(file)
            else:
                clf.fit(train_features, train_y)
                with open(model_path, 'wb') as file:
                    pickle.dump(clf, file)
            t_end = time.time()
            training_time = t_end - t_begin

            y_pred = clf.predict(test_features)

            # Evaluate the model and save the classification report.
            file_name_report = columnInput + '_' + feature_technique_name + '_' + clf_name
            report, accuracy, weighted_avg = evaluate_model(clf, test_features, valid_y, y_pred, valid_y,
                                                            [str(e) for e in encoder.transform(encoder.classes_)],
                                                            encoder.classes_,
                                                            os.path.join('reports', columnClass, dir_name_report, file_name_report) + '.pdf')
            report.to_csv(os.path.join('reports', columnClass, dir_name_report, file_name_report + '.csv'))

            with open(os.path.join('reports', columnClass, dir_name_report, file_name_report + '.txt'), 'w') as f:
                sys.stdout = f  # Redirect the standard output to the report file.
                print('accuracy : {}'.format(accuracy))
                print('weighted_Precision : {}'.format(weighted_avg['precision']))
                print('weighted_Recall : {}'.format(weighted_avg['recall']))
                print('weighted_F-score : {}'.format(weighted_avg['f1-score']))
                print('weighted_Support : {}'.format(weighted_avg['support']))
                print(dict(zip(encoder.classes_, encoder.transform(encoder.classes_))))
                print('training time : {}'.format(training_time))
                try:
                    print('best parameters : {}'.format(clf.best_params_))
                except AttributeError:
                    # Plain estimators (no grid search) expose no best_params_.
                    pass

            sys.stdout = sys.__stdout__  # Reset the standard output to its original value.
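# A hedged usage sketch. The script and file names below are assumptions, not
# taken from the source: run the experiments over a tab-separated corpus whose
# classes live in a 'domain' column, dropping classes with fewer than 50
# instances and resampling the rest down to at most 1500 instances per class:
#
#   python experiments_classic_classifiers.py data/corpus.tsv content domain 50 1500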