Commit c2f5f604 authored by Ludovic Moncla

Merge branch 'master' into 'branch_dev_vectorizationFeature'

# Conflicts:
#   classifiers.py
#   experimentsClassicClassifiers.py
parents e659184a a2ae5c96
1 merge request: !4 Branch dev vectorization feature
classifiers.py

@@ -14,15 +14,15 @@ classifiers = [
     ('bayes', MultinomialNB()),
     ('lr', LogisticRegression()),
     ('sgd', SGDClassifier()),
-    ('svm', SVC() ),
-    ('decisionTree',DecisionTreeClassifier()),
+    #('decisionTree',DecisionTreeClassifier()),
     ('rfc', RandomForestClassifier()),
-    ('knn', KNeighborsClassifier())
+    ('knn', KNeighborsClassifier()),
+    ('svm', SVC() )
 ]
 
-param_grid_svm = {'C':[1,10,100,1000],'gamma':[1,0.1,0.001,0.0001], 'kernel':['linear','rbf']}
-param_grid_decisionTree = { 'criterion' : ['gini', 'entropy'], 'max_depth':range(5,10), 'min_samples_split': range(5,10), 'min_samples_leaf': range(1,5) }
+param_grid_svm = {'C':[1,10,100,1000],'gamma':[0.1,0.001,0.0001], 'kernel':['linear','rbf']}
+#param_grid_decisionTree = { 'criterion' : ['gini', 'entropy'], 'max_depth':range(5,10), 'min_samples_split': range(5,10), 'min_samples_leaf': range(1,5) }
 param_grid_rfc = { 'n_estimators': [200, 500], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth' : [4,5,6,7,8], 'criterion' :['gini', 'entropy'] }
 param_grid_lr = {"C":np.logspace(-3,3,7), "penalty":['none',"l2"]}
 param_grid_sgd = { "loss" : ["hinge", "log", "squared_hinge", "modified_huber"], "alpha" : [0.0001, 0.001, 0.01, 0.1], "penalty" : ["l2", "l1", "none"], "max_iter" : [500]}
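
For context, here is a minimal sketch (not taken from the repository) of how such (name, estimator) pairs and their param_grid_* dictionaries are typically combined with scikit-learn's GridSearchCV. The param_grids lookup dict, the shortened grids, and the toy data are assumptions for illustration only.

# Hedged sketch, not the project's code: pairing the classifier list with its grids.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

classifiers = [
    ('bayes', MultinomialNB()),
    ('lr', LogisticRegression(max_iter=1000)),
    ('sgd', SGDClassifier()),
    ('rfc', RandomForestClassifier()),
    ('knn', KNeighborsClassifier()),
    ('svm', SVC()),
]

# Hypothetical name -> grid lookup; grids shortened from the ones defined above.
param_grids = {
    'svm': {'C': [1, 10, 100], 'gamma': [0.1, 0.001], 'kernel': ['linear', 'rbf']},
    'lr': {'C': np.logspace(-3, 3, 7), 'penalty': ['l2']},
    'sgd': {'loss': ['hinge', 'modified_huber'], 'alpha': [0.0001, 0.01]},
}

# Toy non-negative data so MultinomialNB can also be fitted.
X, y = make_classification(n_samples=200, n_features=20, random_state=0)
X = np.abs(X)

for clf_name, clf in classifiers:
    grid = param_grids.get(clf_name)
    if grid is not None:
        # Classifiers with a grid are wrapped in a cross-validated search.
        clf = GridSearchCV(clf, grid, refit=True, cv=3)
    clf.fit(X, y)
    print(clf_name, round(clf.score(X, y), 3))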
experimentsClassicClassifiers.py

@@ -72,8 +72,8 @@ doc2vec_min_count = int(config.get('vectorizers','doc2vec_min_count'))
 doc2vec_dm = int(config.get('vectorizers','doc2vec_dm')) # If dm=1, ‘distributed memory’ (PV-DM) is used. Otherwise, distributed bag of words (PV-DBOW) is employed.
 doc2vec_workers = int(config.get('vectorizers','doc2vec_workers'))
 
-for columnInput in [columnText, 'firstParagraph']:
+for columnInput in ['firstParagraph',columnText]:
 
     print('Process: ' + columnInput)
     #prepare data
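
The doc2vec_* values read above correspond to standard gensim Doc2Vec hyperparameters. As a hedged illustration (the tiny corpus, the vector-size value, and every name other than the doc2vec_* variables are assumptions, not the project's code), they would be passed along these lines:

# Hedged sketch using gensim's Doc2Vec API; corpus and values invented for illustration.
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

doc2vec_vec_size = 100   # assumed value; in the script these come from the config file
doc2vec_min_count = 1
doc2vec_dm = 1           # 1 -> PV-DM ('distributed memory'), 0 -> PV-DBOW
doc2vec_workers = 4

corpus = [TaggedDocument(words=text.lower().split(), tags=[str(i)])
          for i, text in enumerate(["a first toy document", "a second toy document"])]

model = Doc2Vec(corpus,
                vector_size=doc2vec_vec_size,
                min_count=doc2vec_min_count,
                dm=doc2vec_dm,
                workers=doc2vec_workers,
                epochs=20)
print(model.infer_vector("an unseen toy document".split())[:5])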
@@ -108,7 +108,7 @@ for columnInput in [columnText, 'firstParagraph']:
 
        model_file_name = columnInput + '_' +feature_technique_name + '_' + clf_name+ str(minOfInstancePerClass) + '_' + str(maxOfInstancePerClass) +".pkl"
 
        if clf_name != 'bayes' :
-           clf = GridSearchCV(clf, grid_param, refit = True, verbose = 3)
+           clf = GridSearchCV(clf, grid_param, refit = True, verbose = 3, n_jobs=-1)
        elif feature_technique_name == 'doc2vec':
            continue
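
The only functional change in this hunk is the added n_jobs=-1, which spreads the cross-validated grid-search fits over all available CPU cores. A self-contained illustration on toy data (the dataset and grid below are assumptions, not the project's pipeline):

# Hedged illustration of the n_jobs=-1 change on toy data.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
grid_param = {'C': [1, 10, 100], 'kernel': ['linear', 'rbf']}

# refit=True retrains the best configuration on the whole training set;
# n_jobs=-1 runs the candidate fits in parallel on every available core.
clf = GridSearchCV(SVC(), grid_param, refit=True, verbose=3, n_jobs=-1)
clf.fit(X, y)
print(clf.best_params_)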