evaluate_bertFineTuning.py
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix


def evaluate_bertFineTuning(pred_labels_, true_labels_, encoder):
    # classification_report expects (y_true, y_pred)
    report = classification_report(true_labels_, pred_labels_, output_dict=True)

    # Integer class ids (as strings, matching the report's keys) and their original names
    classes = [str(e) for e in encoder.transform(encoder.classes_)]
    classesName = encoder.classes_

    accuracy = report['accuracy']
    weighted_avg = report['weighted avg']

    # Collect per-class metrics from the report, in encoded-class order
    precision = []
    recall = []
    f1 = []
    support = []
    dff = pd.DataFrame(columns=['className', 'precision', 'recall', 'f1-score', 'support', 'FP', 'FN', 'TP', 'TN'])
    for c in classes:
        precision.append(report[c]['precision'])
        recall.append(report[c]['recall'])
        f1.append(report[c]['f1-score'])
        support.append(report[c]['support'])

    # Per-class counts derived from the confusion matrix: column sums minus the
    # diagonal give false positives, row sums minus the diagonal give false
    # negatives, the diagonal gives true positives, and the rest are true negatives.
    cnf_matrix = confusion_matrix(true_labels_, pred_labels_)
    FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
    FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
    TP = np.diag(cnf_matrix)
    TN = cnf_matrix.sum() - (FP + FN + TP)

    # Assemble the per-class summary table
    dff['className'] = classesName
    dff['precision'] = precision
    dff['recall'] = recall
    dff['f1-score'] = f1
    dff['support'] = support
    dff['FP'] = FP
    dff['FN'] = FN
    dff['TP'] = TP
    dff['TN'] = TN

    return dff, accuracy, weighted_avg
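

# A minimal usage sketch (not part of the original snippet): it assumes an
# sklearn LabelEncoder was fit on the string labels and that predictions and
# gold labels are already integer-encoded arrays. Names such as `y_true`,
# `y_pred`, and `label_encoder` are illustrative only.
if __name__ == '__main__':
    from sklearn.preprocessing import LabelEncoder

    label_encoder = LabelEncoder()
    label_encoder.fit(['negative', 'neutral', 'positive'])

    # Toy gold and predicted labels, encoded as integers
    y_true = label_encoder.transform(['positive', 'negative', 'neutral', 'positive'])
    y_pred = label_encoder.transform(['positive', 'neutral', 'neutral', 'positive'])

    dff, accuracy, weighted_avg = evaluate_bertFineTuning(y_pred, y_true, label_encoder)
    print(dff)
    print('accuracy:', accuracy)
    print('weighted avg:', weighted_avg)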