'''
Calculate recall, precision, F1 and accuracy
args:
    --dataset: where to find the confusion matrix
'''

import warnings
warnings.filterwarnings("ignore")

from .parse import get_conf_mat
from sklearn.metrics import accuracy_score, precision_score, f1_score, recall_score
import argparse
import json

def from_conf_mat(mat):
    '''
    Expand a confusion matrix back into per-sample label lists.

    input:
        mat: the confusion matrix (rows = true labels, columns = predicted
             labels; entry [i, j] counts samples of class i predicted as j)
    return:
        y_true: a list of true labels of all the samples
        y_pred: corresponding predictions
    raises:
        ValueError: if the reconstructed lists disagree with the matrix totals
    '''
    # Row i sums to the number of samples whose true label is i.
    y_true = []
    for label, count in enumerate(mat.sum(axis=1).tolist()):
        y_true += [label] * count
    # Explicit raise instead of assert: asserts are stripped under `python -O`.
    if len(y_true) != mat.sum():
        raise ValueError("y_true length does not match confusion matrix total")
    # Walk cells in row order so predictions line up with y_true above.
    y_pred = []
    for row in mat.tolist():
        for label, count in enumerate(row):
            y_pred += [label] * count
    if len(y_pred) != len(y_true):
        raise ValueError("y_pred length does not match y_true length")
    return y_true, y_pred

def cal(mat, average='macro'):
    '''
    Compute the four standard classification scores from a confusion matrix.

    input:
        mat: confusion matrix
        average: averaging mode forwarded to sklearn
                 ('macro', 'micro', 'weighted')
    return:
        Dict: 4 kinds of scores keyed by metric name
    '''
    labels, preds = from_conf_mat(mat)
    # Accuracy takes no averaging mode; the other three forward `average`.
    scores = {'accuracy': accuracy_score(labels, preds)}
    scores['precision'] = precision_score(labels, preds, average=average)
    scores['recall'] = recall_score(labels, preds, average=average)
    scores['F1'] = f1_score(labels, preds, average=average)
    return scores

# Supported datasets mapped to the parameters get_conf_mat needs to parse
# each dataset's confusion-matrix text dump.
datasets = {
    # dataset name: (label numbers, lines per row)
    "CSTNET-TLS1.3": (120, 7),   # 120 classes; each matrix row spans 7 text lines
    "USTC-TFC": (20, 2),         # 20 classes; 2 text lines per row
    "ISCX-VPN-app": (17, 1),
    "ISCX-VPN-service": (12, 1),
    "ISCX-Tor": (16, 1)
}

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default="", help="The dataset to get confusion matrix")
    args = parser.parse_args()
    # Look up the parsing parameters (label count, lines per row) for the
    # requested dataset; None means the name is not supported.
    dataset_info = datasets.get(args.dataset)
    if dataset_info is None:
        print("No such dataset!")
        # raise SystemExit rather than exit(): the exit() builtin is a
        # site-module convenience and is unavailable under `python -S`.
        raise SystemExit(-1)
    mat = get_conf_mat(*dataset_info, args.dataset)
    # Report all three sklearn averaging modes side by side.
    output = {avg: cal(mat, avg) for avg in ['macro', 'micro', 'weighted']}
    print(json.dumps(output, sort_keys=False, indent=4))