import torch
import torch.nn as nn
import time
import sys

sys.path.append("./")
from train import batchify_with_label


from torchmetrics import F1Score, Precision, Recall
from torchmetrics import AUC
from loguru import logger
from sklearn.metrics import classification_report


def cal_metric(y_pred, y_true):
    """Compute precision, recall, F1 and AUC over flat label lists.

    Args:
        y_pred: sequence of predicted class ids.
        y_true: sequence of gold class ids, same length/shape as y_pred.

    Returns:
        Tuple of (precision, recall, f1, auc) torch scalars.
    """
    y_pred = torch.tensor(y_pred)
    y_true = torch.tensor(y_true)

    assert y_true.shape == y_pred.shape

    # BUG FIX: torchmetrics metrics are called as metric(preds, target).
    # The original passed (y_true, y_pred), which transposes precision
    # and recall (and fed AUC the swapped axes).
    p = Precision()(y_pred, y_true)
    r = Recall()(y_pred, y_true)
    f1 = F1Score()(y_pred, y_true)
    # NOTE(review): AUC here integrates (x=preds, y=labels) points; this
    # looks like a questionable use of torchmetrics.AUC rather than a
    # ROC-AUC — confirm intent against the metric's definition.
    auc = AUC(reorder=True)(y_pred, y_true)

    fmt = "P: {}, R:{}, F1:{}, AUC:{}".format(p, r, f1, auc)
    logger.info(fmt)

    return p, r, f1, auc


def report(data, y_pred, y_true):
    """Log a sklearn classification report for one evaluation pass.

    Args:
        data: object exposing ``label_alphabet`` and ``label_alphabet_size``.
        y_pred: flat list of predicted label ids.
        y_true: flat list of gold label ids.
    """
    info = [
        data.label_alphabet.get_instance(i) for i in range(data.label_alphabet_size)
    ]
    fmt = "{}".format(data.label_alphabet.instance2index)
    logger.info(fmt)

    # BUG FIX: the original compared a set with an int
    # (`set(y_pred) == len(info)`), which is always False, making the
    # target_names branch unreachable. Also, sklearn's signature is
    # classification_report(y_true, y_pred, ...); the original swapped
    # the arguments, transposing precision and recall in the report.
    if len(set(y_pred)) == len(info) and len(set(y_true)) == len(info):
        fmt = classification_report(y_true, y_pred, digits=5, target_names=info)
    else:
        fmt = classification_report(y_true, y_pred, digits=5, zero_division=1)

    logger.info("\n" + fmt)


def evaluate(data, model, args, name="test"):
    """Run the model over one data split and return (p, r, f1, auc).

    Args:
        data: dataset holder with ``{train,dev,test,raw}_Ids`` and the
            label alphabet used by :func:`report`.
        model: the trained model; put into eval mode here.
        args: namespace providing ``batch_size`` and ``fuse_type``.
        name: which split to evaluate ("train"/"dev"/"test"/"raw").

    Returns:
        The (precision, recall, f1, auc) tuple from :func:`cal_metric`.

    Raises:
        ValueError: if ``name`` is not a known split.
    """
    if name == "train":
        instances = data.train_Ids
    elif name == "dev":
        instances = data.dev_Ids
    elif name == "test":
        instances = data.test_Ids
    elif name == "raw":
        instances = data.raw_Ids
    else:
        # BUG FIX: the original only printed and then crashed later with
        # a NameError on `instances`; fail fast with a clear error.
        raise ValueError("Error: wrong evaluate name, {}".format(name))

    # Eval mode: disables dropout and uses running batch-norm statistics.
    model.eval()

    batch_size = args.batch_size
    start_time = time.time()  # kept for parity with original (unused)
    train_num = len(instances)
    total_batch = train_num // batch_size + 1

    y_pred = []
    y_true = []

    for batch_id in range(total_batch):
        with torch.no_grad():
            start = batch_id * batch_size
            end = min((batch_id + 1) * batch_size, train_num)
            # BUG FIX: the original did `end = train_num; continue`,
            # skipping the final partial batch entirely and silently
            # dropping up to batch_size-1 instances from every
            # evaluation. Only skip genuinely empty batches.
            if start >= end:
                continue

            instance = instances[start:end]

            (
                gaz_list,
                batch_word,
                batch_biword,
                batch_wordlen,
                batch_label,
                layer_gaz,
                gaz_count,
                gaz_chars,
                gaz_mask,
                gazchar_mask,
                mask,
                batch_bert,
                bert_mask,
                scopes
            ) = batchify_with_label(instance)

            if args.fuse_type == "moe" or args.fuse_type == "hmoe":
                prob, _ = model(
                    batch_word,
                    batch_biword,
                    layer_gaz,
                    gaz_count,
                    gaz_chars,
                    gaz_mask,
                    gazchar_mask,
                    batch_bert,
                    batch_label,
                    bert_mask,
                    scopes
                )
            else:
                # NOTE(review): batch_label/bert_mask order differs from
                # the moe branch above — assumed intentional per the two
                # model signatures; confirm against the model definitions.
                prob = model(
                    batch_word,
                    batch_biword,
                    layer_gaz,
                    gaz_count,
                    gaz_chars,
                    gaz_mask,
                    gazchar_mask,
                    batch_bert,
                    bert_mask,
                    batch_label,
                    scopes,
                )

            y_pred.append(prob.argmax(dim=-1).tolist())
            labels = batch_label.squeeze().tolist()
            # squeeze() on a size-1 batch collapses to a 0-d tensor and
            # tolist() returns a scalar, which would break the flatten
            # below — normalize to a list.
            y_true.append(labels if isinstance(labels, list) else [labels])

    # Flatten list-of-batches into flat label lists.
    y_pred, y_true = sum(y_pred, []), sum(y_true, [])

    report(data, y_pred, y_true)
    return cal_metric(y_pred, y_true)


def main():
    """Script entry point; evaluation wiring is not implemented yet."""
    # TODO: parse arguments and call evaluate() on the chosen split.


# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
