import os
import pickle
import re
import time

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from scipy import interpolate
from sklearn import metrics

from config import Config
from timm import models
from timm.datasets import get_test_dataloader
from timm.datasets.oulu_npu_dataset import get_oulu_npu_test_dataloader, get_oulu_npu_test_dataloader_leave_one
from timm.tools.metrics import AverageMeter, ProgressMeter, accuracy
from timm.tools.train_utils import parse_args

# Template for the on-disk cache of per-run predictions; filled with the
# checkpoint filename in test() / show_metrics().
PICKLE_FILE = "old_plot/{}.pickle"
CONFIG = Config()
# Redirect torch.hub's download/cache directory to the project-configured path.
torch.hub.set_dir(CONFIG['TORCH_HOME'])

# Let cuDNN auto-tune convolution algorithms; safe here because test batches
# have a fixed input size.
torch.backends.cudnn.benchmark = True


def test(test_loader, model, args):
    """Run ``model`` over ``test_loader`` and cache predictions to disk.

    Each batch feeds the RGB image and its MSR counterpart to the two-stream
    ``model``; hard predictions (argmax) and the softmax probability of
    class 1 (presumably the "live"/positive class -- TODO confirm label
    convention) are accumulated. Afterwards the triple
    ``[y_true, y_pred, y_score]`` is pickled to ``PICKLE_FILE`` keyed by the
    checkpoint filename so show_metrics() can load it back.

    Args:
        test_loader: iterable yielding dict samples with keys
            'images', 'msrs', 'labels' (CUDA-movable tensors).
        model: network taking ``(images, msrs)`` and returning class logits.
        args: parsed CLI namespace; uses ``print_freq`` and ``resume``.
    """
    y_true = []
    y_pred = []
    y_score = []

    batch_time = AverageMeter('Time', ':6.3f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    progress = ProgressMeter(len(test_loader), [batch_time, top1], prefix='Test: ')

    model.eval()

    with torch.no_grad():
        end = time.time()
        for batch_idx, sample in enumerate(test_loader):
            images = sample['images'].cuda()
            images_msr = sample['msrs'].cuda()
            labels = sample['labels'].cuda()
            y_true.extend(labels.tolist())

            # compute output (two-stream forward: RGB + MSR)
            labels_pred = model(images, images_msr)

            pred = torch.argmax(labels_pred, dim=1)
            y_pred.extend(pred.tolist())
            # Probability of class 1 serves as the scalar score for ROC/EER.
            score = torch.nn.functional.softmax(labels_pred, dim=1)
            score = score[:, 1]
            y_score.extend(score.tolist())

            # measure accuracy
            acc1, = accuracy(labels_pred, labels)
            top1.update(acc1[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if (batch_idx + 1) % args.print_freq == 0:
                progress.display(batch_idx + 1)

    # Context manager fixes the file-handle leak of the original
    # ``pickle.dump(..., open(...))`` (the handle was never closed).
    with open(PICKLE_FILE.format(args.resume.split('/')[-1]), "wb") as f:
        pickle.dump([y_true, y_pred, y_score], f)


def cal_metric(target, predicted, num_points=100001):
    """Compute TPR at fixed FPR operating points from the ROC curve.

    Args:
        target: ground-truth binary labels.
        predicted: scalar scores for the positive class.
        num_points: resolution of the returned plotting grid (new, optional;
            keeps the interface backward-compatible).

    Returns:
        (TPRs, curve) where ``TPRs`` maps "TPR@FPR=10E-k" to the interpolated
        TPR at FPR = 10**-k, and ``curve`` is ``{'x': fpr_grid, 'y': tpr_grid}``
        for plotting.
    """
    fpr, tpr, thresholds = metrics.roc_curve(target, predicted)

    # Evaluate the piecewise-linear ROC directly at the target FPRs with
    # np.interp. The original built a 1e8-point grid (~1.6 GB for scale + y)
    # and located operating points via exact float equality on arange values,
    # which could return an empty match and crash in np.squeeze.
    target_fprs = {"TPR@FPR=10E-1": 0.1, "TPR@FPR=10E-2": 0.01,
                   "TPR@FPR=10E-3": 0.001, "TPR@FPR=10E-4": 0.0001}
    TPRs = {key: float(np.interp(value, fpr, tpr))
            for key, value in target_fprs.items()}

    # Dense-but-bounded curve for plotting, same {'x', 'y'} return shape
    # as before.
    scale = np.linspace(0.0, 1.0, num_points)
    y = np.interp(scale, fpr, tpr)

    return TPRs, {'x': scale, 'y': y}
    # return eer, TPRs, auc, {'x': scale, 'y': y}


def show_metrics(args):
    """Load cached predictions and print anti-spoofing metrics.

    Reads the ``[y_true, y_pred, y_score]`` triple pickled by test() and
    reports EER, APCER, BPCER, ACER, HTER, accuracy and ROC-AUC.

    Label convention assumed here: 0 = attack/negative, 1 = bona-fide/positive
    -- TODO confirm against the dataset loader.
    """
    with open(PICKLE_FILE.format(args.resume.split('/')[-1]), "rb") as f:
        y_true, y_pred, y_score = pickle.load(f)
    print(len(y_true), len(y_pred), len(y_score))

    # Accuracy
    acc = metrics.accuracy_score(y_true, y_pred)
    # ROC
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, drop_intermediate=False)

    # Equal error rate: FNR at the threshold where |FNR - FPR| is smallest.
    fnr = 1 - tpr
    eer = fnr[np.nanargmin(np.absolute(fnr - fpr))]

    # Pinning labels=[0, 1] keeps the matrix 2x2 even when only one class
    # occurs in y_true/y_pred; the original unlabelled call returned a 1x1
    # matrix in that case and crashed on cm[1][1].
    cm = metrics.confusion_matrix(y_true, y_pred, labels=[0, 1])
    print("cm: ", cm)
    tn, fp, fn, tp = cm.ravel()
    apcer = 1.0 if (fp + tn == 0) else fp / float(tn + fp)  # attack classified as bona-fide
    bpcer = 1.0 if (fn + tp == 0) else fn / float(fn + tp)  # bona-fide classified as attack
    acer = (apcer + bpcer) / 2.0
    # FAR/FRR mirror APCER/BPCER; each defaults to 1.0 when its own
    # denominator is empty. The original duplicated these branches and could
    # divide by zero when both denominators were empty.
    far = 1.0 if (fp + tn == 0) else fp / float(fp + tn)
    frr = 1.0 if (fn + tp == 0) else fn / float(fn + tp)
    hter = (far + frr) / 2.0

    roc_auc = metrics.auc(fpr, tpr)
    metrics_template = "EER: {:f} APCER: {:f} BPCER: {:f} ACER: {:f} HTER: {:f} ACC: {:f} AUC: {:f}"
    print(metrics_template.format(eer, apcer, bpcer, acer, hter, acc, roc_auc))


def main():
    """Entry point: build the model, load a checkpoint, evaluate, report.

    Nothing is run unless ``args.resume`` points at a checkpoint file.
    """
    args = parse_args()
    print(args)
    args.distributed = False

    # Checkpoint filename doubles as the cache key for the prediction pickle.
    checkpoint_name = args.resume.split('/')[-1]
    print(checkpoint_name)

    os.makedirs(os.path.join(args.data_dir, 'results'), exist_ok=True)
    if args.resume:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

        print("Initializing Networks")
        # 'full' color space presumably stacks three 3-channel color spaces
        # into 9 input channels -- TODO confirm against the dataloader.
        in_chans = 3 if args.color_space != 'full' else 9
        model = models.__dict__[args.arch](pretrained=True, in_chans=in_chans, num_classes=2)
        model_cfg = model.default_cfg

        model.cuda()
        print("Loading checkpoint {}".format(args.resume))
        # NOTE(security): torch.load unpickles arbitrary objects -- only load
        # trusted checkpoints.
        checkpoint = torch.load(args.resume, map_location="cpu")
        state_dict = checkpoint.get("state_dict", checkpoint)
        # Strip DataParallel's "module." prefix. The original pattern
        # "^module." left the dot unescaped, so it also matched e.g.
        # "modules..." keys; escape it. strict=False tolerates head-shape
        # mismatches but also silently skips genuinely missing weights.
        model.load_state_dict({re.sub(r"^module\.", "", k): v for k, v in state_dict.items()}, strict=False)

        print("Initializing Data Loader")
        test_loader = get_test_dataloader(model_cfg, args)

        print("Start testing")
        test(test_loader, model, args)  # inference pass; caches predictions
        show_metrics(args)              # reads the cache and prints metrics


if __name__ == '__main__':
    main()
