import os
import pickle
import random
import re
import time
import warnings

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from scipy import interpolate
from sklearn import metrics
from torch.cuda.amp import autocast

from config import Config
from training import models
from training.datasets.oulu_npu_dataset import get_oulu_npu_test_dataloader, get_oulu_npu_dataloader
from training.models import attention_fusion
from training.tools.metrics import AverageMeter, ProgressMeter, accuracy
from training.tools.train_utils import parse_args

# Per-run dump of (y_true, y_pred, y_score); formatted with the checkpoint
# file name so val/test runs of different checkpoints don't collide.
PICKLE_FILE = "plot/{}.pickle"
CONFIG = Config()
# Cache directory for torch.hub downloads (pretrained backbone weights).
torch.hub.set_dir(CONFIG['TORCH_HOME'])
# Restrict visible GPUs from project config; may be overridden later in main().
os.environ["CUDA_VISIBLE_DEVICES"] = CONFIG['CUDA_VISIBLE_DEVICES']

# Let cuDNN auto-tune kernels; fastest when input sizes are fixed per batch.
torch.backends.cudnn.benchmark = True

def main():
    """Load a checkpoint, run validation and test inference on OULU-NPU,
    and print the metric suite for each split.

    Reads all options from ``parse_args()``; requires ``args.resume`` to
    point at a checkpoint file for anything to happen.
    """
    args = parse_args()
    print(args)
    args.distributed = False

    # Checkpoint file name (also used by test()/show_metrics() as pickle key).
    checkpoint_name = os.path.basename(args.resume)
    print(checkpoint_name)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn(
            """You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.""")

    os.makedirs(os.path.join(args.data_dir, 'results'), exist_ok=True)
    if args.resume:
        # NOTE(review): torch is already imported (and CUDA possibly
        # initialized) at this point, so setting CUDA_VISIBLE_DEVICES here
        # may have no effect — confirm this actually selects args.gpu.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

        print("Loading checkpoint {}".format(args.resume))
        # Attention-fusion model wrapping the requested backbone architecture.
        model = attention_fusion(backbone=models.__dict__[args.arch], pretrained=True)
        model.cuda()

        # Load on CPU first, then strip any DataParallel "module." prefix
        # so the weights map onto the single-GPU model.
        checkpoint = torch.load(args.resume, map_location="cpu")
        state_dict = checkpoint.get("state_dict", checkpoint)
        model.load_state_dict({re.sub("^module.", "", k): v for k, v in state_dict.items()}, strict=False)

        print("Initializing Data Loader")
        _, _, val_loader = get_oulu_npu_dataloader(model, args, color_space=args.color_space)  # for val_datasets
        test_loader = get_oulu_npu_test_dataloader(model, args, color_space=args.color_space)  # for test_datasets

        print("Start validation")
        test(val_loader, model, args)  # val
        show_metrics(args)
        print("Start testing")
        test(test_loader, model, args)  # test
        show_metrics(args)


def test(test_loader, model, args):
    """Run inference over *test_loader* and pickle the predictions.

    Writes ``[y_true, y_pred, y_score]`` to ``PICKLE_FILE`` keyed by the
    checkpoint file name (``args.resume``) for later use by show_metrics().

    Args:
        test_loader: yields dicts with 'images', 'msrs' and 'labels' tensors.
        model: fusion model taking (images, msr_images) and returning logits.
        args: parsed options; uses ``use_amp``, ``print_freq`` and ``resume``.
    """
    y_true = []
    y_pred = []
    y_score = []

    batch_time = AverageMeter('Time', ':6.3f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    progress = ProgressMeter(len(test_loader), [batch_time, top1], prefix='Test: ')

    model.eval()

    with torch.no_grad():
        end = time.time()
        for batch_idx, sample in enumerate(test_loader):
            images = sample['images'].cuda()
            images_msr = sample['msrs'].cuda()
            labels = sample['labels'].cuda()
            y_true.extend(labels.tolist())

            # Forward pass under (optional) automatic mixed precision.
            with autocast(enabled=args.use_amp):
                labels_pred = model(images, images_msr)

            pred = torch.argmax(labels_pred, dim=1)
            y_pred.extend(pred.tolist())
            # Positive-class (index 1) probability is the spoofing score.
            score = torch.nn.functional.softmax(labels_pred, dim=1)
            score = score[:, 1]
            y_score.extend(score.tolist())

            # measure accuracy
            acc1, = accuracy(labels_pred, labels)
            top1.update(acc1[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if (batch_idx + 1) % args.print_freq == 0:
                progress.display(batch_idx + 1)

    # BUGFIX: close the dump file deterministically instead of leaking the
    # handle via pickle.dump(..., open(...)).
    with open(PICKLE_FILE.format(args.resume.split('/')[-1]), "wb") as f:
        pickle.dump([y_true, y_pred, y_score], f)


def cal_metric(target, predicted):
    """Compute TPR at fixed FPR operating points from the ROC curve.

    Args:
        target: ground-truth binary labels.
        predicted: positive-class scores, same length as *target*.

    Returns:
        Tuple ``(TPRs, curve)`` where ``TPRs`` maps ``"TPR@FPR=10E-k"``
        (k = 1..4) to the interpolated TPR at that FPR, and ``curve`` is
        the densely resampled ROC as ``{'x': fpr_grid, 'y': tpr_values}``.
    """
    fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
    # roc_curve guarantees fpr spans [0, 1], so all targets below are in range.
    function = interpolate.interp1d(fpr, tpr)

    # NOTE(review): 1e8 samples (~800 MB of float64 per array) only to hand
    # back a plotting curve — consider coarsening this grid if memory matters.
    scale = np.arange(0, 1, 0.00000001)
    y = function(scale)

    # BUGFIX: the original selected TPRs via `np.argwhere(scale == value)`,
    # an exact float comparison against an arange grid; 0.1 / 0.01 / ... are
    # not guaranteed to be exactly representable on that grid, in which case
    # the lookup returned an empty array and float(np.squeeze(...)) raised.
    # Interpolating at the requested FPRs directly is robust and equivalent.
    target_fprs = {"TPR@FPR=10E-1": 0.1, "TPR@FPR=10E-2": 0.01,
                   "TPR@FPR=10E-3": 0.001, "TPR@FPR=10E-4": 0.0001}
    TPRs = {key: float(function(value)) for key, value in target_fprs.items()}

    return TPRs, {'x': scale, 'y': y}


def show_metrics(args):
    """Load the predictions pickled by ``test`` and print the metric suite.

    Prints EER, APCER/BPCER/ACER, HTER, TPR at fixed FPRs, accuracy and AUC
    for the run identified by the checkpoint name in ``args.resume``.
    """
    with open(PICKLE_FILE.format(args.resume.split('/')[-1]), "rb") as f:
        y_true, y_pred, y_score = pickle.load(f)
    print(len(y_true), len(y_pred), len(y_score))

    # Accuracy on the hard (argmax) predictions.
    acc = metrics.accuracy_score(y_true, y_pred)
    # Full ROC — keep every threshold so the EER search below is exact.
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, drop_intermediate=False)

    fnr = 1 - tpr
    # Equal error rate: FNR at the threshold where FNR and FPR are closest.
    eer = fnr[np.nanargmin(np.absolute((fnr - fpr)))]

    cm = metrics.confusion_matrix(y_true, y_pred)
    print("cm: ", cm)
    tn, fp, fn, tp = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
    # Presentation-attack metrics; guard against a split with an empty class.
    apcer = 1.0 if (fp + tn == 0) else fp / float(tn + fp)
    bpcer = 1.0 if (fn + tp == 0) else fn / float(fn + tp)
    acer = (apcer + bpcer) / 2.0
    if (fn + tp == 0):
        frr = 1.0
        far = fp / float(fp + tn)
    elif (fp + tn == 0):
        far = 1.0
        frr = fn / float(fn + tp)
    else:
        far = fp / float(fp + tn)
        frr = fn / float(fn + tp)
    hter = (far + frr) / 2.0

    # Only the TPR@FPR dict is needed here; the resampled curve is discarded.
    tprs, _ = cal_metric(y_true, y_score)

    roc_auc = metrics.auc(fpr, tpr)
    metrics_template = "EER: {:f} APCER: {:f} BPCER: {:f} ACER: {:f} HTER: {:f} TPR@0.1: {:f} TPR@0.01: {:f} TPR@0.001: {:f} TPR@0.0001: {:f} ACC: {:f} AUC: {:f}"
    print(metrics_template.format(eer, apcer, bpcer, acer, hter, tprs["TPR@FPR=10E-1"], tprs["TPR@FPR=10E-2"],
                                  tprs["TPR@FPR=10E-3"], tprs["TPR@FPR=10E-4"], acc, roc_auc))



# Script entry point: run the full checkpoint-evaluation pipeline.
if __name__ == '__main__':
    main()
