# -*- coding: utf-8 -*-
"""
Created on 2018-05-30 at 4:55 PM

@author: ronghuaiyang
"""
from __future__ import print_function
import os
import sys
import ctypes
import cv2
from matplotlib.pyplot import savefig

from models import *
import torch
import time
from config import Config
from torch.nn import DataParallel
from sklearn.metrics import roc_curve, roc_auc_score

from Utils.metric_and_dist import *
from matplotlib import pyplot as plt
import seaborn as sns


# c_opencv_lib_path = os.path.dirname('/home/yjy/Software/opencv-2.4.13.6/install/lib/')
# sys.path.append(c_opencv_lib_path)
# print(sys.path)


def get_lfw_list_single(single_list):
    """Read a whitespace-separated list file and return the unique first
    tokens of each line, preserving first-seen order."""
    with open(single_list, 'r') as fd:
        raw_lines = fd.readlines()
    unique_names = []
    for raw in raw_lines:
        first_token = raw.split()[0]
        if first_token not in unique_names:
            unique_names.append(first_token)
    return unique_names


def get_lfw_list(pair_list):
    """Collect the unique sample names appearing in either of the first two
    columns of a pair file, preserving first-seen order."""
    with open(pair_list, 'r') as fd:
        pair_lines = fd.readlines()
    names = []
    for raw in pair_lines:
        tokens = raw.split()
        # Index both columns explicitly so malformed lines fail the same
        # way as before (IndexError on fewer than two tokens).
        for name in (tokens[0], tokens[1]):
            if name not in names:
                names.append(name)
    return names


def load_image(img_path, resize_shape):
    """Load a grayscale image and normalize it to a (1, 1, H, W) float32
    array scaled into [-1, 1].

    Parameters
    ----------
    img_path : str
        Path to the image; read as single-channel grayscale.
    resize_shape : tuple
        Passed to cv2.resize as dsize, i.e. (width, height).
        NOTE(review): callers pass opt.input_shape[1:], which in torch
        convention is (H, W) -- confirm for non-square inputs.

    Returns
    -------
    np.ndarray
        float32 array of shape (1, 1, H, W) in [-1, 1].
    """
    image_ori = cv2.imread(img_path, 0)
    if image_ori is None:
        # Unreadable file: substitute a black placeholder so batching keeps
        # working.  TODO(review): should arguably follow resize_shape rather
        # than the hard-coded 128x128.
        image = np.zeros((128, 128))
    else:
        # BUG FIX: the third positional argument of cv2.resize is `dst`,
        # not `interpolation`; the flag must be passed by keyword for
        # bicubic interpolation to actually take effect.
        image = cv2.resize(image_ori, resize_shape, interpolation=cv2.INTER_CUBIC)
    # Add batch and channel axes: (H, W) -> (1, 1, H, W).
    image = image[np.newaxis, np.newaxis, :, :]
    image = image.astype(np.float32, copy=False)
    # Map pixel values from [0, 255] to [-1, 1].
    image -= 127.5
    image /= 127.5

    return image


def extract_label(opt, img_path):
    """Derive the person id from an image filename using the dataset's
    naming scheme.  Raises RuntimeError for an unrecognized dataset.

    The checks are ordered: an exact 'Tongji' match takes the 3-char prefix,
    while other Tongji variants take characters [4:8].
    """
    name = os.path.basename(img_path)
    dataset = opt.dataset
    if 'IITD' in dataset:
        return name[:3]
    if dataset == 'Tongji':
        return name[:3]
    if 'Tongji' in dataset:
        return name[4:8]
    if dataset == 'GPDS':
        return name[:3]
    if 'CASIA' in dataset:
        return name[:4]
    raise RuntimeError('unknown dataset type!')


def get_feature_nn(model, images):
    """Run a numpy batch through a torch model on CUDA and return the
    output as a numpy array."""
    batch = torch.from_numpy(images).to(torch.device("cuda"))
    return model(batch).data.cpu().numpy()


def get_feature_compcode(model, images):
    """Encode a batch of images with a (non-torch) CompCode-style encoder.

    `images` is assumed to be the [-1, 1]-normalized batch produced by
    load_image -- TODO confirm; each image is de-normalized back to
    [0, 255] uint8, resized to 128x128 and handed to the C encoder as raw
    bytes.  Returns the per-image feature buffers stacked with np.vstack.
    """
    features = None
    for i in range(len(images)):
        # Output buffer filled in place by the C encoder (4 * 32 * 4 bytes).
        feature = ctypes.create_string_buffer(4 * 32 * 4)
        # Undo the [-1, 1] normalization applied in load_image.
        img_data = images[i] * 127.5 + 127.5
        img_data = cv2.resize(img_data, (128, 128)).astype(np.uint8)
        # Flatten to a contiguous byte string for the ctypes call.
        img_data = img_data.reshape((-1,)).tobytes()
        model.encode_palmprint_using_bytes(img_data, feature)
        if features is None:
            features = feature
        else:
            features = np.vstack((features, feature))
    return features


def get_featurs(model, test_list, batch_size=10):
    """Extract paired features for every image path in test_list.

    Images are accumulated and pushed through the model in batches of
    `batch_size`; consecutive output rows (even/odd) are concatenated into
    one feature vector per pair.  Returns (features, number_of_batches).

    NOTE(review): this reads the module-level `opt` (only defined in the
    __main__ block) for the resize shape, and assumes a CUDA device is
    available -- confirm before reusing outside this script.
    """
    images = None
    features = None
    cnt = 0
    for i, img_path in enumerate(test_list):
        # `opt` is a global here, not a parameter.
        image = load_image(img_path, resize_shape=opt.input_shape[1:])
        if image is None:
            # load_image substitutes a zero image instead of returning
            # None, so this branch is effectively dead.
            print('read {} error'.format(img_path))

        if images is None:
            images = image
        else:
            images = np.concatenate((images, image), axis=0)

        # Flush a full batch, or whatever remains on the last path.
        if images.shape[0] % batch_size == 0 or i == len(test_list) - 1:
            cnt += 1

            data = torch.from_numpy(images)
            data = data.to(torch.device("cuda"))
            output = model(data)
            output = output.data.cpu().numpy()

            # Interleaved rows are treated as two views of the same sample
            # and concatenated into one vector.  NOTE(review): load_image
            # no longer emits flipped pairs (the dstack/flip there is
            # commented out), so verify this even/odd pairing is intended.
            fe_1 = output[::2]
            fe_2 = output[1::2]
            feature = np.hstack((fe_1, fe_2))
            # print(feature.shape)

            if features is None:
                features = feature
            else:
                features = np.vstack((features, feature))

            images = None

    return features, cnt


def get_featurs_labels(opt, model, test_list, batch_size):
    """Extract a feature matrix and the parallel list of person-id labels
    for every image path in test_list, batching the forward passes.

    Dispatches to the torch path or the CompCode path depending on the
    model type.  Returns (features, labels).
    """
    pending = None          # images accumulated for the current batch
    labels = []
    features = None
    last_idx = len(test_list) - 1
    for idx, img_path in enumerate(test_list):
        image = load_image(img_path, resize_shape=opt.input_shape[1:])
        labels.append(extract_label(opt, img_path))
        if image is None:
            print('read {} error'.format(img_path))

        pending = image if pending is None else np.concatenate((pending, image), axis=0)

        # Flush a full batch, or whatever remains on the final path.
        if pending.shape[0] % batch_size == 0 or idx == last_idx:
            if isinstance(model, torch.nn.Module):
                batch_feat = get_feature_nn(model, pending)
            else:
                batch_feat = get_feature_compcode(model, pending)
            features = batch_feat if features is None else np.vstack((features, batch_feat))
            pending = None

    return features, labels


def get_feature_dict(test_list, features):
    """Map each name in test_list to its feature row by position."""
    return {name: features[idx] for idx, name in enumerate(test_list)}


def load_model(model, model_path):
    """Load weights from model_path into model, keeping only the checkpoint
    entries whose keys exist in the model's own state dict (tolerates extra
    keys in the checkpoint)."""
    current = model.state_dict()
    checkpoint = torch.load(model_path)
    compatible = {name: tensor for name, tensor in checkpoint.items() if name in current}
    current.update(compatible)
    model.load_state_dict(current)


def cal_accuracy(y_score, y_true):
    """Exhaustively find the score threshold maximizing accuracy.

    Every score is tried as a candidate threshold; a sample is predicted
    positive when its score is >= the threshold.  Ties keep the first
    threshold found in input order.

    Returns (best_accuracy, best_threshold).
    """
    scores = np.asarray(y_score)
    truth = np.asarray(y_true)
    best_acc, best_th = 0, 0
    for th in scores:
        predicted = scores >= th
        acc = np.mean((predicted == truth).astype(int))
        if acc > best_acc:
            best_acc, best_th = acc, th

    return (best_acc, best_th)


def test_sims(fe_dict, pair_list, metric=cosin_metric):
    """Score every pair in pair_list using `metric` over cached features.

    Each line of pair_list is "<name1> <name2> <label>".  Returns the
    parallel lists (similarities, integer labels).
    """
    with open(pair_list, 'r') as fd:
        pair_lines = fd.readlines()

    sims = []
    labels = []
    for raw in pair_lines:
        fields = raw.split()
        feat_a = fe_dict[fields[0]]
        feat_b = fe_dict[fields[1]]
        pair_label = int(fields[2])
        sims.append(metric(feat_a, feat_b))
        labels.append(pair_label)
    return sims, labels


def test_performance(fe_dict, pair_list, metric=cosin_metric):
    """Compute the best verification accuracy and threshold over pair_list.

    BUG FIX: `metric` now defaults to cosin_metric; lfw_test calls this
    with only two arguments, which previously raised TypeError.  Passing a
    metric explicitly behaves exactly as before.

    Returns (best_accuracy, best_threshold).
    """
    sims, labels = test_sims(fe_dict, pair_list, metric)
    acc, th = cal_accuracy(sims, labels)
    return acc, th


def lfw_test(model, img_paths, identity_list, compair_list, batch_size):
    """Extract features for img_paths, evaluate verification accuracy on
    compair_list with the cosine metric, and print timing/accuracy.

    Returns the best accuracy found.
    """
    start = time.time()
    features, cnt = get_featurs(model, img_paths, batch_size=batch_size)
    print(features.shape)
    elapsed = time.time() - start
    print('total time is {}, average time is {}'.format(elapsed, elapsed / cnt))
    fe_dict = get_feature_dict(identity_list, features)
    # BUG FIX: test_performance requires a metric argument; the original
    # call omitted it and raised TypeError at runtime.
    acc, th = test_performance(fe_dict, compair_list, cosin_metric)
    print('lfw face verification accuracy: ', acc, 'threshold: ', th)
    return acc


def yjy_test(model, img_paths, identity_list, compair_list, batch_size):
    """Extract features for img_paths and score every pair in compair_list
    with the angular metric.  Returns (similarities, labels)."""
    start = time.time()
    features, batch_count = get_featurs(model, img_paths, batch_size=batch_size)
    print(features.shape)
    elapsed = time.time() - start
    print('total time is {}, average time is {}'.format(elapsed, elapsed / batch_count))
    feature_lookup = get_feature_dict(identity_list, features)
    return test_sims(feature_lookup, compair_list, angular_metric)


def genLeaveOneOut(model, img_paths, batch_size, numClassSamples, opt):
    """Build leave-one-out test runs.

    For run i, sample i of each class becomes a probe and that class's
    remaining samples become the references.  Each run is a dict with keys
    'probeFvs', 'probeLbls', 'refFvs', 'refLbls' converted to numpy arrays.
    """
    print('Generating leave-one-out tests ...')
    sys.stdout.flush()

    # Feature vectors and labels for all samples, computed in one pass.
    fvs, lbls = get_featurs_labels(opt, model, img_paths, batch_size)
    fvs = np.array(fvs)
    lbls = np.array(lbls)

    # NOTE: set() iteration order is arbitrary but fixed within a process run.
    class_ids = list(set(lbls))
    runs = []
    for probe_idx in range(numClassSamples):
        run = {'probeFvs': [], 'probeLbls': [], 'refFvs': [], 'refLbls': []}
        for cid in class_ids:
            mask = lbls == cid
            class_fvs = fvs[mask]
            class_lbls = lbls[mask]
            # Stop when a class has fewer than probe_idx+1 samples; the
            # partially-filled run is still recorded below, matching the
            # original behavior.
            if probe_idx > len(class_fvs) - 1:
                break
            run['probeFvs'].append(class_fvs[probe_idx])
            run['probeLbls'].append(class_lbls[probe_idx])
            run['refFvs'].append(class_fvs[np.arange(len(class_fvs)) != probe_idx])
            run['refLbls'].append(class_lbls[np.arange(len(class_lbls)) != probe_idx])
        for key in run:
            run[key] = np.array(run[key])
        runs.append(run)

    return runs


def testLeaveOneOut3(tests, dist_fuc, dnfold=False):
    """Yield (posScores, negScores, results) for each leave-one-out run.

    For every probe, the distance to each sample of every reference class
    is computed with `dist_fuc`.  Two scoring strategies:
      - NFOLD  (dnfold=False): every single probe/reference distance is a
        score (genuine if the labels match, impostor otherwise).
      - DNFOLD (dnfold=True): per reference class, the minimum distance is
        the genuine score and the maximum distance the impostor score.
    `results` records per probe the rank-1 match:
    (min distance, probe label, matched class label).
    """
    print('Performing leave-one-out tests ...')
    sys.stdout.flush()

    for i in range(len(tests)):
        # Get current test data
        testData = tests[i]

        negScores = []
        posScores = []
        results = []
        # Loop through all the probe and find shortest distance between all the reference
        for probeIdx in range(len(testData['probeFvs'])):
            probeFv = testData['probeFvs'][probeIdx]
            probeLbl = testData['probeLbls'][probeIdx]
            distances = []
            distLabels = []
            for refIndices in range(len(testData['refFvs'])):
                dists = []
                # Sentinel label in case a reference class has no samples.
                refLbl = -6
                for refIdx in range(len(testData['refFvs'][refIndices])):
                    refFv = testData['refFvs'][refIndices][refIdx]
                    refLbl = testData['refLbls'][refIndices][refIdx]
                    # dist = 1 - abs(cosin_metric(probeFv, refFv))
                    dist = dist_fuc(probeFv, refFv)
                    dists.append(dist)

                    if not dnfold:
                        # NFOLD strategy: score each individual pair.
                        if probeLbl == refLbl:
                            posScores.append(dist)
                        else:
                            negScores.append(dist)

                # refLbl here is the label of the class's LAST sample; all
                # samples of a class share one label in genLeaveOneOut runs,
                # so it identifies the class.
                minDist = np.min(dists)
                maxDist = np.max(dists)
                distances.append(minDist)
                distLabels.append((probeLbl, refLbl))
                if dnfold:
                    # DNFOLD strategy: best distance for the genuine class,
                    # worst distance for each impostor class.
                    if probeLbl == refLbl:
                        posScores.append(minDist)
                        # posScores.append(dists[np.random.random_integers(0, len(dists) - 1)])
                    else:
                        negScores.append(maxDist)
                        # negScores.append(minDist)
                        # negScores.append(dists[np.random.random_integers(0, len(dists) - 1)])

            # Find the minimum and store the result
            minIdx = np.argmin(distances)
            results.append((distances[minIdx], distLabels[minIdx][0], distLabels[minIdx][1]))

        yield posScores, negScores, results


def leaveOneTest(model, img_paths, batch_size, numClassSamples, opt):
    """Run the full leave-one-out evaluation.

    Returns (scores, labels) suitable for ROC analysis: scores are NEGATED
    distances (larger = better match) and labels are 1 for genuine,
    0 for impostor pairs.
    """
    loaTests = genLeaveOneOut(model, img_paths, batch_size, numClassSamples, opt)
    # Torch models are matched with the angular distance; anything else is
    # assumed to be a CompCode-style encoder using Hamming distance.
    dist_fn = angular_dist if isinstance(model, torch.nn.Module) else c_hamming_dist
    all_scores = []
    all_labels = []
    run_no = 0
    for scoresPos, scoresNeg, results in testLeaveOneOut3(loaTests, dist_fuc=dist_fn, dnfold=True):  # TODO
        run_no += 1
        # Report per-run rank-1 identification errors.
        print('-----------------')
        print('Probe sample: {0}'.format(run_no))
        print(results)
        numErrors = sum(1 for res in results if res[1] != res[2])
        # print('\tNumber of scores: {0} pos, {1} neg'.format(len(scoresPos), len(scoresNeg)))
        print('\tNumber of errors: {0}/{1}'.format(numErrors, len(results)))
        all_scores.extend(scoresPos)
        all_labels.extend([1] * len(scoresPos))
        all_scores.extend(scoresNeg)
        all_labels.extend([0] * len(scoresNeg))
    # Negate distances so larger scores mean better matches for roc_curve.
    return (-np.array(all_scores)).tolist(), all_labels


def save_fig(y_score, y_true, path, metric_name):
    """Plot the genuine/impostor matching-score distributions and save to `path`.

    y_score holds NEGATED distances (see leaveOneTest); they are flipped
    back to positive distances here, and angular distances are converted
    from radians to degrees.

    Raises ArithmeticError for an unknown metric_name.
    """
    if metric_name == 'angular':
        y_score = (- np.array(y_score)) * 180 / math.pi
    elif metric_name == 'hamming':
        y_score = - np.array(y_score)
    else:
        # BUG FIX: raise with a message so the failure is diagnosable.
        # (ArithmeticError kept for caller compatibility, though ValueError
        # would be the conventional choice.)
        raise ArithmeticError('unknown metric name: {}'.format(metric_name))
    y_true = np.array(y_true)
    pos_score = y_score[y_true == 1]
    neg_score = y_score[y_true == 0]

    # Prevent the minus sign from rendering as a box in saved figures.
    plt.rcParams['axes.unicode_minus'] = False

    plt.figure(figsize=[15, 10])
    sns.set_context("notebook", font_scale=2.5)
    sns.set_style('whitegrid')

    color_list = ['g', 'r', 'b', 'k', 'gray']
    # NOTE(review): sns.distplot is deprecated in seaborn >= 0.11; migrate
    # to sns.kdeplot/histplot when the dependency is upgraded.
    sns.distplot(pos_score, axlabel="x", hist=False, kde=True,
                 kde_kws={"color": color_list[0], "shade": True, "label": "genuine", "lw": 3},
                 rug=False, rug_kws={"color": "k"}, norm_hist=True
                 )
    sns.distplot(neg_score, axlabel="x", hist=False, kde=True,
                 kde_kws={"color": color_list[1], "shade": True, "label": "impostor", "lw": 3},
                 rug=False, rug_kws={"color": "k"}, norm_hist=True
                 )
    plt.xlabel('Matching score')
    plt.savefig(path)
    # BUG FIX: close the figure so repeated calls do not accumulate open
    # matplotlib figures (memory leak).
    plt.close()

if __name__ == '__main__':

    opt = Config()
    # Build the backbone network selected in the config.
    # NOTE(review): an unrecognized opt.backbone leaves `model` unbound and
    # the DataParallel call below raises NameError -- confirm the config
    # always names one of these backbones.
    if opt.backbone == 'resnet18':
        model = resnet18()
    elif opt.backbone == 'resnet_palm18':
        model = resnet_face18(use_se=opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()
    elif opt.backbone == 'palmnet_yjy':
        model = palmnet_yjy(input_shape=opt.input_shape, code_length=opt.code_length)
        # model = palmnet_yjy_coding(downsample=True)
    elif opt.backbone == 'palmnet_yjy_coding':
        model = palmnet_yjy_coding(downsample=True)

    # Wrap for single-GPU execution and load the evaluation checkpoint.
    model = DataParallel(model, device_ids=[0])
    load_model(model, opt.test_model_path)
    # model.load_state_dict(torch.load(opt.test_model_path))
    model.to(torch.device("cuda"))
    model.eval()

    # Resolve the identities named in the pair list and the single-column
    # list, then build the full image paths to evaluate.
    identity_list = get_lfw_list(opt.lfw_test_list)
    identity_list_single = get_lfw_list_single(opt.lfw_test_list_single)
    img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list_single]
    # y_score, y_true = yjy_test(model, img_paths, identity_list, opt.lfw_test_list, opt.test_batch_size)
    y_score, y_true = leaveOneTest(model, img_paths, opt.test_batch_size, opt.numClassSamples, opt)
    # Scan the ROC curve for the first operating point where FPR exceeds
    # FNR (1 - TPR) and report (FPR + FNR) / 2 there as a percentage
    # (an approximate equal-error rate).
    fpr, tpr, ths = roc_curve(y_true, y_score)
    print('numbers of y_score:', len(y_score))
    print('numbers of th:', len(fpr))
    for i in range(len(fpr)):
        if fpr[i] > (1 - tpr[i]):
            print(i, '{0}%'.format((fpr[i] + 1 - tpr[i]) * 50), ths[i])
            break

    auc = roc_auc_score(y_true, y_score)
    print('auc={0}'.format(auc))

    # Scores from leaveOneTest are negated angular distances.
    save_fig(y_score, y_true, './figure/score_dist_nn.png', 'angular')
