from mtcnn import *
import cv2
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
import torchvision
import sklearn
import time
import matplotlib.pyplot  as plt
from PIL import Image
from resnet import *

import albumentations as A
# Ten-fold cross-validation.
# Core idea: approximate FAR as a function of the threshold, then plug the
# target FAR into that function to recover the matching threshold.
# That threshold becomes the decision rule: pairs scoring above it are judged
# "same person", below it "different person"; accuracy is computed from that.


class LFold:
    """K-fold splitter that degenerates to a single identity split.

    Wraps sklearn's KFold when ``n_splits > 1``; with one split (or fewer),
    train and test are both the whole index set.
    """

    def __init__(self, n_splits=2, shuffle=False):
        self.n_splits = n_splits
        if n_splits > 1:
            self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)

    def split(self, indices):
        """Return an iterable of (train, test) index pairs over ``indices``.

        With folding disabled this is a single identity pair; otherwise it is
        KFold's generator, which yields splits of ratio (n_splits-1):1.
        """
        if self.n_splits <= 1:
            return [(indices, indices)]
        return self.k_fold.split(indices)


# Load a pairs.txt-style file; collect image paths and the same/different labels.
def get_pairs_list(pair_list, img_root):
    """Parse an LFW-style pairs file.

    Each line is "subpath1 subpath2 label". Returns:
      imgpath_list: unique full paths (``img_root/subpath``), first-seen order
      subpath_list: the matching unique relative subpaths
      same_list:    one float label per pair line
    """
    with open(pair_list, 'r') as fd:
        pairs = fd.readlines()
    imgpath_list = []
    same_list = []
    subpath_list = []
    # BUGFIX: the original tested `splits[0] not in imgpath_list`, comparing a
    # bare subpath against img_root-prefixed full paths — the test never fired,
    # so every occurrence was appended. Dedup must be on the subpaths; a set
    # also makes membership O(1) instead of O(n).
    seen = set()
    for pair in pairs:
        splits = pair.split()
        for sub in splits[:2]:
            if sub not in seen:
                seen.add(sub)
                imgpath_list.append(fr"{img_root}/{sub}")
                subpath_list.append(sub)
        same_list.append(float(splits[2]))
    return imgpath_list, subpath_list, same_list


def load_image(imgpath):
    """Read the image at ``imgpath`` as grayscale, resized to 112x112.

    Returns None (after logging) when the path argument is None.
    """
    if imgpath is None:
        print("wrong imgpath input in load_image")
        return None
    gray = cv2.imread(imgpath, 0)  # flag 0 -> load as single-channel grayscale
    return cv2.resize(gray, (112, 112))


def restore_features(features, file_list, pairspath):
    """Re-order features into pair order.

    For every line of the pairs file, look up both images' subpaths in
    ``file_list`` and append their feature rows, giving a flat list
    [pair1_img1, pair1_img2, pair2_img1, ...].
    """
    with open(pairspath, 'r') as fd:
        lines = fd.readlines()

    paired = []
    for line in lines:
        parts = line.split()
        for sub in parts[:2]:
            paired.append(features[file_list.index(sub)])
    return paired


def get_features(model, image_paths, trans, batch_size=10):
    """Run ``model`` over ``image_paths`` in batches and return stacked features.

    Each image is loaded as 112x112 grayscale; both the image and its
    horizontal flip are pushed through the network, and the two output vectors
    are hstacked into a single row per image (test-time flip augmentation).
    ``batch_size`` must be a positive even number, since every image
    contributes two tensors to the batch.

    Returns an (N, 2*D) numpy array of features, or None on bad input.
    NOTE(review): requires a CUDA device (``.cuda()`` below) — confirm before
    running on CPU-only hosts.
    """
    if model is None:
        print("wrong model input in get_features")
        return None
    if image_paths is None:
        print("wrong image_paths input in get_features")
        return None

    if batch_size == 0 or batch_size % 2 != 0:
        print("wrong batch_size input:must be even")
        return

    images_len = len(image_paths)
    # images_len = 500
    i = 0
    features = None
    while i < images_len - 1:
        # The final batch takes whatever remains, so inference never reads
        # past the end of the path list.
        if i + batch_size > images_len - 1:
            batch_paths = image_paths[i:images_len]
        else:
            batch_paths = image_paths[i:i + batch_size]

        # Load each image and push both it and its mirror through `trans`.
        image_batch = []
        for one_path in batch_paths:
            img = load_image(one_path)
            img_ = np.fliplr(img).copy()  # horizontal flip
            image_batch.append(trans(img))  # preprocessing / normalization
            image_batch.append(trans(img_))

        image_batch = torch.stack(image_batch, dim=0)
        # image_batch = trans(image_batch)

        image_batch = image_batch.cuda()
        feature = model(image_batch)
        feature = feature.cpu().detach().numpy()

        # Even rows are the originals, odd rows the flipped copies; hstack
        # pairs them into one feature row per input image.
        fe_1 = feature[::2]
        fe_2 = feature[1::2]
        feature = np.hstack((fe_1, fe_2))
        # print(feature.shape)

        if features is None:
            features = feature
        else:
            features = np.vstack((features, feature))
        i += batch_size
        # print(i)
    return features


# N-to-N cosine similarity between two stacks of row vectors.
def cosine_similarity(features1, features2, return_n=False):
    """L2-normalize both feature matrices and score them by dot product.

    With ``return_n=True`` the full NxN similarity matrix is returned;
    otherwise only its diagonal (the row-wise pairwise similarities).
    """
    norm1 = features1 / np.linalg.norm(features1, axis=1, keepdims=True)
    norm2 = features2 / np.linalg.norm(features2, axis=1, keepdims=True)
    # After unit normalization the dot product IS the cosine of the angle.
    sims = np.dot(norm1, norm2.T)
    if return_n:
        return sims
    return np.diagonal(sims)


# Cosine similarity for each consecutive feature pair.
def get_similairty(features):
    """Score each feature pair by cosine similarity.

    ``features`` is a sequence of 2N equal-length 1-D vectors laid out as
    [pair1_img1, pair1_img2, pair2_img1, pair2_img2, ...]; returns the N
    similarities, one per pair.

    Generalized: the original concatenated and reshaped to (N, 2, 1024),
    hard-coding the feature dimension, only to split the pairs back out.
    Slicing the interleaved list achieves the same pairing for ANY dimension.
    """
    fe1 = np.asarray(features[0::2])
    fe2 = np.asarray(features[1::2])
    return cosine_similarity(fe1, fe2)


# Compute TAR and FAR at a given decision threshold.
def calc_far_tar(threshold, pred_list, gold_list):
    """Return ``(tar, far)`` for scores ``pred_list`` against 0/1 ``gold_list``.

    tar = accepted same-pairs / total same-pairs
    far = accepted diff-pairs / total diff-pairs
    A rate is 0.0 when its denominator is empty (BUGFIX: the original raised
    ZeroDivisionError whenever a fold contained only one class).
    """
    # Shapes must agree element-wise.
    assert (pred_list.shape[0] == gold_list.shape[0])

    same_pairs = np.sum(gold_list)
    diff_pairs = np.sum(np.logical_not(gold_list))

    pred_bool_list = pred_list >= threshold

    true_accept = np.sum(np.logical_and(pred_bool_list, gold_list))
    false_accept = np.sum(np.logical_and(pred_bool_list, np.logical_not(gold_list)))

    tar = float(true_accept) / float(same_pairs) if same_pairs else 0.0
    far = float(false_accept) / float(diff_pairs) if diff_pairs else 0.0
    return tar, far


# Threshold selection via k-fold cross-validation driven by a target FAR.
# On each fold, FAR is swept as a function of threshold on the train split,
# the threshold matching `far_target` is interpolated, and that threshold is
# then evaluated on the held-out test split. Defaults to ten folds.

def get_best_acc_kfold(y_score, y_true, threshold_range, far_target=1e-5, nrof_folds=10,threshold_step = 1000):
    """

    :param y_score: predicted similarity score per pair
    :param y_true:  ground-truth same/different label per pair
    :param threshold_range: (low, high) range of thresholds to sweep
    :param far_target: target false-accept rate
    :param nrof_folds: number of cross-validation folds
    :param threshold_step: number of steps in the threshold sweep
    :return: mean threshold, mean TAR, mean FAR over the folds
    """
    assert (y_score.shape[0] == len(y_true))

    threshold = np.arange(threshold_range[0],threshold_range[1],(threshold_range[1]-threshold_range[0])/threshold_step)

    pairs_len = len(y_true)
    thresold_len = len(threshold)
    actual_same = np.array(y_true)

    k_fold = LFold(nrof_folds,shuffle=True)

    tar = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    th_set = np.zeros(nrof_folds)

    k_splits = np.arange(pairs_len)
    # k-fold cross-validation over the pair indices
    for idx_k, (train_set, test_set) in enumerate(k_fold.split(k_splits)):
        far_set = np.zeros(thresold_len)
        # Sweep every threshold; record the resulting FAR on the train split.
        for idx_ts, ts_item in enumerate(threshold):
            _, far_set[idx_ts] = calc_far_tar(ts_item, y_score[train_set], actual_same[train_set])

        # Fit threshold as a (piecewise-linear) function of FAR and read off
        # the threshold at far_target.
        # NOTE(review): interp1d assumes the far_set x-values are distinct —
        # duplicate FARs across thresholds may make this ill-defined; verify.
        if np.max(far_set) > far_target:
            f = interpolate.interp1d(far_set, threshold, kind='slinear')
            th = f(far_target)
        else:
            th = 0
        # Evaluate the selected threshold on the held-out test split.
        tar[idx_k], far[idx_k] = calc_far_tar(th, y_score[test_set], actual_same[test_set])
        th_set[idx_k] = th

    tar_mean = np.mean(tar)
    far_mean = np.mean(far)
    th_set_mean = np.mean(th_set)

    return th_set_mean, tar_mean, far_mean


def mtcnn_test():
    """Detect and align faces for every image under ``root`` with MTCNN, time
    each detection, then display and save a grid of the aligned crops."""
    model = MTCNN_DET_np(True)

    root = r"e:\CASIA-WebFace\0000045"

    imageslist = []
    for filename in os.listdir(root):
        start = time.perf_counter()
        # BUGFIX: the loop variable was unused and a literal placeholder path
        # was read (and printed) instead of the current file.
        img = cv2.imread(fr"{root}/{filename}")
        try:
            images, _ = face_det_align(model, img)
        except Exception:
            # O-net produced a bad crop for this image; skip it.
            continue
        end = time.perf_counter()

        print(fr"{root}/{filename} : box number {len(images)} , det time : {round(end - start, 4)}")
        imageslist.extend(images)

    plt.figure()
    # Grab the figure handle before drawing so savefig keeps the content
    # (avoids saving a blank/white figure after plt.show()).
    fig = plt.gcf()
    for i in range(1, len(imageslist)):
        img = imageslist[i - 1]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV is BGR; pyplot expects RGB
        plt.subplot(2, 8, i)

        plt.imshow(img)
        plt.xticks([])
        plt.yticks([])
        plt.subplots_adjust(bottom=0.8, left=0, right=.6, top=1, wspace=.001, hspace=.001)

    plt.show()
    fig.savefig("1st_align.jpg", dpi=100)


def evlauate_lwtest_data(model, img_path, pais_path):
    """Evaluate ``model`` on an LFW-style verification set.

    Reads the pairs file, extracts features for every unique image, re-orders
    them into pair order, scores each pair by cosine similarity, and selects a
    threshold by 10-fold cross-validation at a target FAR of 1e-3.

    Returns (mean threshold, mean TAR, mean FAR).
    """
    # Read the pair list.
    file_list, subpath_list, same_list = get_pairs_list(pais_path, img_path)

    # Grayscale images in, normalized tensors out.
    preprocess = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.5], [0.5]),
    ])

    # Infer features, then lay them out in pair order.
    raw_features = get_features(model, file_list, preprocess)
    pair_features = restore_features(raw_features, subpath_list, pais_path)

    # One similarity score per pair.
    y_score = get_similairty(pair_features)

    # Accuracy via k-fold threshold selection over the [0, 1) range.
    th, acc, far = get_best_acc_kfold(
        y_score, np.array(same_list), (0, 1), 1e-3, nrof_folds=10)

    return th, acc, far


if __name__ == '__main__':
    # Build the ResNet-18 face backbone (SE blocks disabled) on the GPU,
    # restore the best checkpoint, and evaluate on the local LFW-style set.
    model = resnet_face18(use_se=False).cuda()
    checkpoint = torch.load(r'.\weights\v9_best.pth')
    model.load_state_dict(checkpoint['backbone'])
    model.eval()  # inference mode: freeze batch-norm statistics / dropout
    print(evlauate_lwtest_data(model, r'c:\Users\2\Desktop\test', "lw_pairs.txt"))
    # data = np.arange(20)
    # t_splt = LFold(n_splits=4,shuffle=True)
    #
    # print(t_splt.split(data).__next__())
    # print(t_splt.split(data).__next__())
    # a = np.array([0.8, 0.2, 0.1, 0.2, 0.3, 0.5, 0.2, 0.9])
    # b = np.array([0, 0, 0, 1, 1, 1, 1, 1])
    #
    # threshold = np.arange(0, 1.01, 0.01)
    #
    # print(get_best_acc_kfold(a, b, threshold, nrof_folds=1))
    #
    #
    # print(calc_far_tar(2,a,b))
