import numpy as np
import cv2
import time
from resnet import *
import torch
import math
import pickle
import mxnet as mx
import torchvision
from evaluate_kfold import get_best_acc_kfold
from PIL import Image
import os
from torch.nn import DataParallel
from utils import utils


def get_lfw_list(pair_list):
    """Collect the unique image paths referenced by an LFW-style pair file.

    Each line contains at least two whitespace-separated image paths; the
    first two fields of every line are gathered, de-duplicated, and returned
    in first-seen order.

    Args:
        pair_list: path to the pair-list text file.

    Returns:
        list[str]: unique image paths in order of first appearance.
    """
    with open(pair_list, 'r') as fd:
        pairs = fd.readlines()
    seen = set()  # O(1) membership test instead of O(n) scans of data_list
    data_list = []
    for pair in pairs:
        splits = pair.split()
        # Skip blank or malformed lines instead of raising IndexError.
        if len(splits) < 2:
            continue
        for item in splits[:2]:
            if item not in seen:
                seen.add(item)
                data_list.append(item)
    return data_list


def load_one_item(bin):
    """Decode one binary-encoded image and convert it to grayscale.

    Args:
        bin: raw encoded image bytes (an entry from an mxnet .bin record).

    Returns:
        numpy.ndarray: single-channel grayscale image of shape (H, W).
    """
    decoded = mx.image.imdecode(bin).asnumpy()
    return cv2.cvtColor(decoded, cv2.COLOR_RGB2GRAY)


# Load every encoded image from a verification .bin file (a pickle of
# (bins, issame_list)) and return normalized tensors.
def load_bin(path):
    """Load a verification set from *path*.

    The pickle holds (bins, issame_list): raw encoded images plus the
    same/different labels for consecutive pairs.  Each image is decoded to
    grayscale and appended twice — original, then horizontally flipped — so
    downstream feature extraction can fuse both views.

    Args:
        path: path to the .bin pickle file.

    Returns:
        tuple: (images, issame_list) where images is a list of normalized
        1xHxW float tensors, two entries (original + flip) per source image.

    NOTE(review): pickle on untrusted input is unsafe — only feed this
    trusted dataset files.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `pickle.load(open(...))` leaked it).
    with open(path, 'rb') as fh:
        bins, issame_list = pickle.load(fh, encoding='bytes')
    trans = torchvision.transforms.Compose(
        [
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.5), (0.5))
        ]
    )
    images = []
    for raw in bins:  # renamed from `bin` to avoid shadowing the builtin
        img = load_one_item(raw)
        img_flp = np.fliplr(img)

        images.append(trans(np.expand_dims(img, axis=2)))
        # .copy() because fliplr returns a negative-stride view that
        # ToTensor cannot consume directly.
        images.append(trans(np.expand_dims(img_flp, axis=2).copy()))
    return images, issame_list


# Run inference to get feature vectors; the batch size must be even so
# that (image, flipped-image) pairs are never split across batches.
def get_features(model, images, batch_size=10):
    """Extract per-image features and fuse each (original, flip) pair.

    *images* holds tensors in interleaved pairs (original at even indices,
    flip at odd indices, as produced by load_bin).  Each batch is stacked,
    run through *model*, and the features of every pair are hstacked into
    one row, so the output has len(images) // 2 rows.

    Args:
        model: callable network.  Inputs are moved to its parameter device;
            a parameter-less model falls back to CUDA when available, else
            CPU (generalized from the original unconditional .cuda()).
        images: list of image tensors; length must pair up evenly.
        batch_size: images per forward pass; must be a positive even number.

    Returns:
        numpy.ndarray of shape (len(images) // 2, 2 * feat_dim), or None
        on invalid input (error is printed, matching original behavior).
    """
    if images is None:
        print("wrong images input")
        return
    if model is None:
        print("wrong mode input")
        return
    if batch_size == 0 or batch_size % 2 != 0:
        print("wrong batch_size input:must be even")
        return

    # Follow the model's own device so CPU-only runs also work; identical
    # behavior for models already on CUDA.
    device = next((p.device for p in model.parameters()),
                  torch.device('cuda' if torch.cuda.is_available() else 'cpu'))

    images_len = len(images)
    chunks = []  # collect per-batch rows; single vstack at the end is O(n)
                 # instead of the original incremental-vstack O(n^2)
    with torch.no_grad():  # pure inference — skip autograd bookkeeping
        for start in range(0, images_len, batch_size):
            # Slicing past the end just takes the remaining tail, so the
            # final short batch needs no special case.
            image_batch = torch.stack(images[start:start + batch_size], dim=0)
            image_batch = image_batch.to(device)
            feature = model(image_batch).cpu().numpy()

            # Interleaved pairs: even rows = originals, odd rows = flips.
            fe_1 = feature[::2]
            fe_2 = feature[1::2]
            chunks.append(np.hstack((fe_1, fe_2)))
    return np.vstack(chunks) if chunks else None

# N-to-N cosine similarity between two feature matrices.
def cosine_similarity(features1, features2, return_n = False):
    """Cosine similarity between row vectors of two matrices.

    Args:
        features1: array of shape (N, D).
        features2: array of shape (N, D).
        return_n: when True, return the full N x N similarity matrix;
            otherwise return only its diagonal (row i of features1 vs
            row i of features2).

    Returns:
        numpy.ndarray: (N, N) matrix or length-N diagonal vector.
    """
    norm1 = np.linalg.norm(features1, axis=1, keepdims=True)
    norm2 = np.linalg.norm(features2, axis=1, keepdims=True)

    # After L2 normalization the plain dot product IS the cosine similarity.
    sims = np.dot(features1 / norm1, (features2 / norm2).T)
    if return_n:
        return sims
    return np.diagonal(sims)  # element-wise row-to-row similarity

# Cosine similarity score for every verification pair.
def get_similairty(features):
    """Similarity of each consecutive pair of feature rows.

    Rows of *features* are interleaved: pair member A at even indices,
    member B at odd indices (the layout produced upstream).

    Args:
        features: array of shape (2*N, D).

    Returns:
        numpy.ndarray: length-N vector of cosine similarities.
    """
    fe1 = features[0::2]
    fe2 = features[1::2]
    # The original concat + reshape(-1, 2, 1024) round-trip merely
    # re-paired fe1 with fe2; scoring them directly is equivalent and
    # removes the hard-coded 1024 feature dimension (works for any D).
    return cosine_similarity(fe1, fe2)

# Find the decision threshold that maximizes accuracy.
def get_best_acc(y_score, y_true):
    """Scan every observed score as a candidate threshold; keep the best.

    Args:
        y_score: predicted cosine similarities.
        y_true: ground-truth same/different labels.

    Returns:
        tuple: (best_acc, best_th) — the highest accuracy and the threshold
        achieving it (first one found on ties); (0, 0) for empty input.
    """
    y_score = np.asarray(y_score)
    y_true = np.asarray(y_true)
    best_acc, best_th = 0, 0

    # Each similarity value is tried as a threshold: predict "same" when
    # score >= threshold, then measure accuracy against the labels.
    for th in y_score:
        predictions = y_score >= th
        acc = np.mean((predictions == y_true).astype(int))
        if acc > best_acc:
            best_acc, best_th = acc, th
    return (best_acc, best_th)



def cosine_similaritytest():
    """Sanity-check cosine_similarity against torch.cosine_similarity."""
    lhs = np.random.randn(10, 1024)
    rhs = np.random.randn(10, 1024)

    print(lhs.shape)
    print(rhs.shape)

    # NumPy implementation under test.
    print(cosine_similarity(lhs, rhs))

    # Reference result computed by torch on the same data.
    lhs_t = torch.from_numpy(lhs)
    rhs_t = torch.from_numpy(rhs)
    print(torch.cosine_similarity(lhs_t, rhs_t))

def evlauate_test_data(model,path):
    """Evaluate *model* on the verification set stored at *path*.

    Loads the image pairs, extracts features, scores every pair, then
    prints the k-fold best threshold/accuracy at several FAR targets.

    Args:
        model: network used for feature extraction.
        path: location of the .bin verification file.

    Returns:
        tuple: (acc, th) from the 1e-3 FAR evaluation.
    """
    images, y_true_list = load_bin(path)
    y_score = get_similairty(get_features(model, images))

    threshold = np.arange(0, 1.001, 0.001)
    th, acc, _ = get_best_acc_kfold(y_score, y_true_list, threshold, 1e-3)
    print("1e-3 result:", th, acc, _)
    for far, label in ((1e-4, "1e-4 result:"),
                       (1e-5, "1e-5 result:"),
                       (1e-6, "1e-6 result:")):
        print(label, get_best_acc_kfold(y_score, y_true_list, threshold, far))
    return acc, th




def stack_tst():
    """Debug helper: demonstrate np.dstack on interleaved feature rows."""
    data = np.random.randn(10, 1024)

    # Even rows paired with odd rows along a new last axis -> (5, 1024, 2).
    stacked = np.dstack([data[0::2], data[1::2]])
    print(stacked.shape)

if __name__ == '__main__':
    # Build the ResNet-18 face backbone (no squeeze-excitation) on the GPU
    # and restore trained weights from the local checkpoint.
    # NOTE(review): torch.load without map_location requires a CUDA-saved
    # checkpoint and a CUDA machine — confirm before running elsewhere.
    model = resnet_face18(use_se=False).cuda()
    checkpoint = torch.load(r'.\weights\v9_best.pth')
    model.load_state_dict(checkpoint['backbone'])
    model.eval()  # inference mode: freeze dropout / batch-norm statistics
    # Run the full LFW verification evaluation and print (acc, th).
    print(evlauate_test_data(model,r"e:\faces_webface_112x112\lfw.bin"))

    # stack_tst()

    # images, issame_list = load_bin(r'e:\faces_webface_112x112\lfw.bin')
    # get_features(1,images)
    # print(len(images), issame_list)

