import numpy as np
import cv2
import time
from resnet import *
import torch
import math
import pickle
import os
from torch.nn import DataParallel
from utils import utils
# Target CUDA device string. NOTE(review): not referenced anywhere in this
# file — presumably consumed by callers or left over; confirm intended use.
DEVICE="cuda:0"
def get_lfw_list(pair_list):
    """Collect the unique image names referenced by an LFW pair file.

    Parameters
    ----------
    pair_list : str
        Path to a text file where each line is "img1 img2 label".

    Returns
    -------
    list
        Image names in first-appearance order, without duplicates.
    """
    with open(pair_list, 'r') as fd:
        pairs = fd.readlines()

    data_list = []
    seen = set()  # O(1) membership test; the original list scan was O(n^2)
    for pair in pairs:
        splits = pair.split()
        # splits[:2] also tolerates blank/short lines, which previously
        # raised IndexError.
        for name in splits[:2]:
            if name not in seen:
                seen.add(name)
                data_list.append(name)

    return data_list


def load_image(img_path):
    """Load a grayscale image (path or array) and prepare a mirrored pair.

    Parameters
    ----------
    img_path : str or numpy.ndarray
        File path, or an already-decoded grayscale image array.

    Returns
    -------
    numpy.ndarray or None
        float32 array of shape (2, 1, 112, 112) normalized to [-1, 1]:
        the resized image plus its horizontal mirror, each with a channel
        axis. None when the image cannot be read.
    """
    # BUG FIX: the original tested `type(img_path) is 'srt'` — comparing a
    # type object to a string literal (and a typo for 'str'), which is never
    # true, so path strings were passed straight through as if they were
    # image arrays.
    if isinstance(img_path, str):
        image = cv2.imread(img_path, 0)  # flag 0 -> read as grayscale
    else:
        image = img_path
    if image is None:
        return None
    image = cv2.resize(image, (112, 112))  # model expects 112x112 inputs
    # Stack the image with its horizontal flip along a new depth axis.
    image = np.dstack((image, np.fliplr(image)))
    image = image.transpose((2, 0, 1))   # -> (2, 112, 112)
    image = image[:, np.newaxis, :, :]   # -> (2, 1, 112, 112)

    image = image.astype(np.float32, copy=False)
    # Normalize pixel values from [0, 255] to [-1, 1].
    image -= 127.5
    image /= 127.5

    return image


def get_featurs1(model, test_dict, batch_size=10):
    """Extract a feature vector for every image in ``test_dict``.

    Parameters
    ----------
    model : callable
        Network taking a float tensor of shape (B, 1, 112, 112) on GPU.
    test_dict : dict
        Maps image name -> image (path or array accepted by ``load_image``).
    batch_size : int
        Flush the input buffer once its row count is a multiple of this.
        NOTE: each image contributes 2 rows (original + mirror).

    Returns
    -------
    tuple
        (features, cnt): ``features`` is a numpy array with one row per
        successfully loaded image (both view embeddings hstacked, order
        matching ``test_dict`` iteration); ``cnt`` is the number of
        forward passes performed.
    """

    def _infer(batch):
        # One GPU forward pass. Output rows alternate original/flipped,
        # so join consecutive pairs column-wise into one vector per image.
        data = torch.from_numpy(batch).cuda()
        output = model(data).data.cpu().numpy()
        return np.hstack((output[::2], output[1::2]))

    images = None
    features = None
    cnt = 0

    for i, (filename, path) in enumerate(test_dict.items()):
        # load_image normalizes to [-1, 1]; shape (2, 1, 112, 112) per image.
        image = load_image(path)
        if image is None:
            # BUG FIX: the original printed a constant placeholder message
            # ("read (unknown) error") and then crashed in np.concatenate;
            # report the actual name and skip the bad entry.
            print('read {} error'.format(filename))
            continue

        images = image if images is None else np.concatenate((images, image), axis=0)

        # Flush when a full batch is buffered or we reached the last entry.
        if images.shape[0] % batch_size == 0 or i == len(test_dict) - 1:
            cnt += 1
            feature = _infer(images)
            features = feature if features is None else np.vstack((features, feature))
            images = None

    # Flush a trailing partial batch (possible when the final image failed
    # to load and was skipped above).
    if images is not None:
        cnt += 1
        feature = _infer(images)
        features = feature if features is None else np.vstack((features, feature))

    return features, cnt

def get_featurs(model, test_list, batch_size=10):
    """Extract a feature vector for every image path in ``test_list``.

    Parameters
    ----------
    model : callable
        Network taking a float tensor of shape (B, 1, 112, 112) on GPU.
    test_list : list
        Image paths (or arrays) accepted by ``load_image``.
    batch_size : int
        Flush the input buffer once its row count is a multiple of this.
        NOTE: each image contributes 2 rows (original + mirror).

    Returns
    -------
    tuple
        (features, cnt): ``features`` is a numpy array with one row per
        successfully loaded image (both view embeddings hstacked, aligned
        with ``test_list`` order); ``cnt`` is the number of forward passes.
    """

    def _infer(batch):
        # One GPU forward pass. Output rows alternate original/flipped,
        # so join consecutive pairs column-wise into one vector per image.
        data = torch.from_numpy(batch).cuda()
        output = model(data).data.cpu().numpy()
        return np.hstack((output[::2], output[1::2]))

    images = None
    features = None
    cnt = 0

    for i, img_path in enumerate(test_list):
        # load_image normalizes to [-1, 1]; shape (2, 1, 112, 112) per image.
        image = load_image(img_path)
        if image is None:
            print('read {} error'.format(img_path))
            # BUG FIX: the original fell through here and crashed in
            # np.concatenate with a None image; skip the bad entry instead.
            continue

        images = image if images is None else np.concatenate((images, image), axis=0)

        # Flush when a full batch is buffered or we reached the last entry.
        if images.shape[0] % batch_size == 0 or i == len(test_list) - 1:
            cnt += 1
            feature = _infer(images)
            features = feature if features is None else np.vstack((features, feature))
            images = None

    # Flush a trailing partial batch (possible when the final image failed
    # to load and was skipped above).
    if images is not None:
        cnt += 1
        feature = _infer(images)
        features = feature if features is None else np.vstack((features, feature))

    return features, cnt


def load_model(model, model_path):
    """Load weights from ``model_path`` into ``model``.

    Only checkpoint keys that exist in the model's state dict are applied
    (partial / transfer loading); extra keys are silently dropped.

    Parameters
    ----------
    model : torch.nn.Module
        Target network, modified in place.
    model_path : str
        Path to a saved state-dict file.
    """
    model_dict = model.state_dict()
    # map_location='cpu' makes the checkpoint loadable on CPU-only hosts;
    # load_state_dict then copies tensors onto the model's own device.
    pretrained_dict = torch.load(model_path, map_location='cpu')
    # Keep only entries the current architecture actually defines.
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)




def get_feature_dict(test_list, features):
    """Map each name in ``test_list`` to the feature at the same index."""
    return {name: features[idx] for idx, name in enumerate(test_list)}


def cosin_metric(x1, x2):
    """Cosine similarity between two 1-D vectors."""
    denom = np.linalg.norm(x1) * np.linalg.norm(x2)
    return np.dot(x1, x2) / denom


# Find the best verification accuracy over all candidate thresholds.
def cal_accuracy(y_score, y_true):
    """Sweep every score as a threshold and keep the best accuracy.

    y_score : predicted cosine similarities
    y_true  : ground-truth labels (0/1)

    Returns (best_accuracy, best_threshold). Ties keep the earliest
    threshold encountered.
    """
    y_score = np.asarray(y_score)
    y_true = np.asarray(y_true)
    best_acc, best_th = 0, 0

    # Each observed similarity is tried as a decision threshold.
    for th in y_score:
        predicted = y_score >= th
        acc = np.mean((predicted == y_true).astype(int))
        if acc > best_acc:
            best_acc, best_th = acc, th
    return (best_acc, best_th)


def yyytest_performance(fe_dict, pair_list):
    """Score every pair in ``pair_list`` and return (accuracy, threshold).

    fe_dict   : maps image name -> feature vector
    pair_list : path to a text file whose lines are "name_a name_b label"
    """
    with open(pair_list, 'r') as fd:
        lines = fd.readlines()

    sims = []
    labels = []
    for line in lines:
        parts = line.split()
        # Cosine similarity between the two stored face embeddings.
        sims.append(cosin_metric(fe_dict[parts[0]], fe_dict[parts[1]]))
        labels.append(int(parts[2]))

    return cal_accuracy(sims, labels)


def lfw_test1(model, img_dict, identity_list, compair_list, batch_size):
    """Run the LFW verification benchmark from an in-memory image dict.

    Returns (accuracy, threshold).
    """
    start = time.time()
    # Inference: one feature vector per image.
    features, cnt = get_featurs1(model, img_dict, batch_size=batch_size)
    elapsed = time.time() - start
    print('total time is {}, average time is {}'.format(elapsed, elapsed / cnt))
    # Pair each identity name with its feature vector.
    fe_dict = get_feature_dict(identity_list, features)
    acc, th = yyytest_performance(fe_dict, compair_list)
    print('lfw face verification accuracy: ', acc, 'threshold: ', th)
    return acc, th

def lfw_test(model, img_paths, identity_list, compair_list, batch_size):
    """Run the LFW verification benchmark from a list of image paths.

    Returns (accuracy, threshold).
    """
    start = time.time()
    # Inference: one feature vector per image.
    features, cnt = get_featurs(model, img_paths, batch_size=batch_size)
    elapsed = time.time() - start
    print('total time is {}, average time is {}'.format(elapsed, elapsed / cnt))
    # Pair each identity name with its feature vector.
    fe_dict = get_feature_dict(identity_list, features)
    acc, th = yyytest_performance(fe_dict, compair_list)
    print('lfw face verification accuracy: ', acc, 'threshold: ', th)
    return acc, th

def listslide():
    """Scratch demo of negative-step slicing on an ndarray, a list and a
    tensor. NOTE(review): torch tensors reject negative slice steps, so
    the last print presumably raises — confirm intended behavior."""
    arr = np.array([[3, 2, 1, 5, 1, 213, 10], [123, 12, 3, 51, 6, 1, 6]])
    plain = [3, 2, 1, 5, 1, 213, 10]
    tensor = torch.from_numpy(arr)
    print(arr[:, 2:0:-1])

    print(plain[5:1:-1])
    print(tensor[:, 2:0:-1])


def np_nonzeros():
    """Scratch demo: print (row, col) indices of positive entries."""
    mat = np.array([[1, 0, 3], [0, 2, 0], [0, 0, 9]])
    coords = np.where(mat > 0)
    coords = np.vstack(coords).T
    print(coords)



def model_check(model, binpath, pairpath):
    """Evaluate ``model`` on an LFW-style .bin dataset.

    binpath  : path to the packed image archive read by utils.load_bin
    pairpath : path to the pair/label text file

    Returns (accuracy, threshold).
    """
    # Mapping of image name -> decoded image data.
    img_dict = utils.load_bin(binpath, pairpath)

    # Unique image names in the order the pair file references them.
    identity_list = get_lfw_list(pairpath)

    model.eval()  # inference mode
    cur_acc, th = lfw_test1(model, img_dict, identity_list, pairpath, 32)
    return cur_acc, th



def npwhere3dtest():
    """Scratch demo: indices of entries > 0.1 in a random 3x4x4 array."""
    data = np.random.randn(3, 4, 4)
    print(data.shape)
    hits = np.where(data > 0.1)
    hits = np.vstack(hits).T
    print(hits)
def calc_theta(theta):
    """Convert a cosine value to the corresponding angle in degrees."""
    return math.degrees(math.acos(theta))

# Ad-hoc manual entry point: currently only runs the numpy demo.
# The commented-out lines below are earlier experiments (angle conversion
# and LFW model evaluation) kept for reference.
if __name__ == '__main__':
    npwhere3dtest()
    # print(calc_theta(0.28))
    # print(math.cos(35/180*math.pi))
    # model = resnet_face18(use_se=False).cuda()
    # checkpoint = torch.load(r'.\weights\v5_best.pth')
    # model.load_state_dict(checkpoint['backbone'])
    # model_check(model,r"e:\faces_webface_112x112\lfw.bin", r"lfw_test_pair.txt")
    # tdict = utils.load_bin(r"e:\faces_webface_112x112\lfw.bin",r"lfw_test_pair.txt")
    # print(len(tdict))
# np_nonzeros()

# listslide()
