# -*- coding: utf-8 -*-
"""
Created on 18-5-30 下午4:55

@author: ronghuaiyang
"""
from __future__ import print_function
import os
import sys
import cv2
from models import *
from models.resnet import resnet_face18
import torch
import numpy as np
import time
from config.config import Config_test
from torch.nn import DataParallel
from create_txt import test_create
from resize import resize_photo


def get_list(pair_list):
    """Collect the unique image paths referenced in a pair-list file.

    Each line of the file is expected to look like ``"n1/a.jpg n1/b.jpg ..."``;
    the first two whitespace-separated tokens are image paths. Order of first
    appearance is preserved.

    Args:
        pair_list: path to the pair-list text file.

    Returns:
        list[str]: de-duplicated image paths, in first-seen order.
    """
    with open(pair_list, 'r') as fd:
        pairs = fd.readlines()
    data_list = []
    seen = set()  # O(1) membership instead of the former O(n) list scan
    for pair in pairs:
        splits = pair.split()
        if len(splits) < 2:
            continue  # skip blank or malformed lines (formerly raised IndexError)
        for name in splits[:2]:
            if name not in seen:
                seen.add(name)
                data_list.append(name)
    return data_list


def load_image(img_path):
    """Read an image as grayscale and pair it with its horizontal mirror.

    Returns a float32 array of shape (2, 1, H, W) normalized to [-1, 1]
    (original and flipped view stacked on the leading axis), or None when
    the file cannot be read.
    """
    gray = cv2.imread(img_path, 0)
    if gray is None:
        return None
    # (H, W, 2): original next to its left-right flip
    pair = np.dstack((gray, np.fliplr(gray)))
    # (2, H, W) -> (2, 1, H, W): add a channel axis for the network
    pair = pair.transpose((2, 0, 1))[:, np.newaxis, :, :]
    out = pair.astype(np.float32, copy=False)
    out -= 127.5
    out /= 127.5
    return out


def get_features(model, test_list, batch_size=10):
    """Run the model over all images and return their stacked features.

    Each image contributes two rows (original + flipped, see ``load_image``);
    the two output embeddings are horizontally concatenated into one feature
    vector per image, so ``features`` has one row per entry of ``test_list``.

    Args:
        model: network mapping a (N, 1, H, W) tensor to (N, D) embeddings.
        test_list: absolute image paths to process.
        batch_size: flush threshold in ROWS (2 rows per image), kept for
            backward compatibility with the original flush rule.

    Returns:
        (features, cnt): features is an (len(test_list), 2*D) ndarray
        (None when test_list is empty); cnt is the number of forward batches.

    Raises:
        IOError: when an image cannot be read. (The original code only
        printed a message and then crashed later with an obscure error.)
    """
    # Fall back to CPU when CUDA is absent (formerly hard-coded "cuda").
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    batch = []           # per-batch image arrays, each (2, 1, H, W)
    feature_chunks = []  # per-batch feature matrices, stacked once at the end
    cnt = 0
    for i, img_path in enumerate(test_list):
        image = load_image(img_path)
        if image is None:
            raise IOError('read {} error'.format(img_path))
        batch.append(image)

        # Original rule: flush when the accumulated ROW count hits a
        # multiple of batch_size, or on the final image.
        if (2 * len(batch)) % batch_size == 0 or i == len(test_list) - 1:
            cnt += 1
            data = torch.from_numpy(np.concatenate(batch, axis=0))
            data = data.to(device)
            with torch.no_grad():  # inference only: skip autograd bookkeeping
                output = model(data)
            output = output.data.cpu().numpy()  # (rows, D)

            fe_1 = output[::2]   # embeddings of the original images
            fe_2 = output[1::2]  # embeddings of the flipped images
            # One (rows/2, 2*D) feature matrix per batch.
            feature_chunks.append(np.hstack((fe_1, fe_2)))
            batch = []

    # Single vstack instead of repeated O(n^2) concatenation in the loop.
    features = np.vstack(feature_chunks) if feature_chunks else None
    return features, cnt


def load_model(model, model_path):
    """Load weights from ``model_path`` into ``model``.

    Only checkpoint entries whose keys exist in the model's own state dict
    are applied; everything else in the checkpoint is ignored.
    """
    current = model.state_dict()
    checkpoint = torch.load(model_path)
    compatible = {key: value for key, value in checkpoint.items() if key in current}
    current.update(compatible)
    model.load_state_dict(current)


def get_feature_dict(test_list, features):
    """Map each image path to its feature row (same index in ``features``)."""
    return {name: features[idx] for idx, name in enumerate(test_list)}


def cosin_metric(x1, x2):
    """Cosine similarity between two 1-D vectors (dot product over norms)."""
    denom = np.linalg.norm(x1) * np.linalg.norm(x2)
    return np.dot(x1, x2) / denom


def cal_accuracy(y_score, y_true):
    """Find the score threshold that maximizes classification accuracy.

    Every observed score is tried as a threshold (prediction = score >= th);
    the first threshold reaching the best accuracy wins.

    Returns:
        (best_acc, best_th) tuple.
    """
    scores = np.asarray(y_score)
    truth = np.asarray(y_true)
    best_acc = 0
    best_th = 0
    for th in scores:
        preds = scores >= th
        acc = np.mean((preds == truth).astype(int))
        if acc > best_acc:
            best_acc = acc
            best_th = th
    return (best_acc, best_th)


def test_performance(fe_dict, pair_list, mode):
    """Score all pairs in ``pair_list`` using the precomputed feature dict.

    Args:
        fe_dict: mapping image path -> feature vector.
        pair_list: text file, one pair per line: ``"a.jpg b.jpg [label]"``.
        mode: 'lfw' to evaluate accuracy against ground-truth labels
            (third token per line); anything else returns raw similarities.

    Returns:
        'lfw' mode: (accuracy, best_threshold) tuple.
        otherwise: list of cosine similarities, one per pair.
    """
    with open(pair_list, 'r') as fd:
        pairs = fd.readlines()
    sims = []
    labels = []
    for pair in pairs:  # e.g. 'n1/a.jpg n1/b.jpg 1\n'
        splits = pair.split()
        fe_1 = fe_dict[splits[0]]
        fe_2 = fe_dict[splits[1]]
        sims.append(cosin_metric(fe_1, fe_2))
        # BUG FIX: label collection was outside the loop, so only the LAST
        # pair's label was kept and accuracy was computed against a
        # single-element label list.
        if mode == 'lfw':
            labels.append(int(splits[2]))
    if mode == 'lfw':  # lfw mode: adapt the threshold and compute accuracy
        acc, th = cal_accuracy(sims, labels)
        return acc, th
    else:
        return sims


def my_test(model, img_paths, identity_list, compair_list, batch_size, mode='test'):
    """Extract features for all images, then score the pair list.

    In 'lfw' mode an adaptive threshold is found and accuracy is returned;
    otherwise the raw similarity list for the pairs is returned.
    """
    start = time.time()
    # One feature vector per image.
    features, cnt = get_features(model, img_paths, batch_size=batch_size)
    elapsed = time.time() - start
    print('total time is {}, average time is {}'.format(elapsed, elapsed / cnt))
    # Keyed by the relative path used in the pair list.
    fe_dict = get_feature_dict(identity_list, features)
    if mode != 'lfw':
        # Plain test mode: just the per-pair similarities.
        return test_performance(fe_dict, compair_list, mode)
    # lfw mode: derive a threshold and report verification accuracy.
    acc, th = test_performance(fe_dict, compair_list, mode)
    print('lfw face verification accuracy: ', acc, 'threshold: ', th)
    return acc


if __name__ == '__main__':  # same setup steps as training
    # Usage: <script> <test_root> <result_csv_path>
    test_root, result_csv_path = sys.argv[1:3]
    opt = Config_test(test_root, result_csv_path)  # wrap CLI args in config

    base_dir = os.path.dirname(os.path.abspath(__file__))
    data_root = os.path.join(base_dir, opt.test_root)
    result_path = os.path.join(base_dir, opt.result_file)

    resize_photo(data_root)
    test_create(data_root)  # generate the pair-list txt file

    if opt.backbone == 'resnet18':
        model = resnet_face18(opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()
    else:
        model = resnet152()

    model = DataParallel(model)

    model_path = os.path.join(base_dir, opt.test_model_path)
    # Load onto CPU first so checkpoints saved on a different GPU device
    # still deserialize, then move the model to CUDA (addresses the old
    # "loading error" TODO).
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.to(torch.device("cuda"))

    identity_list = get_list(opt.test_list)  # e.g. ['n1/a.jpg', 'n1/b.jpg', ...]
    # Absolute paths of the test-set images.
    img_paths = [os.path.join(data_root, each) for each in identity_list]

    model.eval()  # evaluation mode
    # Similarity per pair in the pair list.
    res = my_test(model, img_paths, identity_list, opt.test_list, opt.test_batch_size)

    # One id per test group (first path component), in first-seen order.
    test_id = []
    for name in identity_list:
        group = name.split('/')[0]
        if group not in test_id:
            test_id.append(group)

    mx = max(res)
    mn = min(res)
    span = mx - mn
    # BUG FIX: `with` guarantees the file is closed even on error
    # (formerly a bare open/close pair).
    with open(result_path, "w") as fw:
        fw.write("id,dis,pred_pro\n")
        for i in range(len(res)):
            # Min-max normalize; guard the all-equal case, which formerly
            # produced nan via division by zero.
            pred_pro = (res[i] - mn) / span if span else 0.0
            fw.write("{},{},{}\n".format(test_id[i], res[i], pred_pro))

