import torchvision
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import numpy as np
import cv2
from PIL import Image
from siamese_utils import Normalize, DDFA_Pairs_Dataset
from verification import calculate_roc, generate_roc_curve, match1pair

import matplotlib.pylab as plt
from siamese_network import load_resnet50


# 定义一个图像显示函数
def my_imshow(image, title=None):
    """Render a single image in the current matplotlib figure, optionally titled."""
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(1)  # give the GUI event loop time to actually draw the figure

def my_imshow2(image0, image1, text=None):
    """Show two images side by side, optionally annotated with *text*."""
    for position, img in enumerate((image0, image1), start=1):
        plt.subplot(1, 2, position)
        plt.imshow(img)
    if text:
        plt.text(40, 0, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    plt.show()
    plt.pause(1)  # give the GUI event loop time to actually draw the figure


def sia_extract_feature_ddfa(checkpoint_fp, root, pairs_txt, log_dir, device_ids = [0],
                      batch_size = 32, num_workers = 8):
    """Extract L2-normalized embeddings for every pair in ``pairs_txt`` and
    evaluate verification performance (ROC / accuracy).

    Args:
        checkpoint_fp: checkpoint file whose 'state_dict' entry holds the
            DataParallel-wrapped siamese ResNet-50 weights.
        root: image root directory passed to DDFA_Pairs_Dataset.
        pairs_txt: text file listing image pairs and their same/different labels.
        log_dir: path where the ROC curve figure is saved.
        device_ids: CUDA device ids for DataParallel; the first is the primary.
        batch_size: evaluation batch size.
        num_workers: number of DataLoader worker processes.

    Returns:
        Tuple of (tpr, fpr, accuracy, best_thresholds, embeddings_l, embeddings_r).
    """
    # Remap tensors that were saved on any of GPUs 0-7 onto GPU 0 when loading.
    map_location = {f'cuda:{i}': 'cuda:0' for i in range(8)}
    # Keep only the 'state_dict' entry of the checkpoint (the model weights).
    checkpoint = torch.load(checkpoint_fp, map_location=map_location)['state_dict']
    torch.cuda.set_device(device_ids[0])
    model = load_resnet50()
    model = nn.DataParallel(model, device_ids=device_ids).cuda()
    model.load_state_dict(checkpoint)
    dataset = DDFA_Pairs_Dataset(root, pairs_txt,
                                 transform=transforms.Compose([transforms.ToTensor(),
                                                               Normalize(mean=127.5, std=128)]))
    data_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
                                  num_workers=num_workers)
    cudnn.benchmark = True  # let cuDNN auto-tune kernels for the fixed input size
    model.eval()  # switch BatchNorm/Dropout to inference behavior
    embeddings_l = []  # one flattened feature vector per left image
    embeddings_r = []  # one flattened feature vector per right image
    pairs_match = []   # one same/different label per pair
    with torch.no_grad():  # inference only: skip all gradient bookkeeping
        for inputs_l, inputs_r, matches, path0, path1 in data_loader:
            inputs_l = inputs_l.cuda()
            inputs_r = inputs_r.cuda()
            feature_l, feature_r = model(inputs_l, inputs_r)
            # L2-normalize each embedding so pair distances live on the unit sphere.
            feature_l = feature_l.div(torch.norm(feature_l, p=2, dim=1, keepdim=True).expand_as(feature_l))
            feature_r = feature_r.div(torch.norm(feature_r, p=2, dim=1, keepdim=True).expand_as(feature_r))
            for j in range(feature_l.shape[0]):
                embeddings_l.append(feature_l[j].cpu().numpy().flatten())
                embeddings_r.append(feature_r[j].cpu().numpy().flatten())
                pairs_match.append(matches[j].cpu().numpy().flatten())

    embeddings_l = np.array(embeddings_l)
    embeddings_r = np.array(embeddings_r)
    # BUG FIX: the original hard-coded reshape(10), which crashes unless the
    # pair list contains exactly 10 pairs; flatten to 1-D regardless of count.
    pairs_match = np.array(pairs_match).reshape(-1)
    thresholds = np.arange(0, 4, 0.0001)
    tpr, fpr, accuracy, best_thresholds = calculate_roc(thresholds, embeddings_l, embeddings_r,
                                                        pairs_match, nrof_folds = 10, pca = 0)
    generate_roc_curve(fpr, tpr, log_dir)
    return tpr, fpr, accuracy, best_thresholds, embeddings_l, embeddings_r



def face_match(checkpoint_fp, path0, path1, label, device_ids = [0], threshold=0.0002):
    """Decide whether the faces in ``path0`` and ``path1`` belong to the
    same identity, displaying the images and the decision.

    Args:
        checkpoint_fp: checkpoint file whose 'state_dict' entry holds the
            DataParallel-wrapped siamese ResNet-50 weights.
        path0: file path of the first face image.
        path1: file path of the second face image.
        label: ground-truth label, only used for display in the result text.
        device_ids: CUDA device ids for DataParallel; the first is the primary.
        threshold: distance threshold passed to match1pair (generalized from
            the previously hard-coded 0.0002; same default behavior).

    Returns:
        True if the pair is predicted to be the same identity, else False.
    """
    # Remap tensors that were saved on any of GPUs 0-7 onto GPU 0 when loading.
    map_location = {f'cuda:{i}': 'cuda:0' for i in range(8)}
    # Keep only the 'state_dict' entry of the checkpoint (the model weights).
    checkpoint = torch.load(checkpoint_fp, map_location=map_location)['state_dict']
    torch.cuda.set_device(device_ids[0])
    model = load_resnet50()
    model = nn.DataParallel(model, device_ids=device_ids).cuda()
    model.load_state_dict(checkpoint)
    # BUG FIX: the original never called eval(), leaving BatchNorm/Dropout in
    # training mode during inference.
    model.eval()
    transform = transforms.Compose([transforms.ToTensor(), Normalize(mean=127.5, std=128)])
    image0 = Image.open(path0)
    image1 = Image.open(path1)
    my_imshow(image0)
    my_imshow(image1)

    inputs_l = torch.unsqueeze(transform(image0), dim=0).cuda()
    inputs_r = torch.unsqueeze(transform(image1), dim=0).cuda()
    # BUG FIX: the original ran the forward pass and normalization with
    # autograd enabled; only the .numpy() copies were under no_grad.
    with torch.no_grad():
        feature_l, feature_r = model(inputs_l, inputs_r)
        # L2-normalize each embedding so the pair distance is scale-invariant.
        feature_l = feature_l.div(torch.norm(feature_l, p=2, dim=1, keepdim=True).expand_as(feature_l))
        feature_r = feature_r.div(torch.norm(feature_r, p=2, dim=1, keepdim=True).expand_as(feature_r))
        feature_l_np = feature_l[0].cpu().numpy().flatten()
        feature_r_np = feature_r[0].cpu().numpy().flatten()

    embeddings_l = np.array(feature_l_np)
    embeddings_r = np.array(feature_r_np)

    predict_issame = match1pair(embeddings_l, embeddings_r, threshold)
    if predict_issame:
        text = "match/label[%s]"%label
    else:
        text = "not match/label[%s]"%label
    my_imshow2(image0, image1, text=text)
    return predict_issame


def imshow(img, text=None):
    """Display a CHW image tensor with matplotlib, optionally annotated."""
    pixels = img.numpy()
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    # Matplotlib expects HWC ordering, so move channels last.
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
    plt.pause(1)


def val_all():
    """Run verification over the full DDFA pair list and print the metrics."""
    ############# DDFA #####################
    checkpoint_fp = "./training_debug/logs/model/_checkpoint_epoch_50.pth.tar"
    # model storage path
    root_ddfa = r"G:\samples\face\train_aug_120x120\train_aug_120x120"     # dataset path
    pairs_txt = "./testpair_list.txt"   # test picture's name list
    log_dir = "./training_debug/logs/contrastive_loss/recognition/result.png"

    tpr, fpr, accuracy, best_thresholds, embeddings_l, embeddings_r = sia_extract_feature_ddfa(checkpoint_fp, root_ddfa, pairs_txt, log_dir, batch_size=1)
    print(tpr)
    print(fpr)
    print(accuracy)
    print(best_thresholds)
    # BUG FIX: mean_acc was computed but never used; report the cross-fold mean.
    print("mean accuracy = %f" % np.mean(accuracy))

import os
def pre_1pair():
    """Match one hand-picked DDFA image pair and print the prediction."""
    ############# DDFA #####################
    checkpoint_fp = "./training_debug/logs/model/_checkpoint_epoch_50.pth.tar"
    dir_img = r"G:\samples\face\train_aug_120x120\train_aug_120x120"
    name0 = "LFPWFlip_LFPW_image_train_0678_9_2.jpg"
    name1 = "LFPWFlip_LFPW_image_train_0678_4_1.jpg"
    path0, path1 = (os.path.join(dir_img, name) for name in (name0, name1))
    label = 1  # ground truth: same identity
    is_match = face_match(checkpoint_fp, path0, path1, label)
    print(is_match)


def read_pairs(pairs_filename):
    """Parse a comma-separated pair-list file.

    Args:
        pairs_filename: path to a text file with one pair per line, fields
            separated by commas (e.g. "img0.jpg,img1.jpg,1").

    Returns:
        list[list[str]]: for each line, the list of its comma-separated
        fields after stripping surrounding whitespace.
    """
    # Iterate the file object lazily instead of materializing readlines().
    with open(pairs_filename, 'r') as f:
        return [line.strip().split(",") for line in f]


def pred_pairs():
    """Run face_match on every pair in the test list and print each verdict."""
    checkpoint_fp = "./training_debug/logs/model/_checkpoint_epoch_50.pth.tar"
    dir_img = r"G:\samples\face\train_aug_120x120\train_aug_120x120"
    pairs_txt = "./testpair_list.txt"
    for pair in read_pairs(pairs_txt):
        path0 = os.path.join(dir_img, pair[0])
        path1 = os.path.join(dir_img, pair[1])
        # pair[2] is the ground-truth label, used only for display.
        print(face_match(checkpoint_fp, path0, path1, pair[2]))


if __name__ == '__main__':
    # Other entry points: val_all() for full ROC evaluation,
    # pre_1pair() for a single hand-picked pair.
    pred_pairs()























