from __future__ import print_function

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torch.autograd import Variable
from tqdm import tqdm

# Enable the cuDNN auto-tuner to pick the fastest conv algorithms for the
# fixed input size. Fix: attribute was misspelled "bencmark", which silently
# created an unused attribute and never enabled the optimization.
torch.backends.cudnn.benchmark = True

import os,sys,cv2,random,datetime
import argparse
import numpy as np
import zipfile

from dataset import ImageDataset
from matlab_cp2tform import get_similarity_transform_for_cv2
import net_sphere

from lighting_effects import simulate_directional_light, simulate_lighting ,add_noise,add_gaussian_blur




def alignment(src_img, src_pts):
    """Align a face image to the canonical 96x112 crop.

    Computes a similarity transform mapping the 5 detected facial landmarks
    in `src_pts` (flat or 5x2 sequence of x, y coords) onto fixed reference
    positions, then warps `src_img` accordingly.

    Args:
        src_img: BGR image (as returned by cv2.imdecode).
        src_pts: 10 landmark coordinates (eyes, nose, mouth corners).

    Returns:
        The warped 96x112 face crop.
    """
    # Canonical landmark positions inside the 96x112 crop.
    ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014],
               [48.0252, 71.7366], [33.5493, 92.3655], [62.7299, 92.2041]]
    crop_size = (96, 112)

    # Fix: the original converted src_pts to an ndarray twice; one conversion
    # with an explicit dtype is sufficient.
    s = np.array(src_pts, dtype=np.float32).reshape(5, 2)
    r = np.array(ref_pts, dtype=np.float32)

    tfm = get_similarity_transform_for_cv2(s, r)
    return cv2.warpAffine(src_img, tfm, crop_size)


def KFold(n=6000, n_folds=10, shuffle=False):
    """Split indices 0..n-1 into `n_folds` (train, test) partitions.

    Args:
        n: total number of samples.
        n_folds: number of contiguous test folds to produce.
        shuffle: if True, randomly permute the indices before folding.
            Fix: this flag was previously accepted but silently ignored.

    Returns:
        List of [train_indices, test_indices] pairs; each test fold covers
        a distinct n/n_folds slice and train is the complement.
    """
    base = list(range(n))
    if shuffle:
        random.shuffle(base)  # previously a no-op regardless of the flag
    folds = []
    for i in range(n_folds):
        test = base[i*n//n_folds:(i+1)*n//n_folds]
        train = list(set(base) - set(test))
        folds.append([train, test])
    return folds

def eval_acc(threshold, diff):
    """Compute verification accuracy at a given cosine-distance threshold.

    Each row of `diff` is (name1, name2, cosine_distance, same_flag); a pair
    is predicted "same" when its distance (column 2) exceeds `threshold`,
    and compared against the ground-truth flag (column 3).
    """
    y_true = np.array([int(row[3]) for row in diff])
    y_predict = np.array([1 if float(row[2]) > threshold else 0 for row in diff])
    return 1.0 * np.count_nonzero(y_true == y_predict) / len(y_true)

def find_best_threshold(thresholds, predicts):
    """Return the threshold from `thresholds` with the highest accuracy.

    Ties are broken in favor of the later candidate (the comparison uses >=),
    so the largest best-scoring threshold wins.
    """
    best_threshold, best_acc = 0, 0
    for candidate in thresholds:
        acc = eval_acc(candidate, predicts)
        if acc >= best_acc:
            best_acc, best_threshold = acc, candidate
    return best_threshold



# ---- Script setup: CLI args, model, LFW archive, landmarks, pair list ----

parser = argparse.ArgumentParser(description='PyTorch sphereface lfw')
parser.add_argument('--net','-n', default='sphere20a', type=str)
parser.add_argument('--lfw', default='../../dataset/face/lfw/lfw.zip', type=str)
parser.add_argument('--model','-m', default='sphere20a.pth', type=str)
args = parser.parse_args()

predicts=[]
# Instantiate the requested architecture from net_sphere by name, then load
# the pretrained weights. CUDA is disabled here (CPU inference).
net = getattr(net_sphere,args.net)()
net.load_state_dict(torch.load(args.model))
# net.cuda()
net.eval()
net.feature = True  # presumably makes the net return embeddings instead of class logits -- confirm in net_sphere

# LFW images are read directly out of the zip archive, never extracted.
zfile = zipfile.ZipFile(args.lfw)

# Map "Name/Name_0001.jpg" -> flat list of 10 ints (5 x,y landmark coords),
# parsed from a tab-separated file.
landmark = {}
with open('data/lfw_landmark.txt') as f:
    landmark_lines = f.readlines()
for line in landmark_lines:
    l = line.replace('\n','').split('\t')
    landmark[l[0]] = [int(k) for k in l[1:]]

# Standard LFW pairs list; the first line is a header and is skipped.
with open('data/pairs.txt') as f:
    pairs_lines = f.readlines()[1:]


# Light-source variables (for the lighting test, currently commented out below)
# angles = [0, 60, 120, 180, 240, 300]
# intensities = [0.1,0.3,0.5,1.0,1.5,2.0,3.0]
# Noise intensities (for the noise test, currently commented out below)
#noises = [0.1, 0.3, 0.5, 0.7, 0.9]
# Gaussian blur kernel sizes to evaluate
kernel_sizes = [(3, 3), (5, 5), (7, 7), (9, 9), (11, 11)]
accuracies = []


# Gaussian blur robustness test: for each kernel size, re-run LFW face
# verification on blurred image pairs and record the 10-fold CV accuracy.
for kernel_size in kernel_sizes:
    blur_acc = []
    predicts = []  # one "name1\tname2\tcosdistance\tsameflag\n" record per pair
    for pair_idx in tqdm(range(6000), desc=f"Testing kernel size {kernel_size}"):
        p = pairs_lines[pair_idx].replace("\n", "").split("\t")
        sameflag = 1 if len(p) == 3 else 0  # 3 fields => same-identity pair
        name1 = f"{p[0]}/{p[0]}_{int(p[1]):04}.jpg"
        name2 = f"{p[0]}/{p[0]}_{int(p[2]):04}.jpg" if sameflag else f"{p[2]}/{p[2]}_{int(p[3]):04}.jpg"

        # Decode each image straight from the zip and align it to 96x112.
        img1 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name1), np.uint8), 1), landmark[name1])
        img2 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name2), np.uint8), 1), landmark[name2])

        # Degrade both images with the current Gaussian blur kernel.
        img1 = add_gaussian_blur(img1, kernel_size)
        img2 = add_gaussian_blur(img2, kernel_size)

        # Batch of 4: both images plus their horizontal flips, converted to
        # NCHW and normalized to roughly [-1, 1].
        imglist = [img1, cv2.flip(img1, 1), img2, cv2.flip(img2, 1)]
        for j in range(len(imglist)):  # fix: loop var was "i", shadowing the pair index above
            imglist[j] = imglist[j].transpose(2, 0, 1).reshape((1, 3, 112, 96))
            imglist[j] = (imglist[j] - 127.5) / 128.0

        img = np.vstack(imglist)
        img = Variable(torch.from_numpy(img).float())
        output = net(img)
        f = output.data
        f1, f2 = f[0], f[2]  # embeddings of the two unflipped images
        cosdistance = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5)
        predicts.append(f"{name1}\t{name2}\t{cosdistance}\t{sameflag}\n")

    # 10-fold cross-validation: choose the best threshold on each train split,
    # then score it on the held-out fold.
    predicts = np.array([k.strip("\n").split() for k in predicts])
    thresholds = np.arange(-1.0, 1.0, 0.005)
    folds = KFold(n=6000, n_folds=10, shuffle=False)
    acc = []
    for train, test in folds:
        best_thresh = find_best_threshold(thresholds, predicts[train])
        acc.append(eval_acc(best_thresh, predicts[test]))
    blur_acc.append(np.mean(acc))
    accuracies.append(blur_acc)
    print(f"Kernel Size {kernel_size}: {blur_acc}")

# Line plot: mean verification accuracy vs. Gaussian blur kernel size.
fig, ax = plt.subplots(figsize=(10, 6))

# `accuracies` is a list of single-element lists (one mean accuracy per
# kernel size); matplotlib broadcasts the (5, 1) y-data over the 5 string
# x positions.
ax.plot([str(k) for k in kernel_sizes], accuracies, marker='o', label="Accuracy")

ax.set_xlabel("Kernel Size")
ax.set_ylabel("Accuracy")
ax.set_title("Accuracy under different Gaussian blur kernel sizes")
# NOTE(review): passing strings to set_xticks relies on the categorical axis
# created by the string x-data above -- verify against the installed
# matplotlib version.
ax.set_xticks([str(k) for k in kernel_sizes])
ax.legend()

plt.tight_layout()
plt.savefig("gaussian_blur_accuracy_line.png")
plt.show()

# Lighting simulation test (disabled; code kept below for reference)


# for intensity in intensities:
#     intensity_acc = []
#     for angle in tqdm(angles, desc=f"Simulating light intensity {intensity}"):
#         predicts = []
#         for i in range(6000):
#             p = pairs_lines[i].replace("\n", "").split("\t")
#             sameflag = 1 if len(p) == 3 else 0
#             name1 = f"{p[0]}/{p[0]}_{int(p[1]):04}.jpg"
#             name2 = f"{p[0]}/{p[0]}_{int(p[2]):04}.jpg" if sameflag else f"{p[2]}/{p[2]}_{int(p[3]):04}.jpg"
#
#             img1 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name1), np.uint8), 1), landmark[name1])
#             img2 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name2), np.uint8), 1), landmark[name2])
#
#             # # 添加光源模拟
#             # img1 = simulate_directional_light(img1, angle, intensity)
#             # img2 = simulate_directional_light(img2, angle, intensity)
#             img1 = simulate_lighting(img1, angle, 200, intensity)
#             img2 = simulate_lighting(img2, angle, 200, intensity)
#
#             imglist = [img1, cv2.flip(img1, 1), img2, cv2.flip(img2, 1)]
#             for i in range(len(imglist)):
#                 imglist[i] = imglist[i].transpose(2, 0, 1).reshape((1, 3, 112, 96))
#                 imglist[i] = (imglist[i] - 127.5) / 128.0
#
#             img = np.vstack(imglist)
#             img = Variable(torch.from_numpy(img).float())
#             output = net(img)
#             f = output.data
#             f1, f2 = f[0], f[2]
#             cosdistance = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5)
#             predicts.append(f"{name1}\t{name2}\t{cosdistance}\t{sameflag}\n")
#
#         predicts = np.array([k.strip("\n").split() for k in predicts])
#         thresholds = np.arange(-1.0, 1.0, 0.005)
#         folds = KFold(n=6000, n_folds=10, shuffle=False)
#         acc = []
#         for train, test in folds:
#             best_thresh = find_best_threshold(thresholds, predicts[train])
#             acc.append(eval_acc(best_thresh, predicts[test]))
#         intensity_acc.append(np.mean(acc))
#     accuracies.append(intensity_acc)
#     #打印准确度
#     print(f"Intensity {intensity}: {intensity_acc}")

# 绘制折线图
# fig, ax = plt.subplots(figsize=(10, 6))
#
# for intensity, acc in zip(intensities, accuracies):
#     ax.plot(angles, acc, marker='o', label=f"Intensity {intensity}")
#
# ax.set_xlabel("Light Source Angle (degrees)")
# ax.set_ylabel("Accuracy")
# ax.set_title("Accuracy under different light source angles and intensities")
# ax.set_xticks(angles)
# ax.legend()
#
# plt.tight_layout()
# plt.savefig("light_variation_accuracy_line.png")
# plt.show()
#
#
# # 绘制条形图
# x = np.arange(len(angles))
# width = 0.2
# fig, ax = plt.subplots(figsize=(10, 6))
#
# for idx, (intensity, acc) in enumerate(zip(intensities, accuracies)):
#     ax.bar(x + idx * width, acc, width, label=f"Intensity {intensity}")
#
# ax.set_xlabel("Light Source Angle (degrees)")
# ax.set_ylabel("Accuracy")
# ax.set_title("Accuracy under different light source angles and intensities")
# ax.set_xticks(x + width)
# ax.set_xticklabels([f"{angle}°" for angle in angles])
# ax.legend()
#
# plt.tight_layout()
# plt.savefig("light_variation_accuracy.png")
# plt.show()


# Noise test (disabled)
# NOTE(review): the loop below iterates `noise_levels`, but the (commented)
# definition near the top of the file is named `noises` -- reconcile the two
# names before re-enabling this block.
# for noise_level in noise_levels:
#     noise_acc = []
#     predicts = []  # 确保predicts是一个列表
#     for i in tqdm(range(6000), desc=f"Testing noise level {noise_level}"):
#         p = pairs_lines[i].replace("\n", "").split("\t")
#         sameflag = 1 if len(p) == 3 else 0
#         name1 = f"{p[0]}/{p[0]}_{int(p[1]):04}.jpg"
#         name2 = f"{p[0]}/{p[0]}_{int(p[2]):04}.jpg" if sameflag else f"{p[2]}/{p[2]}_{int(p[3]):04}.jpg"
#
#         img1 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name1), np.uint8), 1), landmark[name1])
#         img2 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name2), np.uint8), 1), landmark[name2])
#
#         # 添加噪声
#         img1 = add_noise(img1, noise_level)
#         img2 = add_noise(img2, noise_level)
#
#         imglist = [img1, cv2.flip(img1, 1), img2, cv2.flip(img2, 1)]
#         for i in range(len(imglist)):
#             imglist[i] = imglist[i].transpose(2, 0, 1).reshape((1, 3, 112, 96))
#             imglist[i] = (imglist[i] - 127.5) / 128.0
#
#         img = np.vstack(imglist)
#         img = Variable(torch.from_numpy(img).float())
#         output = net(img)
#         f = output.data
#         f1, f2 = f[0], f[2]
#         cosdistance = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5)
#         predicts.append(f"{name1}\t{name2}\t{cosdistance}\t{sameflag}\n")
#
#     predicts = np.array([k.strip("\n").split() for k in predicts])
#     thresholds = np.arange(-1.0, 1.0, 0.005)
#     folds = KFold(n=6000, n_folds=10, shuffle=False)
#     acc = []
#     for train, test in folds:
#         best_thresh = find_best_threshold(thresholds, predicts[train])
#         acc.append(eval_acc(best_thresh, predicts[test]))
#     noise_acc.append(np.mean(acc))
#     accuracies.append(noise_acc)
#     print(f"Noise Level {noise_level}: {noise_acc}")
#
# # 绘制折线图
# fig, ax = plt.subplots(figsize=(10, 6))
#
# for noise_level, acc in zip(noise_levels, accuracies):
#     ax.plot(noise_levels, acc, marker='o', label=f"Noise Level {noise_level}")
#
# ax.set_xlabel("Noise Level")
# ax.set_ylabel("Accuracy")
# ax.set_title("Accuracy under different noise levels")
# ax.set_xticks(noise_levels)
# ax.legend()
#
# plt.tight_layout()
# plt.savefig("noise_variation_accuracy_line.png")
# plt.show()






