import os

import cv2
import pandas as pd
import torch
import torchvision
import torch.nn as nn
from facenet_pytorch.models.inception_resnet_v1 import InceptionResnetV1
from torch import optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
from PIL import Image
import PIL.ImageOps

from OriginalSiamese.dataset import SiameseNetworkDataset, SiameseSingleDataset
from OriginalSiamese.facenet import collate_fn
from Siamese.PrepareData import cv_imread

print(torch.__version__)  # 1.1.0
print(torchvision.__version__)  # 0.3.0

train_dir = r"H:\48faceset\train"  # training-set directory
val_dir = r"H:\48faceset\val"  # validation-set directory
# Hyper-parameters
loadModel = False  # resume from the best checkpoint when True
gray = False  # convert probe images to grayscale when True
train_batch_size = 52  # batch size used for training
train_number_epochs = 30000  # maximum number of training epochs
early_stop = 50  # stop after this many epochs without val-loss improvement
lr = 0.001  # Adam learning rate
bestpath = './model/model_best.ckpt'  # checkpoint with the lowest val loss
normalpath = './model/model.ckpt'  # checkpoint written at the end of training
transform = transforms.Compose([transforms.Resize((120, 120)),
                                transforms.ToTensor()])
device = 'cuda'


def imshow(img, text=None, should_save=False):
    """Display a (C, H, W) tensor image with matplotlib, optionally annotated with `text`."""
    pixels = img.numpy()  # tensor -> ndarray
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    # matplotlib expects (H, W, C) ordering.
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()


def show_plot(iteration, loss):
    """Plot sampled loss values against their iteration counts."""
    plt.plot(iteration, loss)
    plt.show()


def get_model():
    """Build the Siamese network, optionally restoring the best checkpoint, and move it to `device`."""
    model = SiameseNetwork()
    if loadModel:
        print('loadModel')
        model.load_state_dict(torch.load(bestpath))
    model.to(device)
    return model


def train():
    """Train the Siamese network with contrastive loss and early stopping.

    Saves the weights with the lowest validation loss to `bestpath`, the
    final weights to `normalpath`, and plots the sampled training-loss
    history at the end.
    """
    # File-level dataset: sub-folder names define the classes.
    folder_dataset = torchvision.datasets.ImageFolder(root=train_dir)

    print(folder_dataset.classes)  # class names, taken from folder names
    print(folder_dataset.class_to_idx)  # classes indexed 0, 1, ... in order
    # print(folder_dataset.imgs)  # (path, class) pairs for every image

    # Pair dataset: yields (img0, img1, same/different label) triples.
    siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
                                            transform=transform,
                                            should_invert=False)

    # Training dataloader
    train_dataloader = DataLoader(siamese_dataset, shuffle=True, batch_size=train_batch_size)

    val_dataset_tv = torchvision.datasets.ImageFolder(root=val_dir)
    val_dataset = SiameseNetworkDataset(imageFolderDataset=val_dataset_tv,
                                        transform=transform,
                                        should_invert=False)

    # Validation dataloader
    val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=train_batch_size)

    net = get_model()

    criterion = ContrastiveLoss()  # loss function
    optimizer = optim.Adam(net.parameters(), lr=lr)  # optimizer

    counter = []  # iteration counts for the loss plot
    loss_history = []  # sampled training losses for the loss plot
    iteration_number = 0
    minvalloss = 10  # sentinel; replaced by the first epoch's val loss below
    early_stop_count = 0
    # Training loop
    for epoch in range(0, train_number_epochs):
        trainloss = []
        for i, data in enumerate(train_dataloader, 0):
            img0, img1, label = data
            net.train()
            img0, img1, label = img0.to(device), img1.to(device), label.to(device)
            optimizer.zero_grad()
            output1, output2 = net(img0, img1)
            loss_contrastive = criterion(output1, output2, label)
            loss_contrastive.backward()
            # Clip gradients; NOTE(review): the returned norm is never used.
            grad_norm = nn.utils.clip_grad_norm_(net.parameters(), max_norm=10)
            optimizer.step()
            if i % 10 == 0:  # sample the loss every 10 batches for plotting
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(loss_contrastive.item())
            trainloss.append(loss_contrastive.item())
        # Evaluate on the validation set.
        valloss = []
        net.eval()
        with torch.no_grad():
            for i, data in enumerate(val_dataloader, 0):
                img0, img1, label = data
                img0, img1, label = img0.to(device), img1.to(device), label.to(device)  # move data to the GPU
                output1, output2 = net(img0, img1)
                loss_contrastive = criterion(output1, output2, label)
                valloss.append(loss_contrastive.item())

        print("Epoch number: {} , Current loss: {:.4f}, Val loss: {:.4f}\n"
              .format(epoch, np.mean(trainloss), np.mean(valloss)))

        # early stop
        # NOTE(review): on the first pass minvalloss becomes this epoch's
        # mean, so the strict `<` below never saves a best model on that
        # epoch — confirm this is intended.
        if minvalloss == 10:
            minvalloss = np.mean(valloss)
        if np.mean(valloss) < minvalloss:
            torch.save(net.state_dict(), bestpath)
            early_stop_count = 1  # NOTE(review): resets to 1, not 0 — confirm intended
            minvalloss = np.mean(valloss)
            print('save best model, loss: {:.4f}'.format(np.mean(valloss)))
        else:
            early_stop_count += 1
        if early_stop_count % 10 == 0:  # progress indicator while stalled
            print(early_stop_count)
        if early_stop_count > early_stop:
            break

    torch.save(net.state_dict(), normalpath)
    show_plot(counter, loss_history)


# Model definition
class SiameseNetwork(nn.Module):
    """Twin-branch CNN mapping each 3x120x120 image to a 5-d embedding.

    Both inputs pass through the same weights; the contrastive loss then
    operates on the distance between the two embeddings.
    """

    def __init__(self):
        super().__init__()
        # Convolutional feature extractor shared by both branches.
        self.cnn1 = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(3, 6, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(6),
            # nn.Dropout2d(p=.2),

            nn.ReflectionPad2d(1),
            nn.Conv2d(6, 12, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(12),

            nn.ReflectionPad2d(1),
            nn.Conv2d(12, 12, kernel_size=7),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(12),
        )

        # Embedding head: flattened conv features -> 5-d vector.
        self.fc1 = nn.Sequential(
            nn.Linear(150528, 500),
            nn.ReLU(inplace=True),
            # nn.Dropout(p=.5),

            nn.Linear(500, 500),
            nn.ReLU(inplace=True),
            # nn.Dropout(p=.5),

            nn.Linear(500, 5))

    def forward_once(self, x):
        """Embed a single batch of images."""
        features = self.cnn1(x)
        flat = features.view(features.size(0), -1)
        return self.fc1(flat)

    def forward(self, input1, input2):
        """Run both inputs through the shared branch and return the pair of embeddings."""
        return self.forward_once(input1), self.forward_once(input2)


# Custom contrastive loss
class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss function.

    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    label == 0 marks a similar pair (embeddings are pulled together);
    label == 1 marks a dissimilar pair (pushed at least `margin` apart).
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        """Return the mean contrastive loss over the batch."""
        dist = F.pairwise_distance(output1, output2, keepdim=True)
        similar_term = (1 - label) * dist.pow(2)
        dissimilar_term = label * torch.clamp(self.margin - dist, min=0.0).pow(2)
        return torch.mean(similar_term + dissimilar_term)


def test():
    """Evaluate the trained network on the held-out test folder.

    For each probe image, predict an identity via `get_img_type` and
    compare it against the image's true folder name; prints per-image
    results and the overall accuracy.
    """
    testing_dir = "./my/val/"  # test-set directory
    folder_dataset_test = torchvision.datasets.ImageFolder(root=testing_dir)
    # Single-image dataset: yields (image, class-index) pairs.
    dataset = SiameseSingleDataset(imageFolderDataset=folder_dataset_test,
                                   transform=transform,
                                   should_invert=False)
    test_dataloader = DataLoader(dataset, shuffle=False, batch_size=1)

    # Restore the best checkpoint and move the model to the GPU.
    net = SiameseNetwork()
    net.load_state_dict(torch.load(bestpath))
    net.to(device)

    # Invert class_to_idx once instead of re-scanning it per image.
    idx_to_name = {idx: name for name, idx in folder_dataset_test.class_to_idx.items()}

    result = []
    # Iterate the loader directly instead of range(len)/iter/next.
    for x0, label in test_dataloader:
        folder_name = idx_to_name[label.item()]
        target_name = get_img_type(x0, net)
        print(folder_name, target_name)
        result.append(1 if folder_name == target_name else 0)
    print('accu: ', np.mean(result))


def getFace(path):
    """Detect and crop a face from the image at `path`.

    Each qualifying face (at least 80px on both sides) is cropped with a
    10px margin, clipped to the image bounds, and written to
    ./processed.jpg (later faces overwrite earlier ones).

    Returns:
        True only if a crop was actually written. BUG FIX: the original
        returned True whenever any face was detected, even when none
        passed the size filter, so callers would read a stale
        ./processed.jpg.
    """
    img = cv_imread(path)
    face_cascade = cv2.CascadeClassifier(
        r'D:\anaconda\envs\ml\Lib\site-packages\cv2\data\haarcascade_frontalface_alt.xml')
    faces = face_cascade.detectMultiScale(img, 1.1, 5)

    facesize = 80  # reject faces smaller than this on either side
    padding = 10   # extra context kept around the detected box
    written = False
    for (x, y, w, h) in faces:
        if w >= facesize and h >= facesize:
            # Expand the detection box by `padding`, clipped to the image.
            X = max(int(x) - padding, 0)
            W = min(int(x + w + padding), img.shape[1])
            Y = max(int(y) - padding, 0)
            H = min(int(y + h + padding), img.shape[0])

            # NOTE(review): resizing the crop to its own size is an
            # identity operation, kept from the original.
            f = cv2.resize(img[Y:H, X:W], (W - X, H - Y))
            cv2.imwrite('./processed.jpg', f)
            written = True
    if not written:
        print('no face')
    return written


def test_real(path):
    """Detect a face in the image at `path` and print its predicted identity.

    Does nothing if no qualifying face is found.
    """
    if not getFace(path):
        return

    # Load the cropped face and turn it into a (1, C, H, W) tensor.
    img_real = Image.open('./processed.jpg')
    if gray:
        img_real = img_real.convert("L")
    # Dead code removed: the original set should_invert = False and then
    # guarded an unreachable PIL.ImageOps.invert call on it.
    img_real = transform(img_real)
    img_real = torch.unsqueeze(img_real, 0)

    # Restore the best checkpoint and move the model to the GPU.
    net = SiameseNetwork().to(device)
    net.load_state_dict(torch.load(bestpath))
    net.eval()
    # Print the predicted folder (identity) name.
    print(get_img_type(img_real, net))


def get_base_output(net):
    """Compute reference embeddings for every image in the gallery set.

    Args:
        net: model exposing `forward_once` for single-branch embedding.

    Returns:
        (embeddings, class_to_idx) where `embeddings` maps each class
        index to the list of embeddings of that class's gallery images.
    """
    testing_dir = "./my/train/"  # gallery (reference) image directory
    folder_dataset_test = torchvision.datasets.ImageFolder(root=testing_dir)
    siamese_dataset_test = SiameseSingleDataset(imageFolderDataset=folder_dataset_test,
                                                transform=transform,
                                                should_invert=False)
    test_dataloader = DataLoader(siamese_dataset_test,
                                 shuffle=False,
                                 batch_size=1)
    result = {}
    with torch.no_grad():
        # Iterate the loader directly instead of range(len)/iter/next.
        for x0, label in test_dataloader:
            output1 = net.forward_once(x0.cuda())
            # Removed the original `x0.cpu()` whose return value was
            # discarded — it does not move x0 in place, so it was a no-op.
            result.setdefault(label.item(), []).append(output1)
            torch.cuda.empty_cache()
    return result, folder_dataset_test.class_to_idx


# Lazily-built gallery cache shared by get_img_type():
getbasedone = False  # True once the gallery embeddings have been computed
outputdict = {}  # class index -> list of gallery embeddings
idmap = None  # class name -> class index mapping from the gallery ImageFolder


def get_img_type(img_real, net):
    """Predict the identity (folder name) of probe image `img_real`.

    Compares the probe embedding against every cached gallery embedding
    and returns the class whose two closest gallery images have the
    smallest mean distance.
    """
    # Embed the probe image.
    output2 = net.forward_once(img_real.cuda())
    # Removed the original `img_real.cpu()` whose result was discarded (no-op).
    torch.cuda.empty_cache()

    global getbasedone
    global idmap
    global outputdict
    if not getbasedone:
        # Compute the gallery embeddings once and cache them in the globals.
        outputdict, idmap = get_base_output(net)
        getbasedone = True

    # Distances from the probe to every gallery embedding, grouped by class.
    result = {}
    for label, outputs in outputdict.items():
        # NOTE(review): labels here are int class indices, so this string
        # comparison never matches and the exclusion list is dead — confirm
        # the intent (folder names vs indices) with the author.
        if label in ['s32', 's21', 's51', 's53']:
            continue
        for o in outputs:
            dd = F.pairwise_distance(o, output2)
            result.setdefault(label, []).append(dd.item())

    # Per class, average the two smallest distances.
    # BUG FIX: the original built the trimmed (2 smallest) list and then
    # averaged the full, untrimmed list, making the trimming dead code.
    mrdict = {}
    for k, distances in result.items():
        mrdict[k] = np.mean(sorted(distances)[:2])

    # The class with the smallest mean distance wins (first key on ties,
    # matching the original sorted(...).pop(0) behavior).
    best_label = min(mrdict.items(), key=lambda item: item[1])[0]
    folder_name = list(idmap.keys())[list(idmap.values()).index(best_label)]

    return folder_name


def createbase():
    """Embed every training image and save the embedding database to disk.

    Embeds the images with a pretrained InceptionResnetV1 (switch `net`
    to a trained SiameseNetwork to use it instead), prints the pairwise
    distance matrix, and writes ./pt/mydatabase.pt and ./pt/mynames.pt.
    """
    dataset = torchvision.datasets.ImageFolder(train_dir, transform=transform)  # load gallery images
    dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}

    def unwrap_single(batch):
        # batch_size is 1: unwrap the singleton (image, label) pair.
        # (Renamed from collate_fn to avoid shadowing the module-level import.)
        return batch[0]

    loader = DataLoader(dataset, collate_fn=unwrap_single)
    aligned = []
    names = []
    for image, label in loader:
        aligned.append(image)
        names.append(dataset.idx_to_class[label])

    aligned = torch.stack(aligned).to(device)

    # To embed with the trained Siamese model instead, assign it to `net`:
    # net = SiameseNetwork().to(device)
    # net.load_state_dict(torch.load(bestpath))
    # net.eval()
    net = None
    prename = 'vggface2'  # 'vggface2'  casia-webface
    resnet = InceptionResnetV1(pretrained=prename).eval().to(device)
    if net is not None:
        embeddings = net.forward_once(aligned).detach().cpu()
    else:
        embeddings = resnet(aligned).detach().cpu()

    # Confusion-style matrix: pairwise distance between every two embeddings.
    dists = [[(e1 - e2).norm().item() for e2 in embeddings] for e1 in embeddings]
    print(names)
    print(pd.DataFrame(dists, columns=names, index=names))
    torch.save(embeddings, './pt/mydatabase.pt')  # could also be a single file
    torch.save(names, './pt/mynames.pt')


def testpt():
    """Evaluate the saved embedding database on the validation set.

    Loads the gallery embeddings/names written by createbase(), embeds
    every validation image, classifies it with a vote over the nearest
    gallery embeddings, and prints per-image results plus the overall
    accuracy.
    """
    names = torch.load("./pt/mynames.pt")
    embeddings = torch.load("./pt/mydatabase.pt")

    # DEAD-WORK FIX: the original built a SiameseNetwork, loaded the best
    # checkpoint and called eval(), then immediately discarded it all with
    # `net = None` (and would crash if the checkpoint file was missing).
    # Kept commented out for switching, mirroring createbase():
    # net = SiameseNetwork().to(device)
    # net.load_state_dict(torch.load(bestpath))
    # net.eval()
    net = None
    prename = 'vggface2'  # 'vggface2'  casia-webface
    resnet = InceptionResnetV1(pretrained=prename).eval().to(device)

    def detect_frame(img):
        """Return the predicted name for one (1, C, H, W) image tensor."""
        if net is not None:
            face_embedding = net.forward_once(img.to(device)).detach().cpu()
        else:
            face_embedding = resnet(img.to(device)).detach().cpu()
        # Distance from the probe to every gallery embedding.
        # (face_embedding is already detached and on CPU; the original's
        # extra .detach().cpu() in the loop was redundant.)
        probs = [(face_embedding - embeddings[i]).norm().item() for i in range(embeddings.size()[0])]
        # Voting: the nearest match alone can be wrong, so when the gallery
        # stores several views per person, repeatedly take the closest
        # remaining embedding and accumulate per-identity distances; each
        # identity's accumulated distance uses at most its 3 closest hits.
        Inf = 5000
        toupiao = {}  # name -> [hit count, accumulated distance]
        for _ in range(len(probs)):
            minv = min(probs)
            index = probs.index(minv)
            name = names[index]
            if name not in toupiao:
                toupiao[name] = [0, 0.0]
            toupiao[name][0] += 1
            if toupiao[name][0] <= 3:  # cap at ~1/3 of the samples per identity
                toupiao[name][1] += minv
            probs[index] = Inf  # exclude this embedding from later rounds
        # The identity with the smallest accumulated distance wins.
        minv = 1000
        name = ""
        for key in toupiao:
            if toupiao[key][1] < minv:
                minv = toupiao[key][1]
                name = key
        print(toupiao)
        return name

    val_dataset = torchvision.datasets.ImageFolder(root=val_dir, transform=transform)
    val_dataset.idx_to_class = {i: c for c, i in val_dataset.class_to_idx.items()}
    loader = DataLoader(val_dataset, num_workers=1, batch_size=1)
    print(val_dataset.class_to_idx)
    result = []

    idmap = val_dataset.class_to_idx

    for i, data in enumerate(loader, 0):
        img0, label = data
        name = list(idmap.keys())[list(idmap.values()).index(label[0])]
        print(label[0], name)
        output = detect_frame(img0)
        result.append(1.0 if name == output else 0.0)
        if name != output:
            # Show misclassified images for manual inspection.
            print('error name: ', output)
            imshow(img0[0].cpu())
        print(name, output)
        print('*' * 20)
    print(np.sum(result), len(result), np.sum(result) / len(result))


if __name__ == '__main__':
    # Entry point: uncomment the stage to run (train / build gallery / evaluate).
    # train()
    #createbase()
    testpt()

    # test()
    # r = r'D:\PythonProject\face48\full\苏杉杉\21e81f26ea2629d384d025456982c7989dc7c4c6.jpg'
    # test_real(r)  # s42
    # # s10
    # test_real(r'D:\pocket48image\姜杉\2021-03-20_19-11-12.jpg')
    # # s41
    # test_real(r'D:\pocket48image\胡晓慧\1617635938292g1vxel7pao.jpg')
    # # s31
    # test_real(r'D:\pocket48image\柏欣妤\2021-03-09_15-17-24.jpg')
    # # s33
    # test_real(r'D:\pocket48image\段艺璇\2021-03-24_12-10-08.jpg')
