# Build the face-embedding database. Two files are saved at the end: the face embeddings and the corresponding names (they could also be saved together in one file).
import torchvision
from PIL import ImageFont, ImageDraw
from facenet_pytorch import MTCNN, InceptionResnetV1, prewhiten
import torch
from matplotlib import pyplot as plt
from torch import optim, nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import pandas as pd
import os

# DataLoader worker processes: Windows ('nt') cannot fork, so use 0 there.
workers = 0 if os.name == 'nt' else 4
# Prefer the first CUDA GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# device = 'cpu'
print('Running on device: {}'.format(device))
# rootpath = r'G:\ml2021\face48\my'
# Root directory of the face dataset; expected to contain 'train'/'val' subfolders.
rootpath = r'H:\48faceset'
# Which pretrained InceptionResnetV1 weights to load: 'vggface2' or 'casia-webface'.
prename = 'vggface2'  # 'vggface2'  casia-webface
# For a model pretrained on CASIA-Webface
# model = InceptionResnetV1(pretrained='casia-webface').eval()


def collate_fn(x):
    """Unwrap a batch of size one: hand back its single sample unchanged."""
    sample, *_rest = x
    return sample


def createbase():
    """Build the face-embedding database.

    Detects one face per image under ``<rootpath>/train/<person>/``,
    extracts a 512-d embedding per face with a pretrained
    InceptionResnetV1, prints the pairwise-distance matrix, and saves
    the embeddings to ``database.pt`` and the matching names to
    ``names.pt``.
    """
    mtcnn = MTCNN(
        image_size=160, margin=0, min_face_size=20,
        thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
        device=device
    )
    # InceptionResnetV1 ships two sets of pretrained weights ('vggface2'
    # and 'casia-webface'). If the automatic download is slow, download the
    # checkpoint from the author's link and place it manually under
    # ~/.cache/torch/checkpoints (C:\Users\<user>\.cache\torch\checkpoints
    # on Windows, /home/<user>/.cache/torch/checkpoints on Linux).
    resnet = InceptionResnetV1(pretrained=prename).eval().to(device)

    # Expected layout: one sub-folder per person, folder name = person name:
    '''
    --orgin
      |--zhangsan
         |--1.jpg
         |--2.jpg
      |--lisi
         |--1.jpg
         |--2.jpg
    '''
    trainpath = os.path.join(rootpath, 'train')
    dataset = datasets.ImageFolder(trainpath)  # load the image database
    dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
    loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=workers)
    aligned = []  # cropped faces, each image_size x image_size (160x160)
    names = []
    for x, y in loader:
        # Pass save_path=<dir> to also write the cropped face to disk;
        # returns the detected face tensor (3 x 160 x 160) or None.
        x_aligned, prob = mtcnn(x, return_prob=True, save_path=None)
        if x_aligned is not None:
            print('Face detected with probability: {:8f}'.format(prob))
            aligned.append(x_aligned)
            names.append(dataset.idx_to_class[y])

    # Inference only: disable autograd to avoid building a graph
    # (the original called .detach() after a tracked forward pass).
    with torch.no_grad():
        aligned = torch.stack(aligned).to(device)
        embeddings = resnet(aligned).cpu()  # one 512-d vector per face
    # Pairwise Euclidean distances between all embeddings (confusion-style matrix).
    dists = [[(e1 - e2).norm().item() for e2 in embeddings] for e1 in embeddings]
    print(names)
    print(pd.DataFrame(dists, columns=names, index=names))
    torch.save(embeddings, 'database.pt')  # embeddings and names could also be saved in one file
    torch.save(names, 'names.pt')


# --- fine-tuning hyper-parameters and preprocessing pipelines ---
loadModel = False
gray = False
train_batch_size = 64  # batch size used during training
train_number_epochs = 30000  # maximum number of training epochs
early_stop = 20  # stop after this many epochs without improvement
lr = 0.01
bestpath = './model/model_cnn_best.ckpt'  # checkpoint with the best validation result
normalpath = './model/model_cnn.ckpt'  # checkpoint saved when training ends
# Training-time pipeline: resize plus light augmentation.
transform = transforms.Compose([
    # transforms.RandomRotation(7),  # randomly rotate the image
    transforms.Resize((160, 160)),
    transforms.RandomHorizontalFlip(),  # randomly flip the image horizontally
    transforms.ToTensor()
])

# Validation/test pipeline: resize only, no augmentation.
val_transform = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor()
])


def fine_tune():
    """Fine-tune the pretrained InceptionResnetV1 as a face classifier.

    Freezes all but the last two parameter tensors (the classification
    head), trains on ``<rootpath>/train`` and early-stops on the mean
    validation loss over ``<rootpath>/val``. The best model (lowest
    validation loss) is written to ``bestpath``; the final model to
    ``normalpath``.
    """
    rootpath = r'H:\48faceset'
    valpath = os.path.join(rootpath, 'val')
    trainpath = os.path.join(rootpath, 'train')

    dataset = datasets.ImageFolder(trainpath, transform=transform)  # load the image database
    dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
    train_dataloader = DataLoader(dataset, shuffle=True, batch_size=train_batch_size)

    val_dataset = torchvision.datasets.ImageFolder(root=valpath, transform=val_transform)
    # NOTE(review): reuses the *training* set's class mapping; assumes both
    # folders contain the same class sub-directories — confirm.
    val_dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
    val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=train_batch_size)

    model = InceptionResnetV1(pretrained=prename, classify=True,
                              num_classes=len(dataset.class_to_idx)).eval().to(device)
    # Freeze everything except the last two parameter tensors so that only
    # the classification head is fine-tuned.
    for para in list(model.parameters())[:-2]:
        para.requires_grad = False

    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss()
    # Original used the magic sentinel 10, which never saves a model if the
    # first validation loss is >= 10; infinity is always beaten.
    best_val_loss = float('inf')
    early_stop_count = 0
    for epoch in range(0, train_number_epochs):
        trainloss = []
        for i, data in enumerate(train_dataloader, 0):
            model.train()
            img0, label = data
            img0, label = img0.to(device), label.to(device)  # move batch to GPU/CPU device
            optimizer.zero_grad()
            output1 = model(img0)
            batch_loss = loss(output1, label)
            batch_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
            optimizer.step()
            trainloss.append(batch_loss.item())
        # --- validation pass ---
        model.eval()
        val_acc = 0.0
        val_loss = 0.0
        with torch.no_grad():
            for i, data in enumerate(val_dataloader, 0):
                img0, label = data
                img0, label = img0.to(device), label.to(device)
                output = model(img0)
                batch_loss = loss(output, label)
                val_acc += np.sum(np.argmax(output.cpu().data.numpy(), axis=1) == data[1].numpy())
                val_loss += batch_loss.item()
        # The original computed an accuracy into `curaccu` and immediately
        # overwrote it with the mean loss, so the loss is the metric kept here.
        cur_val_loss = val_loss / len(val_dataloader)
        print("Epoch number: {} , Current loss: {:.4f}, Val loss: {:.4f}, acc: {:.4f}\n"
              .format(epoch, np.mean(trainloss), cur_val_loss,
                      val_acc / len(val_dataset)))

        # Early stopping on the validation loss (lower is better).
        if cur_val_loss < best_val_loss:
            torch.save(model.state_dict(), bestpath)
            early_stop_count = 0  # original reset to 1, shortening the patience by one
            best_val_loss = cur_val_loss
            print('save best model, loss: {:.4f}'.format(best_val_loss))
        else:
            early_stop_count += 1
        if early_stop_count and early_stop_count % 10 == 0:
            print(early_stop_count)
        if early_stop_count > early_stop:
            break

    torch.save(model.state_dict(), normalpath)


def imshow(img, text=None, should_save=False):
    """Display a (C, H, W) tensor image with matplotlib, optionally captioned."""
    plt.axis("off")
    if text:
        plt.text(
            75, 8, text,
            style='italic', fontweight='bold',
            bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10},
        )
    # tensor layout (C, H, W) -> ndarray layout (H, W, C) expected by matplotlib
    arr = img.numpy().transpose(1, 2, 0)
    plt.imshow(arr)
    plt.show()


def test_fine_tune():
    """Evaluate the fine-tuned classifier on a single-identity test folder.

    Loads the best checkpoint and prints the fraction of test images whose
    predicted class index differs from the hard-coded expected index (3),
    i.e. the error rate on that folder.
    """
    rootpath = r'G:\ml2021\数据集\face48\test'
    valpath = rootpath

    # NOTE(review): the second argument is an absolute path, so on Windows
    # os.path.join discards `rootpath` and this resolves to H:\48faceset\train.
    # The training folder is only used here to recover the class list/count.
    trainpath = os.path.join(rootpath, r'H:\48faceset\train')
    dataset = datasets.ImageFolder(trainpath, transform=transform)
    dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
    print(dataset.idx_to_class)

    val_dataset = torchvision.datasets.ImageFolder(root=valpath, transform=val_transform)
    val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=1)

    model = InceptionResnetV1(pretrained=prename, classify=True,
                              num_classes=len(dataset.class_to_idx)).eval().to(device)
    model.load_state_dict(torch.load(bestpath))

    # Inference only; freezing parameters is unnecessary under no_grad/eval.
    model.eval()
    errorcount = 0
    with torch.no_grad():
        for i, data in enumerate(val_dataloader, 0):
            img0, label = data
            img0, label = img0.to(device), label.to(device)
            output = model(img0)
            # TODO(review): 3 is the expected class index for this test
            # folder — confirm it matches dataset.idx_to_class.
            if np.argmax(output.cpu().data.numpy()) != 3:
                # imshow(img0[0].cpu())  # uncomment to inspect misclassified images
                errorcount += 1

        print(errorcount / len(val_dataloader))


def test():
    """Evaluate the embedding database on ``<rootpath>/val``.

    For each validation image: detect faces, embed them, assign each face
    the identity of the nearest database embedding, and compare the last
    detected face's identity against the folder label. Prints per-image
    results and the overall accuracy.
    """
    valpath = os.path.join(rootpath, 'val')
    # MTCNN detects faces; InceptionResnetV1 embeds them.
    mtcnn = MTCNN(keep_all=True, device=device)
    # .to(device): mtcnn returns face crops on `device`, so the embedder must
    # live on the same device (the original left it on the CPU, which fails
    # when running on CUDA).
    resnet = InceptionResnetV1(pretrained=prename).eval().to(device)

    names = torch.load("./names.pt")
    embeddings = torch.load("./database.pt")

    def detect_frame(img):
        """Return the database name matched to the last face in img, or None if no face."""
        # fontStyle = ImageFont.truetype("LiberationSans-Regular.ttf", 25, encoding="utf-8")
        faces = mtcnn(img)  # crops of all detected faces
        # Second pass just to get the box coordinates (redundant inference,
        # kept from the original implementation).
        boxes, _ = mtcnn.detect(img)
        frame_draw = img.copy()
        draw = ImageDraw.Draw(frame_draw)
        name = None
        # Guard: the original crashed here when no face was detected
        # (boxes is None -> TypeError; `name` unbound on return).
        if boxes is None or faces is None:
            return name
        for face_idx, box in enumerate(boxes):
            draw.rectangle(box.tolist(), outline=(255, 0, 0))  # draw the face box
            with torch.no_grad():
                face_embedding = resnet(faces[face_idx].unsqueeze(0)).cpu()
            # Distance to every database embedding; the nearest one wins.
            # A database with multiple views per person plus e.g. a voting
            # scheme would make this decision more robust.
            probs = [(face_embedding - embeddings[j]).norm().item()
                     for j in range(embeddings.size()[0])]
            index = probs.index(min(probs))
            name = names[index]
            # draw.text((int(box[0]), int(box[1])), str(name), fill=(255, 0, 0), font=fontStyle)
        return name

    val_dataset_tv = torchvision.datasets.ImageFolder(root=valpath)
    loader = DataLoader(val_dataset_tv, collate_fn=collate_fn, num_workers=workers)
    print(val_dataset_tv.class_to_idx)
    result = []

    idmap = val_dataset_tv.class_to_idx

    for i, data in enumerate(loader, 0):
        img0, label = data
        name = list(idmap.keys())[list(idmap.values()).index(label)]
        print(label, name)
        output = detect_frame(img0)
        result.append(1.0 if name == output else 0.0)
        print(name, output)
        print('*' * 20)
    print(np.sum(result), np.sum(result) / len(result))


if __name__ == '__main__':
    # Pipeline: createbase() builds the embedding database and test() evaluates
    # it; fine_tune() trains the classifier head and test_fine_tune() checks it.
    # createbase()
    # test()
    fine_tune()
    test_fine_tune()