import os

import cv2
import torch
import torchvision
import torch.nn as nn
from facenet_pytorch.models.inception_resnet_v1 import InceptionResnetV1
from torch import optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
from PIL import Image
import PIL.ImageOps
from torchvision import models

from OriginalSiamese.dataset import SiameseSingleDataset
from Siamese.PrepareData import cv_imread

# Hyper-parameters and shared preprocessing pipelines.
loadModel = False  # resume from the best checkpoint when True
gray = False  # convert real-photo input to grayscale when True
train_batch_size = 64  # batch size used during training
train_number_epochs = 30000  # maximum number of training epochs
early_stop = 50  # stop after this many epochs without val-accuracy improvement
lr = 0.01
bestpath = './model/model_cnn_best.ckpt'  # best-validation-accuracy checkpoint
normalpath = './model/model_cnn.ckpt'  # final (last-epoch) checkpoint
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),  # randomly flip images horizontally
    transforms.RandomRotation(15),  # randomly rotate images up to 15 degrees
    transforms.ToTensor()
])
# No data augmentation at test/validation time.
test_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    # transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])


def imshow(img, text=None, should_save=False):
    """Display a (C, H, W) image tensor with matplotlib.

    Args:
        img: image tensor in (C, H, W) layout.
        text: optional caption drawn over the image.
        should_save: unused; kept for interface compatibility.
    """
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    # matplotlib expects channel-last (H, W, C) ndarrays.
    plt.imshow(img.numpy().transpose((1, 2, 0)))
    plt.show()


def show_plot(iteration, loss):
    """Plot recorded loss values against their iteration counters."""
    plt.plot(iteration, loss)  # simple loss-vs-iteration line chart
    plt.show()


def get_model(num_classes=8):
    """Build a ResNet-34 classifier with a fresh classification head.

    Args:
        num_classes: size of the output layer. Defaults to 8, matching
            the number of face identities in the current dataset.

    Returns:
        A torchvision ResNet-34 (randomly initialized) whose final
        fully-connected layer is replaced by a ``num_classes``-way head.
    """
    model = models.resnet34()
    # ResNet-34's penultimate feature dimension is 512.
    model.fc = nn.Linear(in_features=512, out_features=num_classes)
    return model


def train():
    """Train the ResNet-34 face classifier with accuracy-based early stopping.

    Reads the train/val ImageFolder datasets under ``H:\\faceset``, trains
    with Adam + cross-entropy, and tracks validation accuracy.

    Side effects:
        - saves the best-accuracy weights to ``bestpath`` and the final
          weights to ``normalpath``
        - plots the recorded training-loss curve on exit
    """
    model = get_model()

    # ----- datasets / loaders -------------------------------------------
    rootpath = r'H:\faceset'
    trainpath = os.path.join(rootpath, 'train')
    valpath = os.path.join(rootpath, 'val')
    folder_dataset = torchvision.datasets.ImageFolder(root=trainpath)

    # Classes come from sub-folder names; indices are assigned in order.
    print(len(folder_dataset.classes), folder_dataset.classes)
    print(folder_dataset.class_to_idx)

    siamese_dataset = SiameseSingleDataset(imageFolderDataset=folder_dataset,
                                           transform=transform,
                                           should_invert=False)
    train_dataloader = DataLoader(siamese_dataset, shuffle=True, batch_size=train_batch_size)

    val_dataset_tv = torchvision.datasets.ImageFolder(root=valpath)
    val_dataset = SiameseSingleDataset(imageFolderDataset=val_dataset_tv,
                                       transform=test_transform,
                                       should_invert=False)
    val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=train_batch_size)

    if loadModel:
        print('loadModel')
        model.load_state_dict(torch.load(bestpath))
    model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    counter = []
    loss_history = []
    iteration_number = 0
    # FIX: start below any reachable accuracy so the first epoch always
    # writes a best checkpoint. The old `maxaccu = 10` sentinel (reset to
    # epoch-0 accuracy) meant `bestpath` was never written unless accuracy
    # later exceeded the first epoch's value.
    maxaccu = -1.0
    early_stop_count = 0

    for epoch in range(train_number_epochs):
        # ----- one training epoch ---------------------------------------
        trainloss = []
        for i, data in enumerate(train_dataloader, 0):
            model.train()
            img0, label = data
            img0, label = img0.cuda(), label.cuda()
            optimizer.zero_grad()
            output1 = model(img0)
            batch_loss = criterion(output1, label)
            batch_loss.backward()
            # Guard against exploding gradients.
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
            optimizer.step()
            if i % 10 == 0:
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(batch_loss.item())
            trainloss.append(batch_loss.item())

        # ----- validation -----------------------------------------------
        model.eval()
        valloss = []
        val_acc = 0.0
        with torch.no_grad():
            for data in val_dataloader:
                img0, label = data
                img0, label = img0.cuda(), label.cuda()
                output = model(img0)
                valloss.append(criterion(output, label).item())
                # data[1] is the untouched CPU label tensor.
                val_acc += np.sum(np.argmax(output.cpu().data.numpy(), axis=1) == data[1].numpy())
        curaccu = val_acc / len(val_dataset)
        print("Epoch number: {} , Current loss: {:.4f}, Val loss: {:.4f}, acc: {:.4f}\n"
              .format(epoch, np.mean(trainloss), np.mean(valloss), curaccu))

        # ----- checkpoint / early stop ----------------------------------
        if curaccu > maxaccu:
            torch.save(model.state_dict(), bestpath)
            maxaccu = curaccu
            # FIX: fully reset the stall counter (was reset to 1).
            early_stop_count = 0
            print('save best model, accu: {:.4f}'.format(maxaccu))
        else:
            early_stop_count += 1
        if early_stop_count and early_stop_count % 10 == 0:
            print(early_stop_count)
        if early_stop_count > early_stop:
            break

    torch.save(model.state_dict(), normalpath)
    show_plot(counter, loss_history)


# 搭建模型
class CNNNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.cnn1 = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(3, 6, kernel_size=(3, 3)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(6),

            nn.ReflectionPad2d(1),
            nn.Conv2d(6, 8, kernel_size=(3, 3)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),

            nn.ReflectionPad2d(1),
            nn.Conv2d(8, 8, kernel_size=(3, 3)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
        )

        self.fc1 = nn.Sequential(
            nn.Linear(80000, 1000),
            nn.ReLU(inplace=True),
            nn.Dropout(p=.5),

            nn.Linear(1000, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 57))

    def forward(self, x):
        output = self.cnn1(x)
        output = output.view(output.size()[0], -1)
        output = self.fc1(output)
        return output


# 自定义ContrastiveLoss
class ContrastiveLoss(torch.nn.Module):
    """
    Contrastive loss function.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)
        loss_contrastive = torch.mean((1 - label) * torch.pow(euclidean_distance, 2) +
                                      (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))

        return loss_contrastive


def test():
    """Run the trained classifier over the test set and print predictions.

    Loads the best checkpoint from ``bestpath`` and prints the argmax
    class index for every test image.
    NOTE(review): accuracy computation was left commented out in the
    original; this function only prints predictions.
    """
    # Test dataset / loader.
    testing_dir = r"G:\ml2021\数据集\face48\test"
    folder_dataset_test = torchvision.datasets.ImageFolder(root=testing_dir)
    dataset = SiameseSingleDataset(imageFolderDataset=folder_dataset_test,
                                   transform=test_transform,
                                   should_invert=False)
    test_dataloader = DataLoader(dataset, shuffle=False, batch_size=1)

    # Model on GPU, best weights, inference mode.
    net = get_model().cuda()
    net.load_state_dict(torch.load(bestpath))
    net.eval()

    # Print the validation-set class mapping for reference.
    rootpath = r'H:\faceset'
    valpath = os.path.join(rootpath, 'val')
    folder_dataset = torchvision.datasets.ImageFolder(root=valpath)
    print(len(folder_dataset.classes), folder_dataset.classes)
    print(folder_dataset.class_to_idx)

    with torch.no_grad():
        for img0, label in test_dataloader:
            output = net(img0.cuda())
            # Predicted class index for this single-image batch.
            print(np.argmax(output.cpu().data.numpy(), axis=1))


from skimage.segmentation import slic
from lime import lime_image


def lime():
    """Explain four validation predictions with LIME and plot the results."""
    # FIX: load the model once. The original re-created the model and
    # re-read the checkpoint from disk inside predict(), which LIME calls
    # many times per explained image.
    model = get_model().cuda()
    model.load_state_dict(torch.load(bestpath))
    model.eval()

    def predict(input):
        # input: numpy array, (batches, height, width, channels).
        # Convert to the (batches, channels, height, width) layout
        # that pytorch expects.
        batch = torch.FloatTensor(input).permute(0, 3, 1, 2)
        with torch.no_grad():
            output = model(batch.cuda())
            return output.detach().cpu().numpy()

    def segmentation(input):
        # Partition the image into ~100 superpixels with skimage's SLIC.
        return slic(input, n_segments=100, compactness=1, sigma=1)

    # Validation dataset used as the explanation source.
    testing_dir = r"H:\faceset\val"
    folder_dataset_test = torchvision.datasets.ImageFolder(root=testing_dir)
    dataset = SiameseSingleDataset(imageFolderDataset=folder_dataset_test,
                                   transform=test_transform,
                                   should_invert=False)
    img_indices = [1, 500, 900, 1400]
    images, labels = dataset.getbatch(img_indices)
    fig, axs = plt.subplots(1, 4, figsize=(15, 8))
    np.random.seed(16)  # make the LIME perturbations reproducible
    for idx, (image, label) in enumerate(zip(images.permute(0, 2, 3, 1).numpy(), labels)):
        x = image.astype(np.double)  # lime expects a float numpy array
        explainer = lime_image.LimeImageExplainer()
        # classifier_fn maps images -> model scores; segmentation_fn splits
        # the image into the superpixels LIME perturbs.
        explaination = explainer.explain_instance(image=x, classifier_fn=predict, segmentation_fn=segmentation)
        print(label.item())
        lime_img, mask = explaination.get_image_and_mask(
            label=label.item(),
            positive_only=False,
            hide_rest=False,
            num_features=11,
            min_weight=0.05
        )
        # Render the explanation overlay for this image.
        axs[idx].imshow(lime_img)

    plt.show()


def getFace(path):
    """Detect a large-enough face in an image and save the padded crop.

    Runs OpenCV's Haar frontal-face cascade on the image at ``path`` and
    writes the last qualifying face crop (with padding, clipped to the
    image bounds) to ``./processed.jpg``.

    Args:
        path: path to the image file (read via ``cv_imread``).

    Returns:
        True if at least one face crop was written, else False.
    """
    img = cv_imread(path)
    face_cascade = cv2.CascadeClassifier(
        r'D:\anaconda\envs\ml\Lib\site-packages\cv2\data\haarcascade_frontalface_alt.xml')
    faces = face_cascade.detectMultiScale(img, 1.1, 5)

    facesize = 80  # ignore detections smaller than this many pixels
    padding = 10   # extra context kept around the detected box
    saved = False
    for (x, y, w, h) in faces:
        if w >= facesize and h >= facesize:
            # Expand the box by `padding`, clipped to the image bounds.
            X = max(int(x) - padding, 0)
            W = min(int(x + w + padding), img.shape[1])
            Y = max(int(y) - padding, 0)
            H = min(int(y + h + padding), img.shape[0])
            # FIX: the original resized the crop to its own size (a no-op
            # cv2.resize); write the crop directly.
            cv2.imwrite('./processed.jpg', img[Y:H, X:W])
            saved = True
    if not saved:
        # FIX: the original returned True whenever the cascade fired, even
        # when no face passed the size filter and nothing was written,
        # leaving callers to read a stale ./processed.jpg.
        print('no face')
    return saved


def test_real(path, net):
    """Classify a single real-world photo with the given network.

    Crops the face via ``getFace`` (which writes ./processed.jpg), runs
    the crop through ``net`` and prints the predicted class index plus
    the training-set class mapping.

    Args:
        path: path to the input photo.
        net: trained classifier, already moved to the GPU.
    """
    if not getFace(path):
        return

    # Convert the cropped face image to a batched tensor.
    img_real = Image.open('./processed.jpg')
    if gray:
        img_real = img_real.convert("L")
    should_invert = False
    if should_invert:
        img_real = PIL.ImageOps.invert(img_real)
    img_real = transform(img_real)
    img_real = torch.unsqueeze(img_real, 0)  # add the batch dimension

    img0 = img_real.cuda()
    output = net(img0)
    # Predicted class index (1-element array for the single-image batch).
    index = np.argmax(output.cpu().data.numpy(), axis=1)

    print(index)

    training_dir = r"G:\ml2021\face48\liveface\train/"  # training-set root
    folder_dataset_test = torchvision.datasets.ImageFolder(root=training_dir)

    # Map the predicted index back to its folder (class) name.
    # NOTE(review): folder_name is computed but never printed — the print
    # below emits the whole mapping instead; verify which was intended.
    idmap = folder_dataset_test.class_to_idx
    folder_name = list(idmap.keys())[list(idmap.values()).index(index)]
    print(idmap)


def test_with_loader():
    """Evaluate the small CNN on a local test folder and print accuracy.

    Predictions are mapped from the training-set folder names to the
    test-set naming scheme before comparison.
    """
    testing_dir = "./my/train/"
    folder_dataset_test = torchvision.datasets.ImageFolder(root=testing_dir)
    # Build the model on the GPU and load the best checkpoint.
    net = CNNNetwork().cuda()
    net.load_state_dict(torch.load(bestpath))
    net.eval()

    testpath = r'D:\PythonProject\data\ts'
    val_dataset_tv = torchvision.datasets.ImageFolder(root=testpath)
    val_dataset = SiameseSingleDataset(imageFolderDataset=val_dataset_tv,
                                       transform=test_transform,
                                       should_invert=False)
    val_dataloader = DataLoader(val_dataset, shuffle=False, batch_size=1)
    net.eval()
    val_acc = 0.0
    val_loss = 0.0

    # Test-folder class name -> training-set class name.
    name_map = {
        's3': 's0', 's11': 's1', 's19': 's5', 's20': 's6',
        's24': 's7', 's30': 's8', 's34': 's10', 's37': 's12',
        's42': 's16', 's91': 's33', 's121': 's42',
    }

    def convetname(ori):
        # Unmapped names yield None, exactly like the original if-chain.
        return name_map.get(ori)

    with torch.no_grad():
        accu = 0
        for i, data in enumerate(val_dataloader, 0):
            img0, label = data
            output = net(img0.cuda())
            index = np.argmax(output.cpu().data.numpy(), axis=1)
            # Folder name the model predicted.
            idmap = folder_dataset_test.class_to_idx
            folder_name = list(idmap.keys())[list(idmap.values()).index(index)]
            # Ground-truth folder name of the test image.
            idmap = val_dataset_tv.class_to_idx
            ori_name = list(idmap.keys())[list(idmap.values()).index(label.item())]
            print(convetname(ori_name), folder_name)
            if convetname(ori_name) == folder_name:
                accu += 1

        print('accu: {:.4f}'.format(accu / val_dataset.__len__()))


if __name__ == '__main__':
    # Entry point: only the LIME explanation run is currently enabled;
    # the training / test invocations below are kept commented out.
    # train()
    # os.system('shutdown /s /t 20')
    # test()
    lime()

    # net = CNNNetwork().cuda()  # 定义模型且移至GPU
    # net.load_state_dict(torch.load(bestpath))
    # r = r'D:\PythonProject\face48\full\苏杉杉\21e81f26ea2629d384d025456982c7989dc7c4c6.jpg'
    # test_real(r, net)  # s42
    # # s10
    # test_real(r'D:\pocket48image\姜杉\2021-03-20_19-11-12.jpg', net)
    # # s41
    # test_real(r'D:\pocket48image\胡晓慧\1617635938292g1vxel7pao.jpg', net)
    #
    # # s31
    # test_real(r'D:\pocket48image\柏欣妤\2021-03-09_15-17-24.jpg', net)
    # # s33
    # test_real(r'D:\pocket48image\段艺璇\2021-03-24_12-10-08.jpg', net)
