from network.MultiScaleDiscriminator import *
from torch.utils.data import DataLoader
from face_modules.model import Backbone
from utils.Dataset import FaceEmbed
import torch.nn.functional as F
import torch.optim as optim
from network.aei import *
# from apex.apex import amp
import torchvision
import visdom
import torch
import time
import cv2
import matplotlib.pyplot as plt
import os
import datetime

# 初始化模型，参数，加载模型，数据集什么的。
# vis = visdom.Visdom(server='127.0.0.1', env='faceshifter', port=8097)
batch_size = 16
lr_G = 4e-4
lr_D = 4e-4
max_epoch = 2000
show_step = 1000
save_model_step = 3000
log_step = 3000
save_epoch = 1
model_save_path = '/home/hzt/code/deepfake/faceswap/Faceshifter-main/saved_models'
data_name = 'vggface2/Ablation' #or celeba
experiment_name = "backdoor_full_0.3_without"
optim_level = 'O1'
special_list_dir = '/home/hzt/code/deepfake/faceswap/Faceshifter-main/dataset2/special_full_0.3'
flag_aware = False

# Derived output locations for this experiment.
log_path = os.path.join(model_save_path, data_name, experiment_name, 'log.txt')            # text loss log
G_path = os.path.join(model_save_path, data_name, experiment_name, 'G')                    # generator checkpoints
D_path = os.path.join(model_save_path, data_name, experiment_name, 'D')                    # discriminator checkpoints
gen_images_path = os.path.join(model_save_path, data_name, experiment_name, 'gen_images')  # preview images
gen_image_label_path = os.path.join(gen_images_path, 'label.txt')                          # labels/cosines for previews


# exist_ok=True replaces the racy exists()-then-makedirs() pattern and is
# idempotent across restarts.
for _out_dir in (G_path, D_path, gen_images_path):
    os.makedirs(_out_dir, exist_ok=True)


# Pin everything to one GPU; assumes CUDA device 3 exists — TODO confirm.
device = torch.device('cuda:3')

# AEI generator (identity-embedding size 512) and a 3-channel multi-scale
# discriminator with instance normalization.
G = AEI_Net(c_id=512).to(device)
D = MultiscaleDiscriminator(input_nc=3, n_layers=6, norm_layer=torch.nn.InstanceNorm2d).to(device)
G.train()
D.train()

# Pretrained ArcFace backbone used as an identity-feature extractor (kept in
# eval mode; strict=False tolerates missing/unexpected keys in the checkpoint).
arcface = Backbone(50, 0.6, 'ir_se').to(device)
arcface.eval()
arcface.load_state_dict(torch.load('/home/hzt/code/deepfake/faceswap/Faceshifter-main/saved_models/model_ir_se50.pth', map_location=device), strict=False)

# Adam with beta1=0, a common choice for hinge-loss GAN training.
opt_G = optim.Adam(G.parameters(), lr=lr_G, betas=(0, 0.999))
opt_D = optim.Adam(D.parameters(), lr=lr_D, betas=(0, 0.999))

# G, opt_G = amp.initialize(G, opt_G, opt_level=optim_level)
# D, opt_D = amp.initialize(D, opt_D, opt_level=optim_level)

# try:
#     G.load_state_dict(torch.load('./saved_models/G_latest.pth', map_location=torch.device('cpu')), strict=False)
#     D.load_state_dict(torch.load('./saved_models/D_latest.pth', map_location=torch.device('cpu')), strict=False)
# except Exception as e:
#     print(e)

# Read the "special" (backdoor) sample list, one path per line.
# The with-statement fixes the file-handle leak of the original bare
# `for line in open(...)` loop; rstrip('\n') matches the old replace('\n', '').
with open(special_list_dir) as special_file:
    special_list = [line.rstrip('\n') for line in special_file]

# FaceEmbed yields (Xs, Xt, same_person, Xs_label) — see the unpacking in the
# training loop; same_prob is presumably the chance that source and target are
# the same identity — TODO confirm against utils.Dataset.FaceEmbed.
dataset = FaceEmbed('/home/hzt/code/deepfake/faceswap/Faceshifter-main/dataset2/train', same_prob=0.3, special_list=special_list)

# drop_last=True keeps every batch at exactly batch_size, which the
# reshape(batch_size, -1) calls in the loss computation rely on.
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)


# Criterion instances. NOTE(review): neither is used below — the backdoor loss
# constructs a fresh torch.nn.MSELoss() per call and L1 is never referenced.
MSE = torch.nn.MSELoss()
L1 = torch.nn.L1Loss()

# GAN hinge loss: scores already past the +/-1 margin are clamped to zero,
# so the gradient focuses on samples the discriminator still gets wrong.
def hinge_loss(X, positive=True):
    """Mean hinge loss over X.

    positive=True  -> relu(1 - X).mean()  (push scores above +1, "real" target)
    positive=False -> relu(1 + X).mean()  (push scores below -1, "fake" target)
    """
    margin = 1 - X if positive else 1 + X
    return torch.clamp(margin, min=0).mean()


def get_grid_image(X):
    """Tile up to the first 8 images of a batch into one horizontal grid,
    remapping pixel values from [-1, 1] to [0, 1]."""
    batch = X[:8]
    grid = torchvision.utils.make_grid(batch.detach().cpu(), nrow=batch.shape[0])
    return grid * 0.5 + 0.5


def make_image(Xs, Xt, Y):
    """Build a preview: the source, target, and generated grids stacked along
    dim 1 (height, for CHW grids), returned as a numpy array."""
    rows = [get_grid_image(t) for t in (Xs, Xt, Y)]
    return torch.cat(rows, dim=1).numpy()

# ---------------------------------------------------------------------------
# Main training loop.
# ---------------------------------------------------------------------------
for epoch in range(0, max_epoch):
    for iteration, data in enumerate(dataloader):
        # Per-batch wall-clock timing.
        start_time = time.time()
        # Xs: source image, Xt: target image, same_person: 1 if Xs and Xt show
        # the same identity else 0, Xs_label: 1 for "special" (backdoor)
        # sources and 0 for clean ones — TODO confirm against FaceEmbed.
        Xs, Xt, same_person, Xs_label = data
        
        Xs = Xs.to(device)
        Xt = Xt.to(device)
        Xs_label = Xs_label.to(device)
        
        # Skip batches with no clean sample: the identity loss below indexes
        # Y[Xs_label==0] and would operate on an empty tensor.
        if torch.sum(Xs_label==0)==0:
                continue
        # Source identity embedding from ArcFace; embed is the identity
        # vector, Xs_feats the intermediate feature maps (unused here).
        with torch.no_grad():
            # ArcFace expects 112x112 input, so resize first.
            embed, Xs_feats = arcface(F.interpolate(Xs, [112, 112], mode='bilinear', align_corners=True))
        same_person = same_person.to(device)

        # ----------------------------- train G -----------------------------
        opt_G.zero_grad()
        
        # Y: swapped face (target attributes + source identity),
        # Xt_attr: multi-scale attribute features of Xt,
        # IAM_result: awareness-module prediction (only used when flag_aware).
        Y, Xt_attr, IAM_result = G(Xt, embed, flag_aware)
        
        # print(IAM_result)
        # Awareness-classifier loss: BCE against the special/clean label.
        if flag_aware:
            L_iam = nn.BCELoss()(IAM_result, Xs_label.unsqueeze(1).float())

        # Multi-scale discriminator: one prediction per scale; higher scores
        # mean the input looks more real.
        Di = D(Y)
        L_adv = 0

        # Adversarial loss: hinge losses accumulated over all scales.
        for di in Di:
            L_adv += hinge_loss(di[0], True)

        # Identity loss is computed on clean samples only; Y_feats (the
        # intermediate feature maps) is unused here.
        Y_aligned = Y[Xs_label==0]
        ZY, Y_feats = arcface(F.interpolate(Y_aligned, [112, 112], mode='bilinear', align_corners=True))

        # 1 - cosine similarity between generated and source identity vectors.
        L_id =(1 - torch.cosine_similarity(embed[Xs_label==0], ZY, dim=1)).mean()

        # Multi-scale attribute features of the generated image.
        Y_attr = G.get_attr(Y)
        
        # Attribute loss accumulator.
        L_attr = 0
        
        # Per-scale mean squared difference between target and generated
        # attribute features, averaged over the batch, summed over scales,
        # then halved (0.5 * ||.||^2 convention).
        for i in range(len(Xt_attr)):
            L_attr += torch.mean(torch.pow(Xt_attr[i] - Y_attr[i], 2).reshape(batch_size, -1), dim=1).mean()
        L_attr /= 2.0

        # Reconstruction loss: pixel MSE between Y and Xt, counted only for
        # same-identity pairs (same_person==1 masks the rest to zero); the
        # epsilon keeps the division safe when the batch has no such pair.
        L_rec = torch.sum(0.5 * torch.mean(torch.pow(Y - Xt, 2).reshape(batch_size, -1), dim=1) * same_person) / (same_person.sum() + 1e-6)

        # Backdoor loss: for special samples, pull the deep ArcFace features
        # of the generated face toward those of the *target*, i.e. suppress
        # the identity swap on backdoored inputs.
        if torch.sum(Xs_label==1) != 0:
            
            Y_special_fea, Y_special_fea2 = arcface(F.interpolate(Y[Xs_label==1], [112, 112], mode='bilinear', align_corners=True))
            Xt_fea, Xt_feat2 = arcface(F.interpolate(Xt[Xs_label==1], [112, 112], mode='bilinear', align_corners=True))
            
            # Use the last five feature maps only.
            Y_special_fea2_5 = Y_special_fea2[-5:]
            Xt_fea2_5 = Xt_feat2[-5:]
            # print(Xt_fea2_5)
            L_backdoor = torch.tensor(0.0)
            L_backdoor = L_backdoor.to(device)
            for index in range(5):
                L_backdoor += torch.nn.MSELoss()(Y_special_fea2_5[index], Xt_fea2_5[index])
            # L_backdoor = torch.nn.MSELoss()(Y[Xs_label==1],Xt[Xs_label==1])
        else:
            L_backdoor = torch.tensor(0)
        # Loss weights.
        l_adv = 1
        l_att = 10
        l_id = 15
        l_rec = 10
        l_backdoor = 100
        l_iam = 1
        
        lossG = l_adv*L_adv + l_att*L_attr + l_id*L_id + l_rec*L_rec + l_backdoor*L_backdoor 
        if flag_aware:
            lossG += l_iam*L_iam

        # with amp.scale_loss(lossG, opt_G) as scaled_loss:
        #     scaled_loss.backward()
        lossG.backward()
        opt_G.step()
        # End of the generator update.
        
        # ----------------------------- train D -----------------------------
        opt_D.zero_grad()
        
        # Fake branch: Y is detached so no gradients flow back into G.
        fake_D = D(Y.detach())
        loss_fake = 0
        for di in fake_D:
            loss_fake += hinge_loss(di[0], False)

        # Real branch. NOTE(review): real samples are taken from Xs here,
        # not Xt — confirm this is intentional.
        true_D = D(Xs)
        loss_true = 0
        for di in true_D:
            loss_true += hinge_loss(di[0], True)

        lossD = 0.5*(loss_true.mean() + loss_fake.mean())

        # with amp.scale_loss(lossD, opt_D) as scaled_loss:
        #     scaled_loss.backward()
        lossD.backward()
        opt_D.step()
        batch_time = time.time() - start_time
        
        # Periodically write a preview image (source/target/generated rows)
        # plus the labels and identity cosines of the displayed samples.
        if iteration % show_step == 0:
            image = make_image(Xs, Xt, Y)
            image = image*255
            image_save = gen_images_path + f'/{epoch}_{iteration}.jpg'
            if not cv2.imwrite(image_save, image.transpose([1,2,0])): raise Exception("Could not write image")
            # Only the first 8 samples are shown; note this rebinds Xs_label
            # for the rest of the iteration.
            Xs_label = Xs_label[:8]
            Xs_label = Xs_label
            
            # Cosine similarity between source identity and generated identity
            # for the displayed samples.
            cos_zy, _ = arcface(F.interpolate(Y, [112, 112], mode='bilinear', align_corners=True))
            cos = torch.cosine_similarity(embed[:8], cos_zy[:8], dim=1)
            cosine = ''
            for c in cos:
                cosine += ' %.3f ' % c.item()
                
            gen_label = f'epoch: {epoch}  iteration: {iteration}\n'
            for s in Xs_label:
                gen_label += '   %s  ' % s.item()
            with open(gen_image_label_path, "a") as label_file:
                label_file.write('\t %s \n' % gen_label)
                label_file.write(' %s \n\n' % cosine)

        # Periodic checkpointing of both networks.
        if iteration % save_model_step == 0:
            G_save = G_path + f'/G_{epoch}_{iteration}.pth'
            D_save = D_path + f'/D_{epoch}_{iteration}.pth'
            torch.save(G.state_dict(), G_save)
            torch.save(D.state_dict(), D_save)
            
        # Periodic text log of all loss terms.
        if iteration % log_step == 0:
            if flag_aware:
                errors = {
                        "lossD":lossD.item(),
                        "lossG": lossG.item(),
                        "L_adv": L_adv.item(),
                        "L_id": L_id.item(),
                        "L_attr": L_attr.item(),
                        "L_rec": L_rec.item(),
                        "L_backdoor": L_backdoor.item(),
                        "L_iam": L_iam.item(),
                    }
            else:
                errors = {
                        "lossD":lossD.item(),
                        "lossG": lossG.item(),
                        "L_adv": L_adv.item(),
                        "L_id": L_id.item(),
                        "L_attr": L_attr.item(),
                        "L_rec": L_rec.item(),
                        "L_backdoor": L_backdoor.item(),
                    }
                
            message = f'\n epoch: {epoch}    {iteration} / {len(dataloader)}    Time: {datetime.datetime.now()}   \n'
            for k, v in errors.items():
                message += '%s: %.5f ' % (k, v)

            # print(message)
            
            with open(log_path, "a") as log_file:
                log_file.write('%s\n' % message)
        
        print(f'epoch: {epoch}    {iteration} / {len(dataloader)}')
        print(f'lossD: {lossD.item()}    lossG: {lossG.item()} batch_time: {batch_time}s')
        if flag_aware:
            print(f'L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: {L_attr.item()} L_rec: {L_rec.item()} L_backdoor: {L_backdoor.item()} L_iam: {L_iam.item()}')
        else:
            print(f'L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: {L_attr.item()} L_rec: {L_rec.item()} L_backdoor: {L_backdoor.item()} ')
