from datetime import datetime
import json
import os
import numpy as np
import random
import argparse
from tqdm import tqdm
import torch
import cv2

from torchvision import transforms
from dataset import CustomDataset, split_dataset
from faketagger import FaceTagger
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from torch import optim
from utils import tensor_debug_view as debug

class Trainer:
    """Train the FakeTagger encoder/decoder to embed random binary messages
    into face images and recover them both from the encoded images and from
    their deepfaked (face-swapped) versions.

    Pipeline per batch: encode(message) -> optional deepfake -> decode,
    with an image-quality MSE loss plus a message BCE loss.
    """

    def __init__(self, random_seed=0) -> None:
        # Dataset locations and checkpoint output directory.
        self.TRUMP_PATH = './fakeTagger/resized_data/trump'
        self.CAGE_PATH  = './fakeTagger/resized_data/cage'
        self.SAVE_PATH  = './fakeTagger/result/save'
        self.args = self.set_args()
        self.set_seed(random_seed)
        self.set_GPU()
        self.load_data()
        self.load_model()

    # Initialize every RNG for reproducibility.
    def set_seed(self, random_seed):
        """Seed torch / CUDA / numpy / random with the same seed."""
        torch.manual_seed(random_seed)
        torch.cuda.manual_seed(random_seed)
        torch.cuda.manual_seed_all(random_seed)  # if using multi-GPU
        # Deterministic cuDNN kernels; disable benchmark so kernel choice is stable.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(random_seed)
        random.seed(random_seed)

    # Define the command-line entry point and default hyper-parameters.
    def set_args(self):
        """Build the argument parser and return the parsed namespace."""
        parser = argparse.ArgumentParser(description='FakeTagger')
        parser.add_argument('--batch_size', default=128, type=int, help='batch size')
        parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
        parser.add_argument('--epoch', default=100, type=int, help='epochs')
        parser.add_argument('--start_decode', default=30, type=int, help='epoch to start training decoder')
        parser.add_argument('--clip', default=15, type=int, help='clip')
        parser.add_argument('--message_size', default=15, type=int, help='message size')
        parser.add_argument('--lambda_val', default=1, type=float, help='weight of message loss')
        parser.add_argument('--alpha_val', default=0.5, type=float, help='weight of image loss')
        parser.add_argument('--T_max', default=50, type=int, help='cosine annealing LR scheduler t_max')
        parser.add_argument('--name', default='w_df_lambda_1_alpha_05', type=str, help='name to save')
        parser.add_argument('--GAN_simulator', default='faceswap', type=str, help='chose the face swap GAN model')
        parser.add_argument('--encoder', default='ResnetUnet', type=str, help='chose the encoder model')
        parser.add_argument('--mode', default='trump', type=str, help='chose the target face')

        # Without these two flags CUDA may not be used at all.
        parser.add_argument('--gpus', default='1', type=str, help='id of gpus to use')
        parser.add_argument('--num_gpus', default=1, type=int, help='numbers of gpus to use')

        return parser.parse_args()

    # Persist the training arguments for later inspection.
    def save_args(self):
        """Dump the parsed arguments as JSON into a timestamped sub-directory."""
        timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        save_name = f"w_df_lambda_{self.args.lambda_val}_alpha_{self.args.alpha_val}__{timestamp}"
        save_dir = os.path.join(self.SAVE_PATH, save_name)
        # Fix: the directory must exist before the JSON file can be created.
        os.makedirs(save_dir, exist_ok=True)
        with open(os.path.join(save_dir, 'training_args.json'), 'w') as f:
            json.dump(vars(self.args), f, indent=4)
        print(f"结果保存到: {save_dir}")

    # Select the GPU(s) to run on.
    def set_GPU(self):
        """Pin CUDA device visibility and pick cuda/cpu as the global device."""
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpus
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print("global device: ", self.device)
        # Fix: torch.cuda.current_device() raises on CPU-only machines,
        # defeating the CPU fallback above — only query it when CUDA exists.
        if torch.cuda.is_available():
            current_device = torch.cuda.current_device()
            print(f"当前GPU: {current_device}")

    def deepfake(self, encoded_trump, encoded_cage):
        """Run the configured face-swap simulator on both encoded batches.

        Returns (deepfaked_trump, deepfaked_cage).
        Raises ValueError for an unknown --GAN_simulator value (the original
        silently fell through and crashed with UnboundLocalError).
        """
        if self.args.GAN_simulator == "faceswap":
            encoded_trump_df = self.model.faceswap(encoded_trump, 'B')
            encoded_cage_df = self.model.faceswap(encoded_cage, 'A')
        elif self.args.GAN_simulator == "simswap":
            encoded_trump_df = self.model.simswap(encoded_cage, encoded_trump)
            encoded_cage_df = self.model.simswap(encoded_trump, encoded_cage)
        else:
            raise ValueError(f"unknown GAN_simulator: {self.args.GAN_simulator}")
        return encoded_trump_df, encoded_cage_df

    def _make_loaders(self, path, transform_train, transform_test):
        """Split the dataset at `path` into train/val/test and wrap each split
        in a DataLoader (train shuffled, val/test not)."""
        train_ds, val_ds, test_ds = split_dataset(path,
                                                  train_transform=transform_train,
                                                  test_transform=transform_test)
        bs = self.args.batch_size
        return (DataLoader(train_ds, batch_size=bs, shuffle=True),
                DataLoader(val_ds, batch_size=bs, shuffle=False),
                DataLoader(test_ds, batch_size=bs, shuffle=False))

    # Load both face datasets.
    def load_data(self):
        """Create train/val/test DataLoaders for the trump and cage datasets."""
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            # transforms.Resize((68, 68)),
            # transforms.RandomCrop((64, 64)),
            transforms.RandomHorizontalFlip(0.5),
            # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        # Split and load trump.
        (self.trump_train_loader,
         self.trump_val_loader,
         self.trump_test_loader) = self._make_loaders(self.TRUMP_PATH,
                                                      transform_train, transform_test)

        # Split and load cage.
        (self.cage_train_loader,
         self.cage_val_loader,
         self.cage_test_loader) = self._make_loaders(self.CAGE_PATH,
                                                     transform_train, transform_test)

    def debug(self):
        """Placeholder hook for ad-hoc debugging."""
        pass

    # Instantiate model, optimizer, scheduler and losses.
    def load_model(self):
        """Build the FaceTagger model plus optimizer, LR scheduler and criteria."""
        # Fix: this body was mis-indented in the original (IndentationError).
        self.model = FaceTagger(self.args.message_size, self.device, self.args.GAN_simulator)

        # Only encoder + decoder weights are trained; the GAN simulator is not optimized.
        self.opt = optim.Adam(
            list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()),  # type: ignore
            lr=self.args.lr,
            weight_decay=1e-5)
        # Fix: CosineAnnealingLR requires the optimizer as its first argument;
        # the original omitted it and raised a TypeError at construction.
        self.lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.opt, T_max=self.args.T_max, eta_min=1e-8)

        # Image-quality loss: mean squared error, summed over the batch.
        self.criterion = torch.nn.MSELoss(reduction='sum')
        # Message loss: binary cross-entropy, summed over the batch.
        self.message_criterion = torch.nn.BCELoss(reduction='sum')

    def train(self):
        """Main training loop.

        For the first `start_decode` epochs only the image-quality loss is
        optimized; afterwards the message BCE loss (through both the encoded
        and deepfaked images) is added.  After every epoch the model is
        validated and the best-by-val-loss checkpoint is saved.
        """
        model = self.model
        args = self.args
        message_criterion = self.message_criterion
        device = self.device
        criterion = self.criterion

        # Best-so-far validation statistics used for checkpointing.
        self.min_val_loss = float('inf')
        self.min_val_df_message_acc = 0
        self.min_val_message_acc = 0

        for i in tqdm(range(args.epoch)):

            # Report the current learning rate (all param groups share one).
            lr = self.opt.param_groups[0]["lr"]
            print(f"Epoch {i}, lr : {lr}")

            # Reset epoch statistics.
            self.train_epoch_image_loss = 0
            self.train_epoch_message_loss = 0
            self.train_message_correct = 0

            # Switch to training mode.
            model.encoder.train()
            model.decoder.train()

            train_size = 0
            # NOTE(review): zip() stops at the shorter loader, so surplus
            # batches of the larger dataset are skipped each epoch.
            for trump_train_x, cage_train_x in zip(self.trump_train_loader, self.cage_train_loader):

                # Move the images onto the training device.
                trump_train_x = trump_train_x.to(device)
                cage_train_x = cage_train_x.to(device)

                # Reset gradients of all optimized tensors.
                self.opt.zero_grad()

                # Random 0/1 message per image; detached — messages are inputs, not learned.
                trump_message = torch.randint(0, 2, (trump_train_x.shape[0], args.message_size), dtype=torch.float).to(device).detach()
                cage_message = torch.randint(0, 2, (cage_train_x.shape[0], args.message_size), dtype=torch.float).to(device).detach()

                # Embed the messages into the images.
                encoded_trump = model.encode(trump_train_x, trump_message)
                encoded_cage = model.encode(cage_train_x, cage_message)

                # Encoder image-quality loss: MSE between original and encoded images.
                image_loss = (criterion(encoded_trump, trump_train_x) + criterion(encoded_cage, cage_train_x))
                image_loss *= args.alpha_val
                loss = image_loss

                # Accumulate the epoch image-quality loss.
                self.train_epoch_image_loss += image_loss.item()

                # Let the encoder stabilize first; after `start_decode` epochs
                # add the decoder / deepfake round-trip to the loss.
                if i >= args.start_decode:
                    # Decode messages from the (un-swapped) encoded images.
                    encoded_trump_message = model.decode(encoded_trump)
                    encoded_cage_message = model.decode(encoded_cage)

                    # Face-swap the encoded images.
                    encoded_trump_df, encoded_cage_df = self.deepfake(encoded_trump, encoded_cage)

                    # Decode messages from the deepfaked images.
                    encoded_trump_df_message = model.decode(encoded_trump_df)
                    encoded_cage_df_message = model.decode(encoded_cage_df)

                    # Message loss = BCE on both the original and deepfaked decodings.
                    message_loss = message_criterion(encoded_trump_df_message, trump_message) \
                                + message_criterion(encoded_cage_df_message, cage_message) \
                                + message_criterion(encoded_trump_message, trump_message) \
                                + message_criterion(encoded_cage_message, cage_message)

                    # Weight of the message loss.
                    message_loss *= args.lambda_val

                    loss += message_loss
                    # Accumulate the epoch message loss.
                    self.train_epoch_message_loss += message_loss.item()

                # Backpropagate first ...
                loss.backward()
                # Fix: gradient clipping must run AFTER backward() — the
                # original clipped before gradients existed, making it a no-op.
                # Clip exactly the parameters the optimizer updates.
                torch.nn.utils.clip_grad_norm_(
                    list(model.encoder.parameters()) + list(model.decoder.parameters()),
                    args.clip)
                # ... then step the optimizer.
                self.opt.step()
                # Accumulate the number of training samples seen.
                train_size += trump_train_x.shape[0] + cage_train_x.shape[0]

            # Per-sample epoch averages.
            self.train_epoch_image_loss /= train_size
            self.train_epoch_message_loss /= train_size
            print(f"图像质量 Train image loss : {self.train_epoch_image_loss},\
                  编解码信息 Train message loss : {self.train_epoch_message_loss}")

            # Advance the cosine-annealing LR schedule once per epoch.
            self.lr_scheduler.step()

            # Switch to evaluation mode for validation.
            model.encoder.eval()
            model.decoder.eval()

            # Validation accumulators.
            val_image_loss = 0
            val_message_loss = 0
            val_message_correct = 0
            val_df_message_correct = 0
            val_size = 0

            with torch.no_grad():
                for (trump_val_x, cage_val_x) in zip(self.trump_val_loader, self.cage_val_loader):

                    trump_val_x = trump_val_x.to(device)
                    cage_val_x = cage_val_x.to(device)

                    # Save the first few real validation images as PNGs.
                    # permute turns (C, H, W) into (H, W, C) as OpenCV expects.
                    # NOTE(review): cv2.imwrite expects BGR; these tensors are
                    # presumably RGB, so channels may be swapped — confirm.
                    # Fix: paths started with '.fakeTagger' (missing '/'); the
                    # guard avoids an IndexError on batches smaller than 10.
                    for k in range(min(10, trump_val_x.shape[0], cage_val_x.shape[0])):
                        cv2.imwrite('./fakeTagger/result/trump/' + str(k) + '/trump_real_' + str(i) + '.png',
                                    (trump_val_x[k] * 255).permute(1, 2, 0).detach().cpu().numpy())
                        cv2.imwrite('./fakeTagger/result/cage/' + str(k) + '/cage_real_' + str(i) + '.png',
                                    (cage_val_x[k] * 255).permute(1, 2, 0).detach().cpu().numpy())

                    # Generate validation messages.
                    trump_message = torch.randint(0, 2, (trump_val_x.shape[0], args.message_size), dtype=torch.float).to(device).detach()
                    cage_message = torch.randint(0, 2, (cage_val_x.shape[0], args.message_size), dtype=torch.float).to(device).detach()

                    # Encode the messages into the images.
                    encoded_trump = model.encode(trump_val_x, trump_message)
                    encoded_cage = model.encode(cage_val_x, cage_message)

                    # Save the corresponding encoded images (path casing
                    # normalized to './fakeTagger' to match the constants).
                    for k in range(min(10, trump_val_x.shape[0], cage_val_x.shape[0])):
                        cv2.imwrite('./fakeTagger/result/trump/' + str(k) + '/trump_encoded_' + str(i) + '.png',
                                    (encoded_trump[k] * 255).permute(1, 2, 0).detach().cpu().numpy())
                        cv2.imwrite('./fakeTagger/result/cage/' + str(k) + '/cage_encoded_' + str(i) + '.png',
                                    (encoded_cage[k] * 255).permute(1, 2, 0).detach().cpu().numpy())

                    # Validation image-quality loss, weighted by alpha.
                    image_loss = criterion(encoded_trump, trump_val_x) + criterion(encoded_cage, cage_val_x)
                    image_loss *= args.alpha_val

                    # Only validate the deepfake round-trip once decoding is trained.
                    if i >= args.start_decode:

                        # Face-swap the encoded images.
                        encoded_trump_df, encoded_cage_df = self.deepfake(encoded_trump, encoded_cage)

                        # Decode the deepfaked images.
                        encoded_trump_df_message = model.decode(encoded_trump_df)
                        encoded_cage_df_message = model.decode(encoded_cage_df)

                        # Decode the un-swapped encoded images.
                        encoded_trump_message = model.decode(encoded_trump)
                        encoded_cage_message = model.decode(encoded_cage)

                        # Validation message loss.
                        message_loss = message_criterion(encoded_trump_df_message, trump_message) + \
                                    message_criterion(encoded_cage_df_message, cage_message) + \
                                    message_criterion(encoded_trump_message, trump_message) + \
                                    message_criterion(encoded_cage_message, cage_message)

                        # Bit accuracy: threshold the decoder's [0, 1] outputs
                        # (BCELoss requires probabilities) at 0.5 and compare
                        # against the ground-truth bits.
                        val_df_message_correct += ((encoded_trump_df_message > 0.5) == trump_message).sum().item() + \
                                                    ((encoded_cage_df_message > 0.5) == cage_message).sum().item()
                        val_message_correct += ((encoded_trump_message > 0.5) == trump_message).sum().item() + \
                                                    ((encoded_cage_message > 0.5) == cage_message).sum().item()

                        # Weighted message loss; accumulate as a float
                        # (fix: the original accumulated tensors).
                        message_loss *= args.lambda_val
                        val_message_loss += message_loss.item()

                    # Accumulate loss and sample count.
                    val_image_loss += image_loss.item()
                    val_size += trump_val_x.shape[0] + cage_val_x.shape[0]

                # Per-sample averages.
                val_image_loss /= val_size
                val_message_loss /= val_size

                # Per-bit accuracies.
                val_message_acc = val_message_correct / (val_size * args.message_size)
                val_loss = val_image_loss + val_message_loss
                val_df_message_acc = val_df_message_correct / (val_size * args.message_size)

                # Checkpoint whenever validation loss improves, but only after
                # deepfake training has started (message loss is meaningful).
                if self.min_val_loss > val_loss and i > args.start_decode:

                    print(f'model saved at epoch {i}')
                    path = os.path.join(self.SAVE_PATH, args.name)

                    # Only save again once the loss has actually decreased.
                    self.min_val_loss = val_loss

                    os.makedirs(path, exist_ok=True)

                    torch.save({
                        "encoder" : model.encoder.state_dict(),
                        "decoder" : model.decoder.state_dict()
                    }, os.path.join(path, "loss_best.pt"))

                print(f"Val image loss : {val_image_loss}, Val message loss : {val_message_loss}, Val encoded message accuracy : {val_message_acc}, Val DF message accuracy : {val_df_message_acc}")
            torch.cuda.empty_cache()
if __name__ == "__main__":
    # Script entry point: build the trainer (parses CLI args, loads data
    # and model) and run the full training loop.
    Trainer().train()
