import os
import sys
sys.path.append('../')
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# tensorboard --logdir Training/tensorboard/NRPresG_iters_random_normTrue_TI-DIMtest

import argparse
import time
import cv2

from torchvision import transforms
from utils.PairedTransforms_util import *
from utils.losses_util import *
from utils.AverageMeter_util import *
from utils.PairedImageDataset_util import ImageDataset, PairedImageDataset, AdvImageDataset, tensor2img
from torchvision.utils import save_image
from networks import NRP, NRP_resG, Critic, FeatureExtractor
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau, CosineAnnealingLR
import pandas as pd
import timm
from PIL import Image

# Setting random seeds makes training results reproducible.
def set_random_seed(seed):
    """Seed Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs.

    Note: full GPU determinism would additionally require
    torch.backends.cudnn.deterministic = True, which is not set here.
    """
    # `random` is not imported at module level; import locally so this
    # function does not raise NameError.
    import random
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)       # no-op (deferred) on CPU-only hosts
    torch.cuda.manual_seed_all(seed)

def get_hms(seconds): # def make_readable(seconds):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    # divmod(a, b) returns the (quotient, remainder) pair of a / b.
    return hours, minutes, secs
    # return '%02d:%02d:%02d' % (h,m,s)

# Data loading for model evaluation.
IMG_EXTENSIONS = ['.png', '.jpg']
def find_inputs(folder, filename_to_target=None, types=IMG_EXTENSIONS):
    """Recursively collect (absolute_path, target) pairs for images under `folder`.

    Only files whose extension is in `types` (case-insensitive) are kept.
    The target is looked up in `filename_to_target` by the file's base name
    (no extension); when no mapping is given, targets default to 0.
    """
    inputs = []
    for root, _, files in os.walk(folder, topdown=False):
        for rel_filename in files:
            base, ext = os.path.splitext(rel_filename)
            # Skip non-image files. Previously `types` was ignored and every
            # file was collected, letting stray files (e.g. .csv) into the
            # dataset despite the caller advertising supported extensions.
            if ext.lower() not in types:
                continue
            abs_filename = os.path.join(root, rel_filename)
            # target = filename_to_target[rel_filename] if filename_to_target else 0
            target = filename_to_target[base] if filename_to_target else 0
            inputs.append((abs_filename, target))
    return inputs

class Dataset(torch.utils.data.Dataset):
    """Paired clean/adversarial image dataset used during evaluation.

    Walks `root` for clean images, pairs each with the file of the same
    name under `advroot`, and looks the class label up from a CSV file.
    Each item is a (clean_tensor, adv_tensor, zero_based_label) triple.
    """

    def __init__(
            self,
            root,
            advroot,
            target_file='target_class.csv',
            transform=None):

        # Build a {base filename -> label} mapping from the CSV target file.
        if target_file:
            # target_df = pd.read_csv(os.path.join(root, target_file), header=None)
            target_df = pd.read_csv(target_file, header=None)
            # f_to_t = dict(zip(target_df[0], target_df[1] - 1))  # -1 for 0-999 class ids
            # NOTE(review): column 0 is assumed to be the image name and
            # column 6 the true label — confirm against the CSV schema.
            f_to_t = dict(zip(target_df[0], target_df[6]))
        else:
            f_to_t = dict()
        imgs = find_inputs(root, filename_to_target=f_to_t)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.advroot = advroot
        self.imgs = imgs
        self.transform = transform

    def __getitem__(self, index):
        """Return (clean_img, adv_img, label - 1) for the given index."""
        path, target = self.imgs[index]
        # The adversarial counterpart is located by the last 20 characters of
        # the clean path (i.e. the filename); assumes fixed-length names and
        # identical extensions for clean and adv images — TODO confirm.
        adv_path = os.path.join(self.advroot, path[-20:]) # clean and adv share filename + extension
        # adv_path = os.path.join(self.advroot, path[-20:-4] + '.jpg') # clean is png, adv is jpg
        # print(path, target)
        img = Image.open(path).convert('RGB')
        adv = Image.open(adv_path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
            adv = self.transform(adv)
        # Defensive fallback; in practice `target` is an int (or 0), not None.
        if target is None:
            target = torch.zeros(1).long()
        # return img, target
        # Labels in the CSV appear to be 1-based; shift to 0-based class ids.
        return img, adv, int(target) - 1

    def __len__(self):
        return len(self.imgs)

    def set_transform(self, transform):
        # Replace the transform after construction (e.g. to switch eval crops).
        self.transform = transform

    def filenames(self, indices=[], basename=False):
        # Return image paths (optionally base names) for `indices`, or for the
        # whole dataset when `indices` is empty. The mutable default is only
        # read, never mutated, so sharing it across calls is harmless.
        if indices:
            if basename:
                return [os.path.basename(self.imgs[i][0]) for i in indices]
            else:
                return [self.imgs[i][0] for i in indices]
        else:
            if basename:
                return [os.path.basename(x[0]) for x in self.imgs]
            else:
                return [x[0] for x in self.imgs]

# ----------
#  Training
# ----------
def train(epoch):
    """Run one epoch of relativistic-average-GAN training for the purifier.

    Alternates generator (netG) and discriminator (netD) updates over
    `train_loader`, logs running losses to stdout and TensorBoard, saves
    sample image grids every `args.sample_interval` batches, checkpoints
    the best-loss model, and runs `evaluate()` every `args.test_interval`
    batches. Relies on the module-level nets, optimizers, schedulers,
    criteria, writers and `args`.
    """
    print('************************************* Training *************************************')
    netG.train()
    netD.train()

    losses_D = AverageMeter()
    losses_D_real = AverageMeter()
    losses_D_fake = AverageMeter()
    losses_G = AverageMeter()
    losses_feat = AverageMeter()
    losses_adv = AverageMeter()
    losses_pixel = AverageMeter()
    batch_time = AverageMeter()

    end = time.time()

    for i, (x, x_adv) in enumerate(train_loader):

        x, x_adv = x.to(device), x_adv.to(device)

        # ------------------
        #  Train Generators, netG
        # ------------------
        # Freeze the discriminator while updating the generator.
        for p in netD.parameters():
            p.requires_grad = False

        # Set grads to zero for new iter
        optimizer_G.zero_grad()

        # Purify the adversarial input with the generator.
        purified_x_adv = netG(x_adv)

        l_g_total = 0
        # Measure pixel-wise loss against ground truth
        l_g_pix = criterion_pixel(purified_x_adv, x)
        l_g_total += l_g_pix

        # Perceptual feature loss; either component may be disabled (None).
        l_g_percep, l_g_style = criterion_feat(purified_x_adv, x)
        if l_g_percep is not None:
            l_g_total += l_g_percep
        if l_g_style is not None:
            l_g_total += l_g_style

        # Adversarial loss (relativistic average GAN) for G.
        # .detach() stops gradients from flowing into netD here.
        real_d_pred = netD(x).detach()
        fake_g_pred = netD(purified_x_adv)
        l_g_real = criterion_GAN(real_d_pred - torch.mean(fake_g_pred), False, is_disc=False)
        l_g_fake = criterion_GAN(fake_g_pred - torch.mean(real_d_pred), True, is_disc=False)
        l_g_gan = (l_g_real + l_g_fake) / 2

        l_g_total += l_g_gan

        # Total generator loss
        l_g_total.backward()
        optimizer_G.step()

        # ------------------
        #  Train Discriminator, netD
        # ------------------
        for p in netD.parameters():
            p.requires_grad = True

        # Set grads to zero for new iter
        optimizer_D.zero_grad()

        # Relativistic GAN loss for D — real half:
        fake_d_pred = netD(purified_x_adv).detach()
        real_d_pred = netD(x)
        l_d_real = criterion_GAN(real_d_pred - torch.mean(fake_d_pred), True, is_disc=True) * 0.5
        l_d_real.backward()
        # ... and fake half (generator output detached):
        fake_d_pred = netD(purified_x_adv.detach())
        l_d_fake = criterion_GAN(fake_d_pred - torch.mean(real_d_pred.detach()), False, is_disc=True) * 0.5
        l_d_fake.backward()

        l_d_total = l_d_real + l_d_fake
        optimizer_D.step()

        # --------------
        #  Log Progress
        # --------------
        losses_D.update(l_d_total.item(), x.size(0))
        losses_D_real.update(l_d_real.item(), x.size(0))
        losses_D_fake.update(l_d_fake.item(), x.size(0))
        losses_G.update(l_g_total.item(), x.size(0))
        losses_feat.update(l_g_percep.item(), x.size(0))
        losses_adv.update(l_g_gan.item(), x.size(0))
        losses_pixel.update(l_g_pix.item(), x.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

        # BUGFIX: the batch field previously reused {0}/{1}, so it printed the
        # epoch numbers instead of the batch index; {2}/{3} selects i/len.
        print('Epoch: [{0}/{1}]\t'
                'Batch: [{2}/{3}]\t'
                'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Loss D: {l_d_total.val:.4f} ({l_d_total.avg:.4f})\t'
                'Loss D Real: {l_d_real.val:.4f} ({l_d_real.avg:.4f})\t'
                'Loss D Fake: {l_d_fake.val:.4f} ({l_d_fake.avg:.4f})\t'
                'Loss G: {l_g_total.val:.4f} ({l_g_total.avg:.4f})\t'
                'Loss feat: {l_g_percep.val:.4f} ({l_g_percep.avg:.4f})\t'
                'Loss adv: {l_g_gan.val:.4f} ({l_g_gan.avg:.4f})\t'
                'Loss pixel: {l_g_pix.val:.4f} ({l_g_pix.avg:.4f})'.format(
                epoch, args.n_epochs,
                i, len(train_loader),
                batch_time=batch_time,
                l_d_total=losses_D,
                l_d_real=losses_D_real,
                l_d_fake=losses_D_fake,
                l_g_total=losses_G,
                l_g_percep=losses_feat,
                l_g_gan=losses_adv,
                l_g_pix=losses_pixel))

        batches_done = epoch * len(train_loader) + i
        if batches_done % args.sample_interval == 0:
            # Concatenate clean / adversarial / purified images side by side.
            img_grid = torch.cat((x, x_adv, purified_x_adv), -1)
            save_image(img_grid, os.path.join(args.training_records_dir, 'cat_%d.png' % batches_done), nrow=1, normalize=False)

        global MIN_ASR, MIN_LOSS_G
        # BUGFIX: compare/store a Python float, not the loss tensor — keeping
        # the tensor would retain the entire autograd graph of this batch.
        cur_loss_g = l_g_total.item()
        if cur_loss_g < MIN_LOSS_G:
            MIN_LOSS_G = cur_loss_g
            state_dict_netG = {"netG": netG.state_dict(), "optimizer_G": optimizer_G.state_dict(), 'scheduler_G': scheduler_G.state_dict(), 'batches_done': batches_done}
            state_dict_netD = {"netD": netD.state_dict(), "optimizer_D": optimizer_D.state_dict(), 'scheduler_D': scheduler_D.state_dict(), 'batches_done': batches_done}
            torch.save(state_dict_netG, os.path.join(args.saved_models_dir, 'min_LossG_generator.pth'))
            torch.save(state_dict_netD, os.path.join(args.saved_models_dir, 'min_LossG_discriminator.pth'))
            print('Saving models, Min Loss G: ', MIN_LOSS_G)

        if batches_done % args.test_interval == 0:
            asr = evaluate(val_freq=batches_done)
            # BUGFIX: evaluate() switches netG to eval mode; restore training
            # mode so later batches do not train with eval-mode BN/dropout.
            netG.train()
            state_dict_netG = {"netG": netG.state_dict(), "optimizer_G": optimizer_G.state_dict(), 'scheduler_G': scheduler_G.state_dict(), 'batches_done': batches_done}
            state_dict_netD = {"netD": netD.state_dict(), "optimizer_D": optimizer_D.state_dict(), 'scheduler_D': scheduler_D.state_dict(), 'batches_done': batches_done}

            if asr < MIN_ASR:
                MIN_ASR = asr
                torch.save(state_dict_netG, os.path.join(args.saved_models_dir, 'best_ASR_generator.pth'))
                torch.save(state_dict_netD, os.path.join(args.saved_models_dir, 'best_ASR_discriminator.pth'))

        # add_scalar(tag, value, global_step): log per-batch scalars.
        loss_D_writer.add_scalar('Loss D', l_d_total, batches_done)
        loss_D_writer.add_scalar('Loss D real', l_d_real, batches_done)
        loss_D_writer.add_scalar('Loss D fake', l_d_fake, batches_done)
        loss_G_writer.add_scalar('Loss G', l_g_total, batches_done)
        loss_feat_writer.add_scalar('Featrue loss', l_g_percep, batches_done)
        loss_adv_writer.add_scalar('Adversarial loss', l_g_gan, batches_done)
        loss_pixel_writer.add_scalar('Pixel loss', l_g_pix, batches_done)

        lr_writer.add_scalar('netG_lr', optimizer_G.state_dict()['param_groups'][0]['lr'], batches_done)
        lr_writer.add_scalar('netD_lr', optimizer_D.state_dict()['param_groups'][0]['lr'], batches_done)

    scheduler_G.step()
    scheduler_D.step()

    number = epoch + 1
    if number % args.checkpoint_interval == 0:
        # Save model checkpoints.
        # With multi-GPU training, model.module.state_dict() would be needed.
        state_dict_netG = {"netG": netG.state_dict(), "optimizer_G": optimizer_G.state_dict(), 'scheduler_G': scheduler_G.state_dict()}
        state_dict_netD = {"netD": netD.state_dict(), "optimizer_D": optimizer_D.state_dict(), 'scheduler_D': scheduler_D.state_dict()}
        torch.save(state_dict_netG, os.path.join(args.saved_models_dir, 'generator_%d.pth' % number))
        torch.save(state_dict_netD, os.path.join(args.saved_models_dir, 'discriminator_%d.pth' % number))

# ----------
#  Testing
# ----------
# Visual check: reload a saved generator and inspect the purified image.
def test(epoch):
    """Reload the checkpointed generator and save its output on a fixed adversarial sample."""
    print('************************************* Testing *************************************')
    # Prepare the single fixed adversarial test image as a 1xCxHxW tensor.
    sample = Image.open(args.testdirs_adv).convert('RGB')
    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        # transforms.Normalize(mean, std),
    ])
    sample = to_tensor(sample).unsqueeze(0).to(device)

    checkpoint_no = epoch + 1

    # Only run on epochs where a checkpoint was just written.
    if checkpoint_no % args.checkpoint_interval == 0:
        reloaded = NRP_resG(3, 3, 64, 23).to(device)
        # NETG.load_state_dict(torch.load(os.path.join(args.saved_models_dir, 'generator_%d.pth' % number)), False)
        # Restore the generator weights from the just-saved checkpoint dict.
        ckpt = torch.load(os.path.join(args.saved_models_dir, 'generator_%d.pth' % checkpoint_no))
        reloaded.load_state_dict(ckpt['netG'])
        reloaded.eval()

        with torch.no_grad():
            purified = reloaded(sample)

        save_image(purified, os.path.join(args.testdirs_reload, "reload_out_%d.png" % checkpoint_no), nrow=1, normalize=False)

# ------------
#  Evaluating
# ------------
# (revised)
def evaluate(val_freq):
    """Measure purified-image accuracy and attack success rate (ASR).

    Purifies clean and adversarial image pairs with the module-level netG,
    classifies both with the module-level Eval_model, logs accuracies and
    ASR to TensorBoard at step `val_freq`, and returns the ASR (fraction
    of purified adversarial predictions that differ from the purified
    clean predictions). Leaves netG in eval mode — callers that keep
    training must switch it back.
    """
    print('************************************* Evaluating *************************************')

    evaluatedirs_adv = args.evaluatedirs_adv

    eval_trans = transforms.ToTensor() # ToTensor : [0, 255] -> [0, 1]
    eval_data = Dataset(root=args.evaluatedirs_cln,
                        advroot=evaluatedirs_adv,
                        target_file='dev_dataset.csv',
                        transform=eval_trans)
    eval_loader = DataLoader(
            dataset = eval_data,
            batch_size = 1,
            shuffle = False,
            num_workers = 4)

    netG.eval()
    purified_clean_correct = 0
    purified_adv_correct = 0
    total = 0
    fooled = 0

    for clean_image, adv_image, labels in eval_loader:
        # labels is a 1-element tensor per batch, e.g. tensor([247])
        clean_image = clean_image.to(device)
        adv_image = adv_image.to(device)
        labels = labels.to(device)

        # BUGFIX: run the entire evaluation under no_grad. Previously only the
        # purifier was wrapped; the Eval_model forward passes still built
        # autograd graphs, wasting memory for inference-only work.
        with torch.no_grad():
            # Important: clean samples also go through the purifier so both
            # branches see the same pipeline.
            purified_clean = netG(clean_image)
            purified_adv = netG(adv_image)

            for img_index in range(purified_clean.size()[0]):
                purified_clean_outputs = Eval_model(purified_clean[img_index].unsqueeze(0))
                purified_adv_outputs = Eval_model(purified_adv[img_index].unsqueeze(0))

                _, purified_clean_pre = torch.max(purified_clean_outputs.data, 1)
                _, purified_adv_pre = torch.max(purified_adv_outputs.data, 1)

                label = labels[img_index]

                total += 1
                print('Number of clean images:', total)
                purified_clean_correct += (purified_clean_pre == label).sum()
                purified_adv_correct += (purified_adv_pre == label).sum()
                fooled += (purified_adv_pre != purified_clean_pre).sum()

    ASR = float(fooled) / total

    Eval_writer.add_scalar('Accuracy of purified clean examples', float(purified_clean_correct) / total, val_freq)
    Eval_writer.add_scalar('%s Accuracy of purified adversarial examples' % os.path.basename(evaluatedirs_adv), float(purified_adv_correct) / total, val_freq)
    Eval_writer.add_scalar('%s Attack success rate' % os.path.basename(evaluatedirs_adv), ASR, val_freq)

    print('Evaluating %s' % os.path.basename(evaluatedirs_adv))
    print('Accuracy of purified clean examples: %f %%' % (100 * float(purified_clean_correct) / total))
    print('Accuracy of purified adversarial examples: %f %%' % (100 * float(purified_adv_correct) / total))
    print('Attack success rate: %f %%' % (100 * ASR))

    return ASR

# Build the command-line interface: an ArgumentParser collects every path,
# hyperparameter and interval used below.
# description - text displayed before the argument help.
parser = argparse.ArgumentParser(description='PyTorch MS-COCO Training. See code for default values.')

parser.add_argument('--mode', default=0, type=int, help='Wether to perform test without trainig (default: 0)')

# STORAGE LOCATION VARIABLES
parser.add_argument('--traindirs_cln', default='', type=str,
                    help='path of clean trainset')
parser.add_argument('--traindirs_adv', default='', type=str,
                    help='path of adversarial trainset')

parser.add_argument('--saved_models_dir', default='Training/saved_models/NRPresG_iters_random_normTrue_TI-DIMtest', type=str)
parser.add_argument('--training_records_dir', default='Training/training_records/NRPresG_iters_random_normTrue_TI-DIMtest', type=str)
parser.add_argument('--tensorboard_dir', default='Training/tensorboard/NRPresG_iters_random_normTrue_TI-DIMtest', type=str)

# Used to visually inspect the purification quality of the model.
parser.add_argument('--testdirs_adv', default='Translation-Invariant-Attacks/NIPS2017_dataset/adv_FGSM_ensemble_e=16/0aebe24fc257286e.png', type=str,
                    help='path of adversarial testset')
parser.add_argument('--testdirs_reload', default='Training/testing_reload/NRPresG_iters_random_normTrue_TI-DIMtest', type=str,
                    help='save path of the purified image')

# Used to evaluate model performance.
parser.add_argument('--evaluatedirs_cln', default= 'Translation-Invariant-Attacks/NIPS2017_dataset/images/') # clean_images/
parser.add_argument('--evaluatedirs_adv', default= 'Translation-Invariant-Attacks/NIPS2017_dataset/adv_TI_DIM_ensemble_e=16') # adv_images/

parser.add_argument('--evaluatedirs_purified', default= 'Training/Evaluating/NRPresG_iters_random_normTrue_TI-DIMtest_images/') # data/purified_imgs
parser.add_argument('--Purifier_checkpoint', default= 'Training/saved_models/NRPresG_iters_random_normTrue_TI-DIMtest/min_LossG_generator.pth')

parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training") # Number of iterations
parser.add_argument("--batch_size", type=int, default=16, help="size of the batches") # Batch size is set to 16
parser.add_argument("--lr", type=float, default=1e-4, help="adam: learning rate") # Learning rates for generator and discriminator are set to 10^-4
parser.add_argument("--b1", type=float, default=0.9, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.99, help="adam: decay of first order momentum of gradient")
parser.add_argument("--sample_interval", type=int, default=1000, help="interval between saving image samples")
parser.add_argument("--checkpoint_interval", type=int, default=10, help="batch interval between model checkpoints")
parser.add_argument("--test_interval", type=int, default=1000)
parser.add_argument("--alpha_adv", type=float, default=5e-3, help="adversarial loss weight") # default=5e-3
parser.add_argument("--gamma_pixel", type=float, default=1e-2, help="pixel-wise loss weight") # default=1e-2
parser.add_argument("--lambda_feat", type=float, default=1.0, help="feature loss weight") # default=1e-2
parser.add_argument('--vgg_type', default='vgg16', help='VGG model that is going to be used in SSP')
parser.add_argument('--layer_weights', default={'relu3_3':1}, help='VGG layer that is going to be used in SSP')

args = parser.parse_args()
print(args)

set_random_seed(0) # seed all RNGs for reproducibility
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

TRAIN_AND_TEST = 0
TEST = 1

if args.mode == TRAIN_AND_TEST:
    os.makedirs(args.saved_models_dir, exist_ok=True)
    os.makedirs(args.training_records_dir, exist_ok=True)
    # os.makedirs(args.testdirs_reload, exist_ok=True)

    # One SummaryWriter per metric; each gets its own subdirectory under
    # args.tensorboard_dir (the log directories are created automatically).
    loss_D_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Loss D'))
    loss_G_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Loss G'))
    loss_feat_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Featrue loss'))
    loss_adv_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Adversarial loss'))
    loss_pixel_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Pixel loss'))
    # Weight_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Weight'))
    # Grad_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Grad'))
    lr_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'lr'))
    Eval_writer = SummaryWriter(log_dir=os.path.join(args.tensorboard_dir, 'Evaluate'))

    # Initialize generator and discriminator.
    # Load Denoiser
    # netG = NRP(3, 3, 64, 23).to(device)
    netG = NRP_resG(3, 3, 64, 23).to(device)
    # Load Discriminator
    netD = Critic(3,64).to(device)

    # Backbone model to evaluate
    Eval_model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True).to(device)
    Eval_model.eval()

    # Losses
    # Feature (perceptual) loss: L1 distance between VGG features.
    criterion_feat = PerceptualLoss(layer_weights=args.layer_weights, vgg_type=args.vgg_type, use_input_norm=True, range_norm=False, perceptual_weight=args.lambda_feat, style_weight=0., criterion='l1').to(device) # L1 (MAE) criterion on VGG feature maps
    # Adversarial loss
    # The pixel and feature losses focus on restoring image content and style, while adversarial loss restores texture details.
    criterion_GAN = GANLoss(gan_type='vanilla', real_label_val=1.0, fake_label_val=0.0, loss_weight=args.alpha_adv).to(device) # vanilla GAN uses BCE-with-logits between predictions and real/fake labels
    # Pixel loss
    # Pixel losses are the most common; L2 punishes large errors but is weak on
    # small ones (often worse than L1), ignores perceptual quality/texture, and
    # tends to produce over-smoothed results lacking high-frequency detail.
    criterion_pixel = MSELoss(loss_weight=args.gamma_pixel, reduction='mean').to(device) # L2 (MSE) criterion: squared difference between output and target

    # Optimizers and step-decay schedulers.
    optimizer_G = torch.optim.Adam(netG.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    optimizer_D = torch.optim.Adam(netD.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    scheduler_G = MultiStepLR(optimizer_G, milestones=[50, 100, 150], gamma=0.5)
    scheduler_D = MultiStepLR(optimizer_D, milestones=[50, 100, 150], gamma=0.5)

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor

    # When resuming (epoch != 0), reload the saved generator and discriminator
    # together with their optimizer and scheduler states.
    if args.epoch != 0:
        checkpoint_netG = torch.load(os.path.join(args.saved_models_dir, 'generator_%d.pth' % args.epoch)) # load checkpoint
        netG.load_state_dict(checkpoint_netG['netG']) # restore learnable parameters
        optimizer_G.load_state_dict(checkpoint_netG['optimizer_G']) # restore optimizer state
        scheduler_G.load_state_dict(checkpoint_netG['scheduler_G']) # restore LR schedule
        checkpoint_netD = torch.load(os.path.join(args.saved_models_dir, 'discriminator_%d.pth' % args.epoch))
        netD.load_state_dict(checkpoint_netD['netD'])
        optimizer_D.load_state_dict(checkpoint_netD['optimizer_D'])
        scheduler_D.load_state_dict(checkpoint_netD['scheduler_D'])

    # These images are resized to 480 x 480 x 3
    # During training, we randomly crop images of 128 x 128 x 3
    # ImageNet normalization constants for pretrained PyTorch models
    # (currently unused — Normalize is commented out below).
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    '''
    train_trans = transforms.Compose([
            # transforms.Resize(128),
            transforms.ToTensor(),
            # transforms.Normalize(mean, std),
        ])
    '''
    train_trans = Compose([
            Resize(480),
            RandomCrop(128),
            ToTensor()
            ])
    train_data = PairedImageDataset(imgcln_dirs=args.traindirs_cln, 
                            imgadv_dirs=args.traindirs_adv,
                            transform=train_trans)
    train_loader = DataLoader(
            dataset = train_data,
            batch_size = args.batch_size,
            shuffle = False, 
            num_workers = 4, # rule of thumb: ~4x the number of available GPUs; more workers raise CPU memory use
            drop_last = True)


    print("==================== TRAINING ====================")
    print('\n[Phase 3] : Training model')
    print('| Training Epochs = ' + str(args.n_epochs))
    print('| Initial Learning Rate = ' + str(args.lr))

    elapsed_time = 0
    # Best-so-far trackers updated inside train() via `global`.
    MIN_ASR = 1
    MIN_LOSS_G = float("inf")
    for epoch in range(args.epoch, args.n_epochs):
        start_time = time.time()

        train(epoch)
        # test(epoch)

        epoch_time = time.time() - start_time
        elapsed_time += epoch_time
        print('| Elapsed time : %d:%02d:%02d' %(get_hms(elapsed_time)))
