import os
import random
from datetime import datetime
from os.path import join

import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from GAN.data.dataset_gzsl import Dataset
from common_utils.utils import load_args
from models.gan import Generator, Discriminator
from GAN.flags import parser

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Fix the random seed for reproducibility
manualSeed = 999
# manualSeed = random.randint(1, 10000) # use this line instead if you want fresh results each run
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
# Seeding torch ensures the same pseudo-random sequence is produced on every run
torch.manual_seed(manualSeed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(manualSeed)


def main():
    """Entry point: parse args, build data loaders and GAN models, then train.

    After each epoch a combined generator/discriminator checkpoint is saved
    whenever either loss beats its current best threshold; the beaten
    threshold is then tightened by ``deCre_rate``.
    """
    print(os.getcwd())
    args = parser.parse_args()
    load_args(args.config, args)
    # Timestamped TensorBoard run directory, e.g. .../tensorboard/gen/2024-01-01T12-00-00/
    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    logpath = os.path.join(args.cv_dir, args.name, 'tensorboard/gen' + "/" + TIMESTAMP)
    os.makedirs(logpath, exist_ok=True)
    writer = SummaryWriter(log_dir=logpath, flush_secs=30)
    modelPath = os.path.join(args.cv_dir, args.name)

    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='train')
    testDataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='val')
    trainLoader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
    testLoader = DataLoader(testDataset, batch_size=args.batch_size, shuffle=True)

    # Generator (attribute-conditioned) and discriminator
    netG = Generator(scale_factor=args.upscale_factor)
    netD = Discriminator()
    # Adam optimizers for G and D (beta1=0.5 as is conventional for GANs)
    optimizerD = optim.Adam(netD.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=args.lr, betas=(0.5, 0.999))

    # BCEWithLogitsLoss fuses sigmoid + BCE in a numerically stable way;
    # plain nn.BCELoss() with an explicit sigmoid was found problematic here.
    gen_criterion = nn.BCEWithLogitsLoss()
    best_gen_loss = 0.8
    best_disc_loss = 0.8
    deCre_rate = 0.98  # decay rate applied to a threshold once it is beaten
    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        train_loss_gen, train_loss_disc = train(epoch, args, netD, netG, trainLoader, testLoader,
                                                optimizerD, optimizerG, gen_criterion, writer)
        # BUGFIX: the original decayed best_gen_loss unconditionally whenever
        # either loss improved, and never updated best_disc_loss. Tighten only
        # the threshold(s) that were actually beaten.
        improved = False
        if train_loss_gen < best_gen_loss:
            best_gen_loss = best_gen_loss * deCre_rate
            improved = True
        if train_loss_disc < best_disc_loss:
            best_disc_loss = best_disc_loss * deCre_rate
            improved = True
        if improved:
            torch.save({
                'gen_model_state_dict': netG.state_dict(),
                'disc_model_state_dict': netD.state_dict(),
                'gen_total': train_loss_gen,
                'disc_total': train_loss_disc,
            }, join(modelPath, "gen_disc_checkpoint"))
def train(epoch, args, netD, netG, trainloader, testLoader, optimizerD, optimizerG, criterion, writer):
    """Run one training epoch over ``trainloader``, then evaluate on ``testLoader``.

    Logs per-epoch generator/discriminator losses to TensorBoard and returns
    ``(gen_total, disc_total)``: the mean of the train and test averages for
    each network. Assumes each loader yields ``(imgs, labels, attrs)`` batches
    (see the unpacking below) — labels are currently unused.
    """
    loss_train_disc = 0.0
    loss_train_gen = 0.0
    # position=0 / leave=True keep the progress bar on a single line
    for idx, data in tqdm(enumerate(trainloader), total=len(trainloader), desc=f'Training  epoch {epoch}',
                          position=0, leave=True):
        data = [d.to(device) for d in data]
        imgs, labels, attrs = data

        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        optimizerD.zero_grad()
        b_size = imgs.size(0)
        disc_loss = get_disc_loss(netG, netD, criterion=criterion, attrs=attrs, real=imgs, b_size=b_size,
                                  z_dim=args.nz)
        loss_train_disc += disc_loss.item()
        disc_loss.backward()
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        optimizerG.zero_grad()
        gen_loss = get_gen_loss(netG, netD, criterion=criterion, attrs=attrs, b_size=b_size, z_dim=args.nz)
        gen_loss.backward()
        optimizerG.step()
        loss_train_gen += gen_loss.item()

    loss_train_gen = loss_train_gen / len(trainloader)
    loss_train_disc = loss_train_disc / len(trainloader)

    writer.add_scalar("Loss/loss_train_gen", loss_train_gen, epoch)
    writer.add_scalar("Loss/loss_train_disc", loss_train_disc, epoch)

    loss_test_disc = 0.0
    loss_test_gen = 0.0
    # BUGFIX: the original evaluation loop called optimizerD.step() and
    # optimizerG.step() — i.e. it kept training on the validation split — and
    # it discarded the real validation images, scoring *generated* images as
    # the "real" batch. Evaluation now runs without gradients or parameter
    # updates and uses the actual validation images as the real batch.
    with torch.no_grad():
        for idx, data in tqdm(enumerate(testLoader), total=len(testLoader), desc=f'Testing  epoch {epoch}',
                              position=0, leave=True):
            data = [d.to(device) for d in data]
            imgs, labels, attrs = data
            b_size = imgs.size(0)
            disc_loss = get_disc_loss(netG, netD, criterion=criterion, attrs=attrs, real=imgs, b_size=b_size,
                                      z_dim=args.nz)
            loss_test_disc += disc_loss.item()
            gen_loss = get_gen_loss(netG, netD, criterion=criterion, attrs=attrs, b_size=b_size, z_dim=args.nz)
            loss_test_gen += gen_loss.item()

    loss_test_disc = loss_test_disc / len(testLoader)
    loss_test_gen = loss_test_gen / len(testLoader)

    writer.add_scalar("Loss/loss_test_disc", loss_test_disc, epoch)
    writer.add_scalar("Loss/loss_test_gen", loss_test_gen, epoch)
    gen_total = (loss_train_gen + loss_test_gen) / 2
    disc_total = (loss_train_disc + loss_test_disc) / 2
    writer.add_scalar("Loss/gen_total", gen_total, epoch)
    writer.add_scalar("Loss/disc_total", disc_total, epoch)
    print('Epoch: {}| loss_train_gen: {},loss_train_disc: {}, loss_test_gen: {},loss_test_disc: {},gen_total: {}, disc_total: {}'
          .format(epoch,round(loss_train_gen,2),round(loss_train_disc,2),round(loss_test_gen, 2),round(loss_test_disc, 2),round(gen_total, 2),round(disc_total, 2)))
    return gen_total, disc_total


def get_disc_loss(gen, disc, criterion, real, attrs, b_size, z_dim):
    """Compute the discriminator loss for one batch.

    The discriminator should score the real batch as 1 and the generated
    (fake) batch as 0; the returned loss is the mean of the two BCE terms.

    Args:
        gen: generator; called as ``gen(attrs)`` (attribute-conditioned —
            no noise vector is consumed).
        disc: discriminator; returns raw logits (criterion is expected to be
            BCEWithLogitsLoss).
        criterion: loss between logits and 0/1 targets.
        real: batch of real images.
        attrs: conditioning attributes fed to the generator.
        b_size, z_dim: kept for interface compatibility; unused (the original
            drew an unused ``torch.randn(b_size, z_dim)`` latent vector).

    Returns:
        Scalar tensor: (fake_loss + real_loss) / 2.
    """
    # Detach the fake batch so the D-loss backward pass does not propagate
    # gradients into the generator (the original omitted this, wasting compute).
    fake = gen(attrs).detach()
    disc_fake_pred = disc(fake)
    # Fake images should be scored as 0
    disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred))

    # Real images should be scored as 1
    disc_real_pred = disc(real)
    disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred))
    # Average the two terms (also avoids shadowing the `disc` parameter,
    # which the original rebound to the loss value)
    disc_loss = (disc_fake_loss + disc_real_loss) / 2
    return disc_loss


def get_gen_loss(gen, disc, criterion, attrs, b_size, z_dim):
    """Compute the generator loss for one batch.

    The generator wants its fakes to be scored as real (target 1) by the
    discriminator, so the loss is BCE between the discriminator's logits on
    the fake batch and an all-ones target.

    Args:
        gen: generator; called as ``gen(attrs)`` (attribute-conditioned).
        disc: discriminator returning raw logits.
        criterion: loss between logits and 0/1 targets.
        attrs: conditioning attributes fed to the generator.
        b_size, z_dim: kept for interface compatibility; unused (the original
            drew an unused ``torch.randn(b_size, z_dim)`` latent vector).

    Returns:
        Scalar loss tensor.
    """
    fake = gen(attrs)
    disc_fake_pred = disc(fake)
    # Fake labels are "real" (1) for the generator cost: G tries to fool D
    gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))
    return gen_loss


# Run training only when this file is executed as a script, not on import.
if __name__ == '__main__':
    main()
