import argparse
import os
import random
from datetime import datetime
from os.path import join

import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from ALE.data.dataset import Dataset
from ALE.models.confg_model import config_model
from common_models import gan
from common_utils.Evaluator import Evaluator
from common_utils.utils import load_args

# Use the GPU when one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

parser.add_argument('--config', default='configs/baselines/awa2/ALE.yml',
                    help='path of the config file (training only)')
# NOTE(review): default 'E:datasets/' looks like it is missing a separator after
# the drive letter (perhaps 'E:/datasets/') — confirm on the target machine.
parser.add_argument('--data_root', default='E:datasets/', help='数据集根路径')
parser.add_argument('--data_dir', default='Animals_with_Attributes2', help='数据集根路径')
parser.add_argument('--dataset', default='AWA2', help='数据集')
parser.add_argument('--feature_type', default='origin', help='origin原始数据特征，')
parser.add_argument('--phase', default='train', help='训练阶段')
parser.add_argument('--image_extractor', default='resnet18', help='特征提取模型')

# NOTE(review): help text says "Training batch size" but this is the epoch count.
parser.add_argument('--epochs', type=int, default=1000, help='Training batch size')

parser.add_argument('--cv_dir', default='logs/', help='dir to save checkpoints and logs to')
parser.add_argument('--name', default='temp', help='Name of exp used to name models')

# Model parameters
parser.add_argument('--model', default='simple', help='simple')

# Hyperparameters
parser.add_argument('--workers', type=int, default=8, help="Number of workers")
parser.add_argument('--batch_size', type=int, default=512, help="Training batch size")
parser.add_argument('--lr', type=float, default=0.01, help="Learning rate")
parser.add_argument('--lrg', type=float, default=1e-3, help="Learning rate feature extractor")
parser.add_argument('--wd', type=float, default=5e-5, help="Weight decay")
parser.add_argument('--save_every', type=int, default=10000, help="Frequency of snapshots in epochs")
parser.add_argument('--eval_val_every', type=int, default=1, help="Frequency of eval in epochs")
parser.add_argument('--max_epochs', type=int, default=800, help="Max number of epochs")

# Fix the random seed for reproducibility.
manualSeed = 999
# manualSeed = random.randint(1, 10000)  # uncomment to get fresh results each run
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
# Seeding guarantees the same pseudo-random sequence on every run.
torch.manual_seed(manualSeed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(manualSeed)


def main():
    """Train the conditional GAN (netG/netD) on dataset features.

    Each epoch: run `train()` (GAN updates over train and val loaders), run
    `val()` (fits/evaluates the ALE classifier on generated features), and
    checkpoint G and D whenever either mean loss beats its current threshold.
    """
    print(os.getcwd())
    args = parser.parse_args()
    load_args(args.config, args)
    timestamp = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    logpath = os.path.join(args.cv_dir, args.name, 'tensorboard/gen', timestamp)
    os.makedirs(logpath, exist_ok=True)
    writer = SummaryWriter(log_dir=logpath, flush_secs=30)
    # Checkpoints go next to the tensorboard dir; makedirs above already
    # created this parent directory.
    modelPath = os.path.join(args.cv_dir, args.name)

    # Train split for GAN updates; val split for the second pass inside train().
    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase=args.phase)
    testDataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='val')
    trainLoader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
    testLoader = DataLoader(testDataset, batch_size=args.batch_size, shuffle=True)

    # Generator: class attributes -> visual features.
    netG = gan.MLP_Generator(input_dim=dataset.attr_dim, output_dim=dataset.feat_dim,
                             layers=[args.ngh, args.ngh * 2, args.ngh]).to(device)
    # Discriminator: visual features -> single real/fake logit.
    netD = gan.MLP_Discriminator(input_dim=dataset.feat_dim, output_dim=1, layers=[int(args.ngh / 2)]).to(device)
    # Adam optimizers for D and G (beta1=0.5, conventional for GAN training).
    optimizerD = optim.Adam(netD.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=args.lr, betas=(0.5, 0.999))

    # BCE on raw logits; plain nn.BCELoss() + sigmoid was problematic here.
    gen_criterion = nn.BCEWithLogitsLoss()

    best_gen_loss = 0.8   # checkpoint threshold for the generator loss
    best_disc_loss = 0.8  # checkpoint threshold for the discriminator loss
    decay_rate = 0.98     # a threshold tightens by this factor after each save

    model, optimizer = config_model(args=args, feat_dim=dataset.feat_dim, attr_dim=dataset.attr_dim)
    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        train_loss_gen, train_loss_disc = train(epoch, args, netD, netG, trainLoader, testLoader,
                                                optimizerD, optimizerG, gen_criterion, writer)
        val(args=args, netG=netG, sig=torch.tensor(dataset.sig).to(device), model=model,
            optimizer=optimizer, trainLoader=trainLoader)

        improved_gen = train_loss_gen < best_gen_loss
        improved_disc = train_loss_disc < best_disc_loss
        if improved_gen or improved_disc:
            # Bug fix: tighten only the threshold(s) that were actually beaten.
            # The original decayed best_gen_loss regardless of which loss
            # improved and never updated best_disc_loss, so once the disc loss
            # dipped below 0.8 a checkpoint was written on every epoch.
            if improved_gen:
                best_gen_loss *= decay_rate
            if improved_disc:
                best_disc_loss *= decay_rate
            torch.save({
                'gen_model_state_dict': netG.state_dict(),
                'disc_model_state_dict': netD.state_dict(),
                'gen_total': train_loss_gen,
                'disc_total': train_loss_disc,
            }, join(modelPath, "gen_disc_checkpoint"))
def val(args,netG,sig,model,optimizer,trainLoader):
    """Fit and score the ALE classifier `model` on generator output.

    For every batch, the real image features from `trainLoader` are replaced
    by features synthesised from the class attributes (`netG(attrs)`), the
    classifier is updated on them, and its label predictions are accumulated
    into an Evaluator whose metrics are printed at the end.

    NOTE(review): despite the name, this function *trains* `model` (it calls
    optimizer.step()) and evaluates on the same generated data it trains on.
    """
    # 50 is presumably the number of classes tracked by the evaluator — TODO confirm.
    train_evaluator = Evaluator(50)
    # range(0, 1): this loop runs exactly once; kept as written.
    for epoch in tqdm(range(0,1), desc="Current epoch"):
        train_loss = 0.0
        model.train()  # switch the classifier to training mode
        # position=0 and leave=True keep the tqdm bar on a single line
        for idx, data in tqdm(enumerate(trainLoader), total=len(trainLoader), desc=f'Training  epoch {epoch}',
                              position=0, leave=True):
            data = [d.to(device) for d in data]
            imgs, labels, attrs = data
            # Real features are discarded: the classifier is trained on
            # generator output conditioned on the class attributes instead.
            imgs = netG(attrs)
            # Learn to map (generated) image features to attribute/label space.
            loss, label_preds = model(imgs=imgs, attrs=attrs, labels=labels, sig=sig)
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()  # backprop to get per-parameter gradients
            optimizer.step()  # one gradient-descent update
            train_loss += loss.item()  # running total of the loss
            with torch.no_grad():
                train_evaluator.evaluate_predictions(attr_preds=None, label_preds=label_preds.data.cpu(),
                                                     attrs=None, labels=labels.data.cpu())
        metric_attr, metric_label = train_evaluator.compute()
        acc, prec, rec = metric_label.values()
        print(f"test Accuracy on all data: {acc}, {prec}, {rec}")
        train_acc, _, _ = metric_label.values()
        train_evaluator.reset()
def train(epoch, args, netD, netG, trainloader,testLoader, optimizerD, optimizerG, criterion, writer):
    """Run one GAN epoch: alternating D/G updates over `trainloader`, then the
    same update loop over `testLoader`, logging per-epoch mean losses to
    TensorBoard.

    Returns:
        (gen_total, disc_total): the train/test-averaged generator and
        discriminator losses for this epoch.

    NOTE(review): the second loop also calls optimizerD.step() and
    optimizerG.step(), i.e. the networks are *trained* on the validation
    split as well — confirm this is intended.
    """
    loss_train_disc = 0.0
    loss_train_gen = 0.0
    # position=0 and leave=True keep the tqdm bar on a single line
    for idx, data in tqdm(enumerate(trainloader), total=len(trainloader), desc=f'Training  epoch {epoch}',
                          position=0, leave=True):
        data = [d.to(device) for d in data]
        imgs, labels, attrs = data

        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        optimizerD.zero_grad()
        b_size = imgs.size(0)
        disc_loss = get_disc_loss(netG, netD, criterion=criterion, attrs=attrs, real=imgs, b_size=b_size,
                                  z_dim=args.nz)
        loss_train_disc += disc_loss.item()
        disc_loss.backward()
        # update D
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        optimizerG.zero_grad()
        gen_loss = get_gen_loss(netG, netD, criterion=criterion, attrs=attrs, b_size=b_size, z_dim=args.nz)
        # compute generator gradients
        gen_loss.backward()
        # Update G
        optimizerG.step()
        loss_train_gen += gen_loss.item()

    # Per-batch mean losses for the epoch.
    loss_train_gen = loss_train_gen / len(trainloader)
    loss_train_disc = loss_train_disc / len(trainloader)

    writer.add_scalar("Loss/loss_train_gen", loss_train_gen, epoch)
    writer.add_scalar("Loss/loss_train_disc", loss_train_disc, epoch)

    loss_test_disc = 0.0
    loss_test_gen = 0.0
    # Same D/G update loop, but over the validation loader.
    for idx, data in tqdm(enumerate(testLoader), total=len(testLoader), desc=f'Training  epoch {epoch}',
                          position=0, leave=True):
        data = [d.to(device) for d in data]
        _, labels, attrs = data
        # NOTE(review): real image features are discarded here; generated
        # features are passed to get_disc_loss as the "real" batch, so in this
        # loop the discriminator compares fakes against fakes — confirm intended.
        imgs = netG(attrs).to(device)
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        optimizerD.zero_grad()
        b_size = imgs.size(0)
        disc_loss = get_disc_loss(netG, netD, criterion=criterion, attrs=attrs, real=imgs, b_size=b_size,
                                  z_dim=args.nz)
        loss_test_disc += disc_loss.item()
        disc_loss.backward()
        # update D
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        optimizerG.zero_grad()
        gen_loss = get_gen_loss(netG, netD, criterion=criterion, attrs=attrs, b_size=b_size, z_dim=args.nz)
        # compute generator gradients
        gen_loss.backward()
        # Update G
        optimizerG.step()
        loss_test_gen += gen_loss.item()

    loss_test_disc = loss_test_disc / len(testLoader)
    loss_test_gen = loss_test_gen / len(testLoader)

    writer.add_scalar("Loss/loss_test_disc", loss_test_disc, epoch)
    writer.add_scalar("Loss/loss_test_gen", loss_test_gen, epoch)
    # Averages across the two passes; these are what the caller checkpoints on.
    gen_total = (loss_train_gen + loss_test_gen) / 2
    disc_total = (loss_train_disc + loss_test_disc) / 2
    writer.add_scalar("Loss/gen_total", gen_total, epoch)
    writer.add_scalar("Loss/disc_total", disc_total, epoch)
    print('Epoch: {}| loss_train_gen: {},loss_train_disc: {}, loss_test_gen: {},loss_test_disc: {},gen_total: {}, disc_total: {}'
          .format(epoch,round(loss_train_gen,2),round(loss_train_disc,2),round(loss_test_gen, 2),round(loss_test_disc, 2),round(gen_total, 2),round(disc_total, 2)))
    return gen_total,disc_total


def get_disc_loss(gen, disc, criterion, real, attrs, b_size, z_dim):
    """Compute the discriminator loss on one batch.

    Args:
        gen: generator mapping attribute vectors to feature vectors.
        disc: discriminator mapping feature vectors to a real/fake logit.
        criterion: loss on (logits, targets), e.g. nn.BCEWithLogitsLoss.
        real: batch of real feature vectors.
        attrs: batch of attribute vectors conditioning the generator.
        b_size: batch size (kept for interface compatibility; unused).
        z_dim: latent noise dimension (kept for interface compatibility; unused —
            the generator here is conditioned on attrs only, and the original
            code drew a torch.randn noise batch that was never used).

    Returns:
        Mean of the fake-batch loss (target 0) and real-batch loss (target 1).
    """
    # Fake batch: detach() stops this loss from backpropagating into the
    # generator — only the discriminator should learn from it.
    fake = gen(attrs).detach()
    disc_fake_pred = disc(fake)
    # Fakes scored against an all-zeros target.
    disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred))

    # Real batch scored against an all-ones target.
    disc_real_pred = disc(real)
    disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred))

    # Average of the two halves (note: do not shadow the `disc` parameter,
    # as the original code did).
    return (disc_fake_loss + disc_real_loss) / 2


def get_gen_loss(gen, disc, criterion, attrs, b_size, z_dim):
    """Compute the generator loss on one batch: fool the discriminator.

    Args:
        gen: generator mapping attribute vectors to feature vectors.
        disc: discriminator mapping feature vectors to a real/fake logit.
        criterion: loss on (logits, targets), e.g. nn.BCEWithLogitsLoss.
        attrs: batch of attribute vectors conditioning the generator.
        b_size: batch size (kept for interface compatibility; unused).
        z_dim: latent noise dimension (kept for interface compatibility; unused —
            the original drew a torch.randn noise batch that was never used,
            since the generator is conditioned on attrs only).

    Returns:
        Loss of the discriminator's fake scores against an all-ones target.
    """
    fake = gen(attrs)
    disc_fake_pred = disc(fake)
    # Fake labels are "real" (1) for the generator's objective.
    gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))
    return gen_loss


# Script entry point.
if __name__ == '__main__':
    main()
