import os
from datetime import datetime
from os.path import join

import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from ALE.data.dataset_gzsl2 import Dataset
from ALE.flags import parser
from ALE.models.confg_model import config_model
from common_models import gan
from common_utils.Evaluator import Evaluator, zsl_acc_gzsl2, zsl_acc2
from common_utils.utils import load_args

# Run on GPU when one is available; every tensor/model below is moved to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Early-stopping patience (epochs). NOTE(review): '--early_stop' is parsed but never
# read in this file — presumably consumed elsewhere; confirm before removing.
parser.add_argument('-es', '--early_stop', default=10, type=int)
def main():
    """Parse CLI arguments, overlay the config file, and launch GZSL training.

    Reads ``--config`` via ``load_args`` (which merges config-file values into
    the parsed namespace) and derives the model checkpoint directory from
    ``args.cv_dir``/``args.name``.
    """
    print(os.getcwd())
    args = parser.parse_args()
    # Merge values from the file referenced by --config into args in place.
    load_args(args.config, args)
    modelPath = os.path.join(args.cv_dir, args.name)
    fit_trainval(args=args, modelPath=modelPath)


def fit_trainval(args, modelPath):
    """Train the attribute/label model for GZSL on generator-synthesized features.

    A pre-trained generator (loaded from ``modelPath/gen_disc_checkpoint``) maps
    class attribute vectors to synthetic visual features. Each epoch runs one
    optimization pass over the trainval split and one over the test split — the
    second pass also updates the model, which is the generative-GZSL trick of
    training on synthesized features of (unseen) test classes; its metrics are
    reported under the ``gen_train`` phase.

    A model checkpoint is saved whenever train accuracy exceeds a moving
    threshold that creeps toward 1.0 with a decaying increment rate.

    Args:
        args: parsed/merged CLI namespace (data paths, model sizes, epochs...).
        modelPath: directory holding the generator checkpoint; the trained
            model checkpoint is written there as ``checkpoint``.
    """
    print('\nTraining on trainval set for GZSL...\n')
    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='trainval')
    test_dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='test')
    model, optimizer = config_model(args=args, feat_dim=dataset.feat_dim, attr_dim=dataset.attr_dim)
    # Full-batch loaders: one batch == the whole split.
    trainLoader = DataLoader(dataset, batch_size=len(dataset), shuffle=True)
    testLoader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=True)
    train_evaluator = Evaluator(50)
    val_evaluator = Evaluator(50)
    best_acc = 0.5
    incre_rate = 0.1  # increment rate for the checkpoint acceptance threshold
    incre_rate_decay = 0.9  # decay applied to incre_rate after each new checkpoint
    sig = dataset.sig.to(device)
    # Pre-trained generator: attributes -> synthetic visual features.
    netG = gan.MLP_Generator(input_dim=args.attSize, output_dim=args.resSize,
                             layers=[args.ngh, args.ngh * 2, args.ngh]).to(device)
    checkpoint = torch.load(join(modelPath, "gen_disc_checkpoint"), map_location=device)
    netG.load_state_dict(checkpoint['gen_model_state_dict'])
    netG.eval()  # generator is frozen here; inference mode only
    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        model.train()  # switch to training mode
        # Pass 1: real trainval split (features re-synthesized from attributes).
        _run_generated_epoch(trainLoader, model, optimizer, netG, sig,
                             train_evaluator, epoch)
        metric_attr, metric_label = train_evaluator.compute()
        printMetric(metric=metric_label, phase='train', metric_type='label')
        train_acc, _, _ = metric_label.values()
        train_evaluator.reset()
        # Pass 2: test split — also trains on generated features of test classes.
        _run_generated_epoch(testLoader, model, optimizer, netG, sig,
                             val_evaluator, epoch)
        metric_attr, metric_label = val_evaluator.compute()
        printMetric(metric=metric_label, phase='gen_train', metric_type='label')
        val_evaluator.reset()
        if train_acc > best_acc:
            # Raise the bar toward 1.0, by a geometrically shrinking step.
            best_acc += (1 - best_acc) * incre_rate
            incre_rate *= incre_rate_decay
            torch.save({
                'epoch': epoch,
                'train_acc': train_acc,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, join(modelPath, "checkpoint"))


def _run_generated_epoch(loader, model, optimizer, netG, sig, evaluator, epoch):
    """One optimization pass over ``loader`` using generator-synthesized features.

    Returns the summed batch loss (``float``) for optional logging.
    """
    total_loss = 0.0
    # position=0 / leave=True keep the nested progress bar on a single line.
    for idx, data in tqdm(enumerate(loader), total=len(loader), desc=f'Training  epoch {epoch}',
                          position=0, leave=True):
        data = [d.to(device) for d in data]
        _, labels, attrs = data  # dataset features are discarded; generated ones are used
        # The generator is frozen: synthesize without building an autograd graph,
        # so backward() does not waste time/memory backpropagating through netG.
        with torch.no_grad():
            imgs = netG(attrs)
        # Learn the object's attribute vector from the (synthetic) image features.
        loss, label_preds = model(imgs=imgs, attrs=attrs, labels=labels, sig=sig)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        with torch.no_grad():
            evaluator.evaluate_predictions(attr_preds=None, label_preds=label_preds.data.cpu(),
                                           attrs=None, labels=labels.data.cpu())
    return total_loss

def get_gen_loss(gen, disc, criterion, attrs, b_size, z_dim):
    """Generator adversarial loss: how well the fakes fool the discriminator.

    Args:
        gen: generator mapping attribute vectors to fake features.
        disc: discriminator scoring features (real vs. fake logits).
        criterion: loss function, e.g. ``BCEWithLogitsLoss``.
        attrs: batch of attribute vectors fed to the generator.
        b_size, z_dim: unused (the generator is conditioned on ``attrs`` only);
            kept for backward compatibility with existing callers.

    Returns:
        Scalar loss tensor.
    """
    fake = gen(attrs)
    disc_fake_pred = disc(fake)
    # Fake labels are "real" for the generator cost: the generator wants the
    # discriminator to score its output as genuine.
    label = torch.ones_like(disc_fake_pred)
    return criterion(disc_fake_pred, label)
def printMetric(metric, phase, metric_type):
    """Print an (accuracy, precision, recall) metric mapping to stdout.

    No-op when ``metric`` is None. ``metric`` is expected to hold exactly
    three values, in (accuracy, precision, recall) order.
    """
    if metric is not None:
        acc, prec, rec = tuple(metric.values())
        print(f"{phase} {metric_type}  Accuracy on all data: {acc}, {prec}, {rec}")
# Script entry point.
if __name__ == '__main__':
    main()
