import os
from datetime import datetime
from os.path import join

import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from ALE.data.dataset_gzsl import Dataset
from ALE.flags import parser
from ALE.models.confg_model import config_model
from common_models import gan
from common_utils.Evaluator import Evaluator, zsl_acc_gzsl2, zsl_acc2
from common_utils.utils import load_args

# Prefer GPU when available; all tensors/models below are moved to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Extend the shared ALE parser with an early-stop patience flag (in epochs).
parser.add_argument('-es', '--early_stop', default=10, type=int)
def main():
    """Parse CLI arguments, merge the config file, and launch GZSL training.

    Side effects: prints the working directory and delegates all training to
    ``fit_trainval`` using ``<cv_dir>/<name>`` as the checkpoint directory.
    """
    print(os.getcwd())
    args = parser.parse_args()
    # load_args overlays values from the YAML/JSON config onto the namespace.
    load_args(args.config, args)
    modelPath = os.path.join(args.cv_dir, args.name)
    fit_trainval(args=args, modelPath=modelPath)


def fit_trainval(args, modelPath):
    """Train the attribute-prediction model on the trainval split for GZSL.

    Each epoch runs two full-batch passes: one over real trainval features and
    one over features synthesized by a pre-trained, frozen generator from the
    test split's attributes (a common GZSL trick to cover unseen classes).
    A model checkpoint is saved whenever the training accuracy exceeds a
    moving threshold that creeps toward 1.0 on every save.

    Args:
        args: parsed/merged config namespace; reads data_root, data_dir,
            dataset, attSize, resSize, ngh and epochs.
        modelPath: directory containing the generator checkpoint
            ("gen_disc_checkpoint") and where the model checkpoint is written.
    """
    print('\nTraining on trainval set for GZSL...\n')
    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='trainval')
    test_dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='test')
    model, optimizer = config_model(args=args, feat_dim=dataset.feat_dim, attr_dim=dataset.attr_dim)
    # Full-batch loaders: batch_size == len(split), so each loop body runs once
    # per epoch over the entire split.
    trainLoader = DataLoader(dataset, batch_size=len(dataset), shuffle=True)
    testLoader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=True)
    best_acc = 0.5
    incre_rate = 0.1  # fraction of the remaining gap added to best_acc on save
    incre_rate_decay = 0.9  # decay of that fraction after each save
    sig = dataset.sig.to(device)
    # Pre-trained generator: attributes -> synthetic image features. It is only
    # used for inference here; its weights are never updated.
    netG = gan.MLP_Generator(input_dim=args.attSize, output_dim=args.resSize,
                             layers=[args.ngh, args.ngh * 2, args.ngh]).to(device)
    checkpoint = torch.load(join(modelPath, "gen_disc_checkpoint"), map_location=device)
    netG.load_state_dict(checkpoint['gen_model_state_dict'])
    # Guard: keep train_acc defined even if the train loader yields no batches,
    # since it is read after the loop.
    train_acc = 0.0
    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        train_loss = 0.0
        model.train()  # switch to training mode
        # position=0 / leave=True keep the progress bar on a single line.
        for idx, data in tqdm(enumerate(trainLoader), total=len(trainLoader), desc=f'Training  epoch {epoch}',
                              position=0, leave=True):
            data = [d.to(device) for d in data]
            imgs, labels, attrs = data
            # Learn to predict class attribute vectors from image features.
            loss, attr_preds = model(imgs=imgs, attrs=attrs, labels=labels, sig=sig)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()  # running total for this epoch
            with torch.no_grad():
                train_acc = zsl_acc2(attr_preds=attr_preds.data.cpu(), y_true=labels.data.cpu(),
                                     sig=dataset.attrs.data.cpu())
                if idx % 5 == 0:
                    print(f"idx={idx},train_loss={train_loss},acc={train_acc}")
        # Second pass: train on generator-synthesized features for the test
        # split so the model also sees (synthetic) unseen-class features.
        for idx, data in tqdm(enumerate(testLoader), total=len(testLoader), desc=f'Training  epoch {epoch}',
                              position=0, leave=True):
            data = [d.to(device) for d in data]
            _, labels, attrs = data
            # Synthesize features without tracking gradients: netG is frozen,
            # so backprop through it would only waste compute and accumulate
            # stale .grad on its parameters.
            with torch.no_grad():
                imgs = netG(attrs)
            loss, attr_preds = model(imgs=imgs, attrs=attrs, labels=labels, sig=sig)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            with torch.no_grad():
                gen_train_acc = zsl_acc2(attr_preds=attr_preds.data.cpu(), y_true=labels.data.cpu(),
                                         sig=test_dataset.attrs.data.cpu())
                if idx % 5 == 0:
                    print(f"gen idx={idx},train_loss={train_loss},acc={gen_train_acc}")
        if train_acc > best_acc:
            # Ratchet the threshold toward 1.0 by a decaying fraction of the
            # remaining gap, so later saves require genuine improvement.
            best_acc += (1 - best_acc) * incre_rate
            incre_rate *= incre_rate_decay
            torch.save({
                'epoch': epoch,
                'train_acc': train_acc,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, join(modelPath, "checkpoint"))
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
