import os
from datetime import datetime
from os.path import join

import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from ALE.data.dataset_gzsl import Dataset
from ALE.flags import parser
from ALE.models.confg_model import config_model
from common_utils.Evaluator import Evaluator, zsl_acc_gzsl2
from common_utils.utils import load_args

# Run on GPU when one is available; every tensor/model below is moved here.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Extend the shared ALE argument parser with script-specific options.
# NOTE: the help text '训练阶段' means "training phase".
parser.add_argument('--phase', default='train', help='训练阶段')
parser.add_argument('-es', '--early_stop', default=10, type=int)
def main():
    """Entry point: parse args, set up TensorBoard logging, then train.

    Reads the config file named by ``--config`` into ``args`` via
    :func:`load_args`, creates a timestamped TensorBoard log directory under
    ``<cv_dir>/<name>/tensorboard/<phase>/``, and runs :func:`fit_trainval`.
    """
    print(os.getcwd())
    args = parser.parse_args()
    load_args(args.config, args)

    # One log directory per run, keyed by start time, so runs never collide.
    timestamp = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    logpath = os.path.join(args.cv_dir, args.name, 'tensorboard/' + args.phase + "/" + timestamp)
    os.makedirs(logpath, exist_ok=True)

    writer = SummaryWriter(log_dir=logpath, flush_secs=30)
    try:
        # Fix: the result was previously bound to an unused `best_W` variable
        # even though fit_trainval returned None; the call is what matters.
        fit_trainval(args=args, writer=writer)
    finally:
        # Fix: flush and release the TensorBoard writer (was never closed).
        writer.close()


def fit_trainval(args, writer):
    """Train an attribute-prediction model on the trainval set for GZSL.

    Runs full-batch training for ``args.epochs + 1`` epochs, validates after
    every epoch, logs losses to TensorBoard, and checkpoints the model each
    time validation accuracy clears a rising acceptance threshold.

    Args:
        args: parsed namespace with data paths, ``epochs``, ``batch_size``,
            ``cv_dir``, ``name``, ``dataset``, ``phase``, etc.
        writer: TensorBoard ``SummaryWriter`` used for loss curves.

    Returns:
        float: the final checkpoint-acceptance threshold (``best_acc``).
    """
    print('\nTraining on trainval set for GZSL...\n')
    model_path = os.path.join(args.cv_dir, args.name)
    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase=args.phase)
    val_dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='val')
    model, optimizer = config_model(args=args, feat_dim=dataset.feat_dim, attr_dim=dataset.attr_dim)
    val_evaluator = Evaluator(len(val_dataset.labels))
    # Full-batch training: the entire trainval set is one batch per step.
    trainLoader = DataLoader(dataset, batch_size=len(dataset), shuffle=True)
    valLoader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True)
    best_acc = 0.2          # acceptance threshold for saving a checkpoint
    incre_rate = 0.1        # fraction of the remaining gap to 1.0 added per save
    incre_rate_decay = 0.9  # decay applied to incre_rate after each save

    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        # ---- training ----
        train_loss = 0.0
        model.train()
        # position=0 / leave=True keep tqdm on a single progress line.
        for idx, data in tqdm(enumerate(trainLoader), total=len(trainLoader), desc=f'Training  epoch {epoch}',
                              position=0, leave=True):
            imgs, labels, attrs = [d.to(device) for d in data]
            # The model predicts per-image attributes and its own loss.
            loss, attr_preds = model(imgs=imgs, attrs=attrs)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            with torch.no_grad():
                acc = zsl_acc_gzsl2(attr_preds=attr_preds.detach(), y_true=labels,
                                    classes=dataset.train_classes_seen,
                                    sig=dataset.attrs)
                if idx % 5 == 0:
                    print(f"idx={idx},acc={acc}")
        # Fix: the training loss was accumulated but never logged.
        train_loss = train_loss / len(trainLoader)
        writer.add_scalar("Loss/train_total", train_loss, epoch)

        # ---- validation ----
        val_loss = 0.0
        val_batches = 0
        model.eval()  # fix: switch modes once, not on every batch
        with torch.no_grad():  # fix: no gradients are needed for validation
            for idx, data in tqdm(enumerate(valLoader), total=len(valLoader), desc=f'val  epoch {epoch}', position=0,
                                  leave=True):
                imgs, labels, attrs = [d.to(device) for d in data]
                loss, attr_preds = model(imgs=imgs, attrs=attrs)
                val_loss += loss.item()
                val_batches += 1
                val_evaluator.evaluate_predictions(attr_preds=attr_preds.detach(), label_preds=None,
                                                   attrs=val_dataset.attrs, labels=labels)
                # NOTE(review): only the first validation batch is evaluated;
                # this looks like leftover debug code — confirm before removing.
                break
        # Metrics accumulated over the processed batches.
        metric_attr, metric_label = val_evaluator.compute()
        printMetric(metric=metric_attr, phase='val', metric_type='attr')
        printMetric(metric=metric_label, phase='val', metric_type='label')
        val_evaluator.reset()
        # Fix: average over the batches actually processed (the loop breaks
        # early), not over len(valLoader), which understated the loss.
        val_loss = val_loss / max(val_batches, 1)
        writer.add_scalar("Loss/val_total", val_loss, epoch)
        print('Epoch: {}| val Loss: {}'.format(epoch, round(val_loss, 2)))

        # Prefer the attribute metric; fall back to the label metric.
        if metric_attr is not None:
            val_acc, _, _ = metric_attr.values()
        else:
            val_acc, _, _ = metric_label.values()
        if val_acc > best_acc:
            # Raise the threshold a (decaying) fraction of the way toward 1.0
            # rather than to val_acc itself — checkpoints get progressively
            # harder to beat.
            best_acc += (1 - best_acc) * incre_rate
            incre_rate *= incre_rate_decay
            torch.save({
                'epoch': epoch,
                'train_loss': train_loss,
                'metric': metric_attr,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, join(model_path, "checkpoint"))
    # Fix: main() binds this function's result, which used to be None.
    return best_acc

def printMetric(metric, phase, metric_type):
    """Print the (accuracy, precision, recall) triple held in *metric*.

    Does nothing when *metric* is None, i.e. when the evaluator produced no
    result for this metric type.
    """
    if metric is not None:
        acc, prec, rec = metric.values()
        print(f"{phase} {metric_type}  Accuracy on all data: {acc}, {prec}, {rec}")
# Script entry point — only runs when executed directly, not on import.
if __name__ == '__main__':
    main()
