import os
from datetime import datetime
from os.path import join

import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from Expreriment.data.dataset import Dataset
from Expreriment.flags import parser
from Expreriment.models.confg_model import config_model
from common_utils.Evaluator import Evaluator
from common_utils.utils import load_args

from models.model import MLP_Generator as Generator,MLP_Discriminator as Discriminator

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Extends the project-shared argument parser; the help text '训练阶段' means "training phase".
parser.add_argument('--phase', default='train', help='训练阶段')
def main():
    """Train the attribute/label prediction model and validate it each epoch.

    The dataset is split 80/20 into train/val subsets. Training runs on real
    image features; validation runs on features synthesized by a pre-trained
    GAN generator (``netG``) from class attributes. Whenever validation
    accuracy beats the moving ``best_acc`` threshold, a checkpoint and a
    per-class dictionary of predicted attributes are saved under the model
    directory.
    """
    print(os.getcwd())
    args = parser.parse_args()
    load_args(args.config, args)
    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    logpath = os.path.join(args.cv_dir, args.name, 'tensorboard/' + args.phase + "/" + TIMESTAMP)
    os.makedirs(logpath, exist_ok=True)

    modelPath = os.path.join(args.cv_dir, args.name)
    # Generator: restore pre-trained GAN weights, move it to the compute device
    # and freeze it in eval mode. (Previously it stayed on CPU and in train
    # mode, which breaks when inputs are on CUDA and lets BatchNorm/Dropout
    # perturb validation.)
    netG = Generator(opt=args)
    gan_path = join(modelPath, "gan_feature_AWA2_455.pkl")
    checkpoint = torch.load(gan_path, map_location=torch.device(device))
    netG.load_state_dict(checkpoint['gen'])
    netG.to(device)
    netG.eval()

    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset,
                      phase=args.phase, image_root=args.image_root)
    model, optimizer = config_model(args=args, dataset=dataset)
    train_evaluator = Evaluator(args.n_classes)
    val_evaluator = Evaluator(args.n_classes)

    # 80/20 random train/validation split.
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    trainSet, valSet = torch.utils.data.random_split(dataset, [train_size, val_size])
    trainLoader = DataLoader(trainSet, batch_size=args.batch_size, shuffle=True, drop_last=True)
    valLoader = DataLoader(valSet, batch_size=args.batch_size, shuffle=True, drop_last=True)

    writer = SummaryWriter(log_dir=logpath, flush_secs=30)
    # best_acc ratchets toward 1.0 by a decaying fraction of the remaining
    # headroom each time it is beaten, so the save criterion gets stricter.
    best_acc = 0.5
    incre_rate = 0.1        # increment rate applied to the remaining headroom
    incre_rate_decay = 0.9  # decay applied to incre_rate after each save

    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        train_loss = 0.0
        model.train()  # switch to training mode
        # position=0 / leave=True keep the nested tqdm bar on a single line.
        for idx, data in tqdm(enumerate(trainLoader), total=len(trainLoader),
                              desc=f'Training  epoch {epoch}', position=0, leave=True):
            data = [d.to(device) for d in data]
            imgs, labels, attrs = data
            # The model learns object attributes from the image features.
            loss, attr_preds, label_preds = model(imgs=imgs, attrs=attrs, labels=labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            with torch.no_grad():
                train_evaluator.evaluate_predictions(attr_preds=None, label_preds=label_preds.detach(),
                                                     attrs=dataset.attrs, labels=labels)
        metric_attr, metric_label = train_evaluator.compute()
        printMetric(metric=metric_attr, phase='train', metric_type='attr')
        printMetric(metric=metric_label, phase='train', metric_type='label')
        train_evaluator.reset()
        train_loss = train_loss / len(trainLoader)
        writer.add_scalar("Loss/train_total", train_loss, epoch)
        print('Epoch: {}| train Loss: {}'.format(epoch, round(train_loss, 2)))
        # NOTE(review): the original also printed train_loss_G / train_loss_D,
        # but they were never accumulated and always reported 0, so the dead
        # tracking and its misleading print were removed.

        val_loss = 0.0
        model.eval()  # hoisted: was re-invoked for every validation batch
        attr_preds, labels = None, None  # kept from the last val batch for the attribute dump below
        with torch.no_grad():  # validation must not build autograd graphs
            for idx, data in tqdm(enumerate(valLoader), total=len(valLoader),
                                  desc=f'val  epoch {epoch}', position=0, leave=True):
                data = [d.to(device) for d in data]
                _, labels, attrs = data
                # Validate on GAN-synthesized features instead of real images.
                imgs = netG(attrs, labels)
                loss, attr_preds, label_preds = model(imgs=imgs, attrs=attrs, labels=labels)
                val_loss += loss.item()
                val_evaluator.evaluate_predictions(attr_preds=None, label_preds=label_preds.detach(),
                                                   attrs=dataset.attrs, labels=labels)
        # Metric over all validation batches via the evaluator's accumulation.
        metric_attr, metric_label = val_evaluator.compute()
        printMetric(metric=metric_attr, phase='val', metric_type='attr')
        printMetric(metric=metric_label, phase='val', metric_type='label')
        val_evaluator.reset()
        val_loss = val_loss / len(valLoader)
        writer.add_scalar("Loss/val_total", val_loss, epoch)
        print('Epoch: {}| val Loss: {}'.format(epoch, round(val_loss, 2)))

        # Prefer the attribute metric when available, else fall back to labels.
        if metric_attr is not None:
            val_acc, _, _ = metric_attr.values()
        else:
            val_acc, _, _ = metric_label.values()
        if val_acc > best_acc:
            best_acc += (1 - best_acc) * incre_rate
            incre_rate *= incre_rate_decay
            torch.save({
                'epoch': epoch,
                'train_loss': train_loss,
                'metric': metric_attr,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, join(modelPath, "checkpoint"))
            # Dump one predicted attribute vector per class, taken from the last
            # validation batch; classes absent from that batch are skipped.
            # 50 is the hard-coded AWA2 class count — presumably this should be
            # args.n_classes; TODO confirm before reusing on another dataset.
            class_attrs = {}
            if attr_preds is not None:
                for cls in range(50):
                    # Index of the first sample of this class in the batch, if any.
                    indices = torch.where(torch.eq(labels, torch.tensor(cls, dtype=torch.long)))
                    if len(indices[0]) > 0:
                        class_attrs[cls] = attr_preds[indices[0][0]]
            torch.save(class_attrs, join(modelPath, "attrs"))
    writer.close()  # flush any pending TensorBoard events
def printMetric(metric, phase, metric_type):
    """Print an (accuracy, precision, recall) metric triple.

    Silently does nothing when ``metric`` is None (e.g. the evaluator produced
    no metric of this type for the epoch).
    """
    if metric is None:
        return
    accuracy, precision, recall = metric.values()
    print("{} {}  Accuracy on all data: {}, {}, {}".format(
        phase, metric_type, accuracy, precision, recall))

# Script entry point: only runs training when executed directly, not on import.
if __name__ == '__main__':
    main()