import os
import random
import time
from datetime import datetime
from os.path import join

import numpy as np
import torch
from numpy.random import shuffle
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from ALE.data.dataset_gzsl import Dataset
from ALE.flags import parser
from ALE.models.confg_model import config_model
from common_models import gan
from common_utils.Evaluator import zsl_acc, zsl_acc_gzsl
from common_utils.utils import load_args, l2_normalize

# Run on GPU when one is available; tensors below are moved to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Script-specific CLI flags added on top of the shared ALE parser.
parser.add_argument('--phase', default='train', help='训练阶段')  # help text translates to "training phase"
parser.add_argument('-es', '--early_stop', default=10, type=int)  # patience (in epochs) for early stopping

# NOTE(review): the string below is a stray expression statement, not a module
# docstring (a docstring must be the FIRST statement of the module). It reads:
# "Simulate ALE generalized zero-shot learning".
"""
模拟ale的广义零样本学习
"""
def main():
    """Entry point: parse CLI args, set up TensorBoard logging, train on trainval.

    Creates a timestamped TensorBoard log directory under
    ``<cv_dir>/<name>/tensorboard/<phase>/`` and runs GZSL training via
    :func:`fit_trainval`. (:func:`fit_train` is the alternative
    validation-split phase used to pick the epoch count.)

    Returns:
        The best compatibility matrix W found by ``fit_trainval``.
    """
    print(os.getcwd())
    args = parser.parse_args()
    load_args(args.config, args)
    timestamp = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    logpath = os.path.join(args.cv_dir, args.name, 'tensorboard/' + args.phase + "/" + timestamp)
    os.makedirs(logpath, exist_ok=True)

    writer = SummaryWriter(log_dir=logpath, flush_secs=30)
    try:
        best_W = fit_trainval(args)
    finally:
        # Fix: the writer was previously never closed, leaking the event-file
        # handle and risking unflushed TensorBoard data on exit.
        writer.close()
    return best_W


def fit_trainval(args):
    """Train the ALE bilinear compatibility matrix W on the trainval split (GZSL).

    Runs ranking-based SGD updates (:func:`update_W`) for ``args.epochs``
    epochs over a single full-split batch, keeps the W with the best trainval
    accuracy, and reports generalized zero-shot metrics (seen acc, unseen acc,
    harmonic mean) after every epoch.

    Args:
        args: parsed CLI namespace; must provide ``data_root``, ``data_dir``,
            ``dataset``, ``epochs`` and ``lr``.

    Returns:
        torch.Tensor: the best-performing W (feat_dim x attr_dim).
    """
    print('\nTraining on trainval set for GZSL...\n')
    best_tr_acc = 0.0
    best_tr_ep = -1
    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='trainval')
    # Full-batch loader: one batch per epoch containing the whole split.
    trainLoader = DataLoader(dataset, batch_size=len(dataset), shuffle=True)
    W = torch.rand(dataset.feat_dim, dataset.attr_dim).to(device)
    W = l2_normalize(W.T).T
    # Fix: guarantee best_W is defined even if accuracy never improves on 0.0
    # (previously evaluation below could raise NameError).
    best_W = torch.clone(W)

    # beta[i] is the i-th harmonic number H_i = sum_{j=1..i} 1/j, the
    # rank-based weight table of the ALE update. Computed with a running sum
    # (O(n)) instead of the original O(n^2) double loop; the summation order
    # is unchanged, so the values are bit-identical.
    beta = np.zeros(len(np.unique(dataset.labels)))
    harmonic = 0.0
    for i in range(1, beta.shape[0]):
        harmonic += 1.0 / i
        beta[i] = harmonic

    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        # position=0 and leave=True keep the progress bar on a single line.
        for idx, data in tqdm(enumerate(trainLoader), total=len(trainLoader), desc=f'Training  epoch {epoch}',
                              position=0, leave=True):
            start = time.time()
            data = [d.to(device) for d in data]
            imgs, labels, attrs = data
            # Learn the image-feature -> attribute compatibility.
            W = update_W(W, imgs=imgs, attrs=attrs, labels=labels, sig=dataset.attrs, beta=beta, args=args)
            tr_acc = zsl_acc(X=imgs.data.cpu(), W=W.data.cpu(), y_true=labels.data.cpu(),
                             sig=dataset.attrs.data.cpu())
            elapsed = time.time() - start

            # Fix: report the epoch counter, not the batch index (the loader
            # yields exactly one full batch, so idx + 1 was always 1).
            print('Epoch:{}; Trainval Acc:{}; Time taken:{:.0f}m {:.0f}s\n'.format(epoch + 1, tr_acc, elapsed // 60,
                                                                                   elapsed % 60))

            if tr_acc > best_tr_acc:
                # Fix: track the epoch number, not the (constant) batch index.
                best_tr_ep = epoch + 1
                best_tr_acc = tr_acc
                best_W = torch.clone(W)

        print('Best Trainval Acc:{} @ Epoch {}\n'.format(best_tr_acc, best_tr_ep))

        acc_seen_classes = zsl_acc_gzsl(X=dataset.test_seen_data, W=best_W,
                                        y_true=dataset.labels_test_seen, classes=dataset.test_classes_seen,
                                        sig=dataset.test_attrs)
        acc_unseen_classes = zsl_acc_gzsl(X=dataset.test_unseen_data, W=best_W,
                                          y_true=dataset.labels_test_unseen, classes=dataset.test_classes_unseen,
                                          sig=dataset.test_attrs)
        # Fix: evaluate with best_W for consistency with the seen/unseen
        # metrics above (previously this call alone used the current W).
        acc_gzsl_classes = zsl_acc_gzsl(X=dataset.feat, W=best_W,
                                        y_true=dataset.labels_all, classes=np.unique(dataset.labels_all),
                                        sig=dataset.sig)
        # Harmonic mean of seen/unseen accuracy; guard the degenerate 0/0 case.
        denom = acc_seen_classes + acc_unseen_classes
        HM = 2 * acc_seen_classes * acc_unseen_classes / denom if denom else 0.0
        print(f'U:{acc_unseen_classes}; S:{acc_seen_classes}; H:{HM},acc_gzsl_classes: {acc_gzsl_classes}')

    return best_W
def fit_train(args):
    """Train W on the train split and model-select on the validation split.

    Tracks the epoch with the best validation accuracy so the caller can
    retrain on trainval for that many epochs (see :func:`fit_trainval`).
    Stops early once validation accuracy has not improved for
    ``args.early_stop`` consecutive epochs.

    Args:
        args: parsed CLI namespace; must provide ``data_root``, ``data_dir``,
            ``dataset``, ``epochs``, ``early_stop`` and ``lr``.

    Returns:
        int: the 1-based epoch index with the best validation accuracy.
    """
    print('Training on train set...\n')
    best_val_acc = 0.0
    best_tr_acc = 0.0
    best_val_ep = -1
    best_tr_ep = -1
    dataset = Dataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset, phase='train')
    W = torch.rand(dataset.feat_dim, dataset.attr_dim).to(device)
    W = l2_normalize(W.T).T

    # beta[i] is the i-th harmonic number H_i = sum_{j=1..i} 1/j (rank-weight
    # table for the ALE update). Running sum keeps this O(n); the summation
    # order matches the original double loop, so the values are bit-identical.
    beta = np.zeros(len(np.unique(dataset.labels)))
    harmonic = 0.0
    for i in range(1, beta.shape[0]):
        harmonic += 1.0 / i
        beta[i] = harmonic

    # Full-batch loader: one batch per epoch containing the whole split.
    trainLoader = DataLoader(dataset, batch_size=len(dataset), shuffle=True)

    # Fix: a bare `break` only left the (single-batch) inner loop, so early
    # stopping never actually stopped training; a flag now exits both loops.
    stop_early = False
    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        if stop_early:
            break
        # position=0 and leave=True keep the progress bar on a single line.
        for idx, data in tqdm(enumerate(trainLoader), total=len(trainLoader), desc=f'Training  epoch {epoch}',
                              position=0, leave=True):
            start = time.time()
            data = [d.to(device) for d in data]
            imgs, labels, attrs = data
            # Learn the image-feature -> attribute compatibility.
            W = update_W(W, imgs=imgs, attrs=attrs, labels=labels, sig=dataset.attrs, beta=beta, args=args)
            tr_acc = zsl_acc(X=imgs.data.cpu(), W=W.data.cpu(), y_true=labels.data.cpu(),
                             sig=dataset.attrs.data.cpu())
            # NOTE(review): X is fed dataset.val_classes_seen here while every
            # other zsl_acc call passes a feature matrix — verify this should
            # not be the validation feature matrix instead.
            val_acc = zsl_acc(X=dataset.val_classes_seen, W=W, y_true=dataset.labels_val, sig=dataset.val_attrs)
            elapsed = time.time() - start
            print('Epoch:{}; Train Acc:{}; Val Acc:{}; Time taken:{:.0f}m {:.0f}s\n'.format(epoch + 1, tr_acc, val_acc,
                                                                                            elapsed // 60,
                                                                                            elapsed % 60))
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                best_val_ep = epoch + 1

            if tr_acc > best_tr_acc:
                best_tr_ep = epoch + 1
                best_tr_acc = tr_acc

            # No validation improvement for `early_stop` epochs -> stop.
            if epoch + 1 - best_val_ep > args.early_stop:
                print('Early Stopping by {} epochs. Exiting...'.format(args.epochs - (epoch + 1)))
                stop_early = True
                break
    print('Best Val Acc:{} @ Epoch {}. Best Train Acc:{} @ Epoch {}\n'.format(best_val_acc, best_val_ep, best_tr_acc, best_tr_ep))
    return best_val_ep
def update_W(W, imgs, attrs=None, labels=None, sig=None, beta=None, args=None):
    """One ALE ranking-based SGD pass over a batch.

    For each sample (visited in random order), wrong classes are sampled
    uniformly with replacement until one violates the margin; on the first
    violation W receives a rank-weighted update and the search moves on to the
    next sample (WSABIE/ALE-style approximate ranking objective).

    Args:
        W: (feat_dim, attr_dim) compatibility matrix. Mutated IN PLACE and
            also returned for convenience.
        imgs: (batch, feat_dim) image features.
        attrs: (batch, attr_dim) per-sample ground-truth attribute vectors
            (assumed to equal ``sig[labels[j]]`` — TODO confirm against Dataset).
        labels: (batch,) integer class labels.
        sig: class-attribute matrix, indexable by label.
        beta: harmonic-number table; ``beta[k]`` weights the update by the
            estimated rank of the ground-truth class.
        args: must provide ``args.lr`` (learning rate).

    Returns:
        The (mutated) matrix W.
    """
    rand_idx = np.arange(len(imgs))
    shuffle(rand_idx)
    train_classes = torch.unique(labels)
    lr = args.lr  # hoist loop-invariant attribute lookup out of the loops
    for j in rand_idx:
        X_n = imgs[j]
        y_n = labels[j]
        # Candidate wrong classes for this sample.
        y_ = train_classes[train_classes != y_n]
        XW = torch.matmul(X_n, W)
        gt_class_score = torch.matmul(XW, attrs[j])
        for i in range(len(y_)):
            # Sampling with replacement; i + 1 draws so far estimate the rank.
            label = random.choice(y_)
            score = 1 + torch.matmul(XW, sig[label]) - gt_class_score
            if score > 0:
                # Margin violated: push W toward the true class attributes
                # and away from the sampled wrong class.
                Y = (sig[y_n] - sig[label]).unsqueeze(dim=0)
                delta = torch.matmul(X_n.unsqueeze(dim=-1), Y)
                # Rank weight beta[floor((C-1)/k)] after k draws.
                be = beta[int(y_.shape[0] / (i + 1))]
                W += lr * be * delta
                break
    return W
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
