import os.path
from argparse import Namespace

import matplotlib.pyplot as plt
import numpy as np
import torch.cuda
from torch import optim
from tqdm import tqdm_notebook
import seaborn as sns

from NMTDemo.Dataset.NMTDataset import NMTDataset, generate_nmt_batches
from NMTDemo.GeneralUtilities.GeneralUtilities import set_seed_everywhere, handle_dirs, make_train_state, sequence_loss, \
    compute_accuracy, update_train_state
from NMTDemo.GeneralUtilities.NMTSampler import NMTSampler
from NMTDemo.NMTModel.NMTModel import NMTModel


# Runtime configuration and hyper-parameters for the NMT demo script.
_config = dict(
    # data and artifact locations (filenames are expanded under save_dir later)
    dataset_csv="data/nmt/simplest_eng_fra.csv",
    vectorizer_file="vectorizer.json",
    model_state_file="model.pth",
    save_dir="model/nmt",
    reload_from_files=True,
    expand_filepaths_to_save_dir=True,
    # runtime options
    cuda=False,
    seed=1337,
    catch_keyboard_interrupt=True,
    # training hyper-parameters
    learning_rate=5e-4,
    batch_size=64,
    num_epochs=100,
    early_stopping_criteria=5,
    # model dimensions
    source_embedding_size=64,
    target_embedding_size=64,
    encoding_size=64,
)
args = Namespace(**_config)

if __name__ == '__main__':
    # Expand the bare artifact filenames into paths under the save directory.
    args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file)
    args.model_state_file = os.path.join(args.save_dir, args.model_state_file)
    print("Expanded filepaths:")
    print("\t{}".format(args.vectorizer_file))
    print("\t{}".format(args.model_state_file))

    # Fall back to CPU when no CUDA device is actually available.
    args.cuda = args.cuda and torch.cuda.is_available()

    # Resolve the torch device from the (possibly downgraded) cuda flag.
    args.device = torch.device("cuda" if args.cuda else "cpu")
    print("Using CUDA:{}".format(args.cuda))

    # Seed every RNG for reproducibility, then ensure the save dir exists.
    set_seed_everywhere(args.seed, args.cuda)
    handle_dirs(args.save_dir)

    # Restore a previously-saved vectorizer when possible; otherwise build one
    # from the CSV and persist it for future runs.
    have_vectorizer = os.path.exists(args.vectorizer_file)
    if args.reload_from_files and have_vectorizer:
        dataset = NMTDataset.load_dataset_and_load_vectorizer(args.dataset_csv,
                                                              args.vectorizer_file)
    else:
        dataset = NMTDataset.load_dataset_and_make_vectorizer(args.dataset_csv)
        dataset.save_vectorizer(args.vectorizer_file)

    vectorizer = dataset.get_vectorizer()

    # Build the seq2seq model from the vocabulary sizes and configured dims.
    model = NMTModel(
        source_vocab_size=len(vectorizer.source_vocab),
        source_embedding_size=args.source_embedding_size,
        target_vocab_size=len(vectorizer.target_vocab),
        target_embedding_size=args.target_embedding_size,
        encoding_size=args.encoding_size,
        target_bos_index=vectorizer.target_vocab.begin_seq_index,
    )

    # Warm-start from a checkpoint when one is on disk.
    if args.reload_from_files and os.path.exists(args.model_state_file):
        model.load_state_dict(torch.load(args.model_state_file))
        print("Reloaded model")
    else:
        print("New model")

    model = model.to(args.device)

    # Adam optimizer; the scheduler halves the learning rate after the
    # monitored (validation) loss plateaus for more than one epoch.
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer, mode='min', factor=0.5, patience=1)

    mask_index = vectorizer.target_vocab.mask_index
    train_state = make_train_state(args)

    # Notebook progress bars: one for epochs, and one per data split that is
    # reset at the end of every epoch.
    epoch_bar = tqdm_notebook(desc='training routine', total=args.num_epochs,
                              position=0)

    dataset.set_split('train')
    train_bar = tqdm_notebook(desc='split=train',
                              total=dataset.get_num_batches(args.batch_size),
                              position=1, leave=True)

    dataset.set_split('val')
    val_bar = tqdm_notebook(desc='split=val',
                            total=dataset.get_num_batches(args.batch_size),
                            position=1, leave=True)

    # NOTE(review): the training loop below is deliberately commented out --
    # this script currently performs evaluation only, using a previously saved
    # checkpoint.  If it is ever re-enabled, beware that `loss.backward()` is
    # missing between the loss computation and `optimizer.step()`, so as
    # written the optimizer would step with no fresh gradients and the model
    # would never learn.
    # try:
    #     for epoch_index in range(args.num_epochs):
    #         train_state['epoch_index']=epoch_index
    #         # iterate over the training split
    #         dataset.set_split('train')
    #         batch_generator=generate_nmt_batches(dataset,batch_size=args.batch_size,device=args.device)
    #         running_loss=0.0
    #         running_acc=0.0
    #         model.train()
    #
    #         for batch_index,batch_dict in enumerate(batch_generator):
    #             # zero the gradients
    #             optimizer.zero_grad()
    #             # compute the model output
    #             y_pred=model(batch_dict['x_source'],
    #                          batch_dict['x_source_length'],
    #                          batch_dict['x_target'])
    #             # compute the loss
    #             loss=sequence_loss(y_pred,batch_dict['y_target'],mask_index)
    #             # BUG if re-enabled: loss.backward() must be called here,
    #             # before optimizer.step()
    #             optimizer.step()
    #             running_loss+=(loss.item()-running_loss)/(batch_index+1)
    #             # compute the running accuracy
    #             acc_t=compute_accuracy(y_pred,batch_dict['y_target'],mask_index)
    #             running_acc+=(acc_t-running_acc)/(batch_index+1)
    #             # update the progress bar
    #             train_bar.set_postfix(loss=running_loss,acc=running_acc,
    #                                   epoch=epoch_index)
    #             train_bar.update()
    #
    #         train_state['train_loss'].append(running_loss)
    #         train_state['train_acc'].append(running_acc)
    #
    #         dataset.set_split('val')
    #         batch_generator=generate_nmt_batches(dataset,
    #                                             batch_size=args.batch_size,
    #                                             device=args.device)
    #         running_loss=0.
    #         running_acc=0.
    #         model.eval()
    #
    #         for batch_index,batch_dict in enumerate(batch_generator):
    #             # compute the model output
    #             y_pred=model(batch_dict['x_source'],
    #                         batch_dict['x_source_length'],
    #                         batch_dict['x_target'])
    #             # compute the loss
    #             loss=sequence_loss(y_pred,batch_dict['y_target'],mask_index)
    #             running_loss+=(loss.item()-running_loss)/(batch_index+1)
    #             # compute the running accuracy
    #             acc_t=compute_accuracy(y_pred,batch_dict['y_target'],mask_index)
    #             running_acc+=(acc_t-running_acc)/(batch_index+1)
    #             # update the progress bar
    #             val_bar.set_postfix(loss=running_loss,acc=running_acc,epoch=epoch_index)
    #             val_bar.update()
    #
    #         train_state['val_loss'].append(running_loss)
    #         train_state['val_acc'].append(running_acc)
    #
    #         train_state=update_train_state(args=args,model=model,
    #                                     train_state=train_state)
    #         scheduler.step(train_state['val_loss'][-1])
    #         if train_state['stop_early']:
    #             break
    #
    #         train_bar.n=0
    #         val_bar.n=0
    #         epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'])
    #         epoch_bar.update()
    # except KeyboardInterrupt:
    #     print("Exiting loop")

    # Evaluation only: put the model in eval mode on the target device.
    # (The original called model.eval() twice back-to-back; once is enough.)
    model = model.eval().to(args.device)
    # The sampler produces translations plus BLEU scores / attention matrices.
    sampler = NMTSampler(vectorizer, model)

    dataset.set_split('test')
    batch_generator = generate_nmt_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
    test_results = []
    for batch_dict in batch_generator:
        sampler.apply_to_batch(batch_dict)
        # Use the actual number of items in this batch: the final batch can be
        # smaller than args.batch_size, and indexing past it would raise.
        # (Assumes batch-first tensors in batch_dict -- TODO confirm.)
        n_items = batch_dict['x_source'].size(0)
        for i in range(n_items):
            test_results.append(sampler.get_ith_item(i, False))

    # Histogram plus summary statistics of sentence-level BLEU-4.
    bleu_scores = [r['bleu-4'] for r in test_results]
    plt.hist(bleu_scores, bins=100)
    print(np.mean(bleu_scores), np.median(bleu_scores))

    # Pull a single validation batch and sample translations from it.
    dataset.set_split('val')
    batch_generator = generate_nmt_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
    batch_dict = next(batch_generator)

    model = model.eval().to(args.device)
    sampler = NMTSampler(vectorizer, model)
    sampler.apply_to_batch(batch_dict)

    all_results = []
    # Iterate over the items actually present in the batch instead of blindly
    # assuming a full args.batch_size, which would raise on a short batch.
    # (Assumes batch-first tensors in batch_dict -- TODO confirm.)
    for i in range(batch_dict['x_source'].size(0)):
        all_results.append(sampler.get_ith_item(i, False))

    # Keep only samples with a non-trivial BLEU-4 score for plotting.
    top_results = [x for x in all_results if x['bleu-4'] > 0.01]
    print(len(top_results))

    # For every retained sample, draw its attention matrix as a heatmap:
    # source tokens (wrapped in <BOS>/<EOS>) on the y-axis, sampled target
    # tokens on the x-axis.
    for sample in top_results:
        plt.figure()
        n_target = len(sample['sampled'])
        n_source = len(sample['source'])

        # The +2 on the source side accounts for the <BOS>/<EOS> markers.
        attention_matrix = sample['attention'][:n_target, :n_source + 2].transpose()
        ax = sns.heatmap(attention_matrix, center=0.0)

        source_labels = ["<BOS>"] + sample['source'] + ["<EOS>"]
        ax.set_yticklabels(source_labels, rotation=0)
        ax.set_xticklabels(sample['sampled'], rotation=90)
        ax.set_xlabel("Target Sentence")
        ax.set_ylabel("Source Sentence\n\n")
        plt.show()
