import os.path
from argparse import Namespace

import torch.cuda
from torch import nn, optim
from tqdm import tqdm_notebook

from PytorchNLP5CBOW.CBOWClassifier import CBOWClassifier
from PytorchNLP5CBOW.CBOWDataset import CBOWDataset, generate_batches
from PytorchNLP5CBOW.GeneralUtilities import set_seed_everywhere, handle_dirs
from PytorchNLP5CBOW.Training_Routine import make_train_state, compute_accuracy, update_train_state

# Run configuration and hyper-parameters for CBOW training.
args = Namespace(
    # Data and artifact paths
    cbow_csv="data/books/frankenstein_with_splits.csv",
    vectorizer_file="vectorizer.json",
    model_state_file="model.pth",
    save_dir="model/cbow",
    # Model hyper-parameters
    embedding_size=50,
    # Training hyper-parameters
    seed=1337,
    num_epochs=100,
    learning_rate=0.0001,
    batch_size=32,
    early_stopping_criteria=5,
    # Runtime options
    cuda=True,
    catch_keyboard_interrupt=True,
    reload_from_files=False,
    expand_filepaths_to_save_dir=True,
)

if __name__=='__main__':
    # Optionally prefix artifact filenames with the save directory
    if args.expand_filepaths_to_save_dir:
        args.vectorizer_file=os.path.join(args.save_dir,args.vectorizer_file)
        args.model_state_file=os.path.join(args.save_dir,args.model_state_file)
        print("Expanded filepaths: ")
        print("\t{}".format(args.vectorizer_file))
        print("\t{}".format(args.model_state_file))
    # Fall back to CPU when CUDA is not actually available
    if not torch.cuda.is_available():
        args.cuda=False
    args.device=torch.device("cuda" if args.cuda else "cpu")
    print("Using CUDA: {}".format(args.cuda))
    # Seed the RNGs for reproducibility
    set_seed_everywhere(args.seed,args.cuda)
    # Create the save directory if it does not already exist
    handle_dirs(args.save_dir)

    if args.reload_from_files:  # reload a previously saved vectorizer
        dataset=CBOWDataset.load_dataset_and_load_vectorizer(args.cbow_csv,args.vectorizer_file)
    else:  # otherwise build the vectorizer from the data and save it to file
        dataset=CBOWDataset.load_dataset_and_make_vectorizer(args.cbow_csv)
        dataset.save_vectorizer(args.vectorizer_file)

    # Vectorizer (vocabulary + text-to-index mapping)
    vectorizer=dataset.get_vectorizer()
    # Model
    classifier=CBOWClassifier(vocabulary_size=len(vectorizer.cbow_vocab),
                              embedding_size=args.embedding_size)
    classifier=classifier.to(args.device)
    # Loss function (targets are class indices into the vocabulary)
    loss_func=nn.CrossEntropyLoss()
    # Optimizer and scheduler: halve the LR when validation loss plateaus
    optimizer=optim.Adam(classifier.parameters(),lr=args.learning_rate)
    scheduler=optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,mode='min',
                                                   factor=0.5,patience=1)
    train_state=make_train_state(args)
    # NOTE(review): tqdm_notebook is deprecated in recent tqdm releases;
    # tqdm.auto.tqdm is the modern replacement — confirm the pinned tqdm version
    epoch_bar=tqdm_notebook(desc='training_routine',total=args.num_epochs,position=0)
    dataset.set_split('train')
    train_bar=tqdm_notebook(desc='split=train',total=dataset.get_num_batches(args.batch_size),position=1,leave=True)
    dataset.set_split('val')
    val_bar=tqdm_notebook(desc='split=val',total=dataset.get_num_batches(args.batch_size),position=1,leave=True)

    try:
        for epoch_index in range(args.num_epochs):
            train_state['epoch_index']=epoch_index
            # --- training split ---
            dataset.set_split('train')
            batch_generator=generate_batches(dataset,batch_size=args.batch_size,device=args.device)
            running_loss=0.0
            running_acc=0.0
            classifier.train()

            for batch_index,batch_dict in enumerate(batch_generator):
                # step 1: zero the gradients
                optimizer.zero_grad()
                # step 2: compute the output
                y_pred=classifier(x_in=batch_dict['x_data'])
                # step 3: compute the loss
                loss=loss_func(y_pred,batch_dict['y_target'])
                loss_t=loss.item()
                # incremental running mean of the loss over batches seen so far
                running_loss+=(loss_t-running_loss)/(batch_index+1)
                # step 4: backpropagate
                loss.backward()
                # step 5: update the weights
                optimizer.step()
                # incremental running mean of the accuracy
                acc_t=compute_accuracy(y_pred,batch_dict['y_target'])
                running_acc+=(acc_t-running_acc)/(batch_index+1)

                # update the progress bar
                train_bar.set_postfix(loss=running_loss,acc=running_acc,epoch=epoch_index)
                train_bar.update()

            train_state['train_loss'].append(running_loss)
            train_state['train_acc'].append(running_acc)

            # --- validation split ---
            # NOTE(review): this loop is not wrapped in torch.no_grad(), so
            # autograd still builds the graph during evaluation — consider
            # adding no_grad to save memory (results are unaffected)
            dataset.set_split('val')
            batch_generator=generate_batches(dataset,batch_size=args.batch_size,device=args.device)
            running_loss=0.
            running_acc=0.
            classifier.eval()

            for batch_index,batch_dict in enumerate(batch_generator):
                # compute the output
                y_pred=classifier(x_in=batch_dict['x_data'])
                # compute the loss
                loss=loss_func(y_pred,batch_dict['y_target'])
                loss_t=loss.item()
                running_loss+=(loss_t-running_loss)/(batch_index+1)
                # compute the accuracy
                acc_t=compute_accuracy(y_pred,batch_dict['y_target'])
                running_acc+=(acc_t-running_acc)/(batch_index+1)
                val_bar.set_postfix(loss=running_loss,acc=running_acc,epoch=epoch_index)
                val_bar.update()

            train_state['val_loss'].append(running_loss)
            train_state['val_acc'].append(running_acc)

            # early-stopping bookkeeping; presumably also checkpoints the best
            # model to train_state['model_filename'] — see Training_Routine
            train_state=update_train_state(args=args,model=classifier,train_state=train_state)

            # step the plateau scheduler on the latest validation loss
            scheduler.step(train_state['val_loss'][-1])

            if train_state['stop_early']:
                break

            # reset the per-epoch bars and advance the epoch bar
            train_bar.n=0
            val_bar.n=0
            epoch_bar.update()

    except KeyboardInterrupt:
        print("Exiting loop")

    # --- evaluate the best saved model on the test split ---
    classifier.load_state_dict(torch.load(train_state['model_filename']))
    classifier=classifier.to(args.device)
    loss_func=nn.CrossEntropyLoss()

    dataset.set_split('test')
    batch_generator=generate_batches(dataset,batch_size=args.batch_size,device=args.device)
    running_loss=0.
    running_acc=0.
    classifier.eval()

    # NOTE(review): like the validation loop, this is not wrapped in
    # torch.no_grad(); evaluation results are unaffected but memory is wasted
    for batch_index,batch_dict in enumerate(batch_generator):
        y_pred=classifier(x_in=batch_dict['x_data'])
        # loss
        loss=loss_func(y_pred,batch_dict['y_target'])
        loss_t=loss.item()
        running_loss+=(loss_t-running_loss)/(batch_index+1)
        # accuracy
        acc_t=compute_accuracy(y_pred,batch_dict['y_target'])
        running_acc+=(acc_t-running_acc)/(batch_index+1)

    train_state['test_loss']=running_loss
    train_state['test_acc']=running_acc

    print("Test loss:{};".format(train_state['test_loss']))
    print("Test Accuracy:{}".format(train_state['test_acc']))



# Pretty-print embedding-neighbour results
def pretty_print(results):
    """Print (word, distance) pairs, one per line, as '...[dist]-word'."""
    for word, score in results:
        print("...[%.2f]-%s"%(score,word))

# Compute distances between the target word and every other vocabulary word
def get_closest(target_word, word_to_idx, embeddings, n=5):
    """Return the ``n`` vocabulary words closest to ``target_word``.

    Args:
        target_word (str): query word; lower-cased before the vocabulary lookup.
        word_to_idx (dict): mapping word -> row index into ``embeddings``.
        embeddings (torch.Tensor): embedding matrix, one row per word.
        n (int): number of neighbours to return.
    Returns:
        List of ``(word, distance)`` tuples sorted by increasing L2 distance,
        excluding the "<MASK>" token and the target word itself.
    """
    target = target_word.lower()
    word_embedding = embeddings[word_to_idx[target]]
    distances = []
    for word, index in word_to_idx.items():
        # Skip the mask token and the query word itself; compare against the
        # lower-cased form so the exclusion matches the lookup above.
        if word == "<MASK>" or word == target:
            continue
        distances.append((word, torch.dist(word_embedding, embeddings[index])))

    # Bug fix: the original sliced [1:n+2], which skipped the true nearest
    # neighbour (the target is already excluded by the loop) and returned
    # n+1 items; take the first n instead.
    return sorted(distances, key=lambda x: x[1])[:n]


