import os.path
from argparse import Namespace

import torch.cuda
from torch import nn, optim
from tqdm import tqdm_notebook

from PytorchNLP3Yelp.GeneralUtilities import set_seed_everywhere, handle_dirs
from PytorchNLP3Yelp.ReviewClassifier import ReviewClassifier
from PytorchNLP3Yelp.ReviewDataset import ReviewDataset, generate_batches
from PytorchNLP3Yelp.Training_Routine import make_train_state, compute_accuracy, update_train_state

if __name__ == '__main__':
    # Configuration for the Yelp review sentiment experiment.
    args = Namespace(
        # Data and checkpoint paths
        frequency_cutoff=25,
        model_state_file='model.pth',
        review_csv='data/yelp/reviews_with_splits_lite.csv',
        save_dir='model/yelp/',
        vectorizer_file='vectorizer.json',
        # Training hyperparameters
        batch_size=128,
        early_stopping_criteria=5,
        learning_rate=0.001,
        num_epochs=100,
        seed=1337,
        # Runtime options
        catch_keyboard_interrupt=True,
        cuda=True,
        expand_filepaths_to_save_dir=True,
        reload_from_files=False,
    )

    # Prefix the vectorizer/model filenames with the save directory.
    if args.expand_filepaths_to_save_dir:
        args.vectorizer_file = os.path.join(args.save_dir,
                                            args.vectorizer_file)
        args.model_state_file = os.path.join(args.save_dir,
                                             args.model_state_file)
        print("Expanded filepaths: ")
        print("\t{}".format(args.vectorizer_file))
        print("\t{}".format(args.model_state_file))

    # Fall back to CPU when CUDA is not available on this machine.
    if not torch.cuda.is_available():
        args.cuda = False

    print("Using CUDA: {}".format(args.cuda))

    args.device = torch.device("cuda" if args.cuda else "cpu")

    set_seed_everywhere(args.seed, args.cuda)

    handle_dirs(args.save_dir)

    if args.reload_from_files:
        # A saved vectorizer exists: reload the dataset together with it.
        print("Loading dataset and vectorizer")
        dataset = ReviewDataset.load_dataset_and_load_vectorizer(args.review_csv,
                                                                 args.vectorizer_file)
    else:
        # No saved vectorizer: build one from the CSV and persist it.
        print("Loading dataset and creating vectorizer")
        dataset = ReviewDataset.load_dataset_and_make_vectorizer(args.review_csv)
        dataset.save_vectorizer(args.vectorizer_file)

    vectorizer = dataset.get_vectorizer()

    classifier = ReviewClassifier(num_features=len(vectorizer.review_vocab))
    classifier = classifier.to(args.device)

    # BCEWithLogitsLoss fuses sigmoid + binary cross-entropy for numerical
    # stability; the classifier therefore outputs raw logits.
    loss_func = nn.BCEWithLogitsLoss()
    # Adam optimizer; the scheduler halves the LR when validation loss plateaus.
    optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                     mode='min',
                                                     factor=0.5,
                                                     patience=1)
    # Bookkeeping dict: per-epoch losses/accuracies, early stopping, best model path.
    train_state = make_train_state(args)
    # Epoch-level progress bar.
    # NOTE(review): tqdm_notebook is deprecated; prefer `from tqdm.auto import tqdm`
    # when the import can be updated.
    epoch_bar = tqdm_notebook(desc='training routine', total=args.num_epochs, position=0)

    # --- Training routine (intentionally disabled: this script currently only
    # --- evaluates a previously saved checkpoint on the test split). ---
    # dataset.set_split('train')
    # train_bar = tqdm_notebook(desc='split=train', total=dataset.get_num_batches(args.batch_size), position=1, leave=True)
    # dataset.set_split('val')
    # val_bar = tqdm_notebook(desc='split=val', total=dataset.get_num_batches(args.batch_size), position=1, leave=True)
    #
    # try:
    #     for epoch_index in range(args.num_epochs):
    #         train_state['epoch_index'] = epoch_index
    #         dataset.set_split('train')
    #         batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device)
    #         running_loss = 0.0
    #         running_acc = 0.0
    #         classifier.train()
    #
    #         for batch_index, batch_dict in enumerate(batch_generator):
    #             # 1. Zero the gradients
    #             optimizer.zero_grad()
    #             # 2. Compute the output
    #             y_pred = classifier(x_in=batch_dict['x_data'].float())
    #             # 3. Compute the loss
    #             loss = loss_func(y_pred, batch_dict['y_target'].float())
    #             loss_t = loss.item()
    #             running_loss += (loss_t - running_loss) / (batch_index + 1)
    #             # 4. Use the loss to produce gradients
    #             loss.backward()
    #             # 5. Take an optimizer step
    #             optimizer.step()
    #
    #             # Running-mean accuracy
    #             acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
    #             running_acc += (acc_t - running_acc) / (batch_index + 1)
    #
    #             # Update the progress bar
    #             train_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index)
    #             train_bar.update()
    #
    #         train_state['train_loss'].append(running_loss)
    #         train_state['train_acc'].append(running_acc)
    #
    #         # Validation pass (no gradient updates)
    #         dataset.set_split('val')
    #         batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device)
    #         running_loss = 0.
    #         running_acc = 0.
    #         classifier.eval()
    #
    #         for batch_index, batch_dict in enumerate(batch_generator):
    #             # Compute output and loss
    #             y_pred = classifier(x_in=batch_dict['x_data'].float())
    #             loss = loss_func(y_pred, batch_dict['y_target'].float())
    #             loss_t = loss.item()
    #             running_loss += (loss_t - running_loss) / (batch_index + 1)
    #             # Running-mean accuracy
    #             acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
    #             running_acc += (acc_t - running_acc) / (batch_index + 1)
    #
    #             val_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index)
    #             val_bar.update()
    #
    #         train_state['val_loss'].append(running_loss)
    #         train_state['val_acc'].append(running_acc)
    #         # Update early-stopping / best-checkpoint bookkeeping
    #         train_state = update_train_state(args=args, model=classifier, train_state=train_state)
    #
    #         scheduler.step(train_state['val_loss'][-1])
    #
    #         if train_state['stop_early']:
    #             break
    #
    #         # Reset per-split bars for the next epoch.
    #         # NOTE(review): the original had this reset + epoch_bar.update()
    #         # both before AND after the early-stop check, double-counting
    #         # epochs on the bar; the duplicate was removed here.
    #         train_bar.n = 0
    #         val_bar.n = 0
    #         epoch_bar.update()
    #
    # except KeyboardInterrupt:
    #     print("Exiting loop")

    # --- Test-set evaluation with the best saved model. ---
    # map_location ensures a checkpoint saved on GPU loads correctly on a
    # CPU-only machine (and vice versa); without it torch.load tries to
    # restore tensors onto the device they were saved from.
    classifier.load_state_dict(torch.load(train_state['model_filename'],
                                          map_location=args.device))
    classifier = classifier.to(args.device)

    dataset.set_split('test')
    batch_generator = generate_batches(dataset,
                                       batch_size=args.batch_size,
                                       device=args.device)

    running_loss = 0.
    running_acc = 0.
    classifier.eval()

    # no_grad: inference only — skip building the autograd graph to save
    # memory and compute; loss/accuracy values are unaffected.
    with torch.no_grad():
        for batch_index, batch_dict in enumerate(batch_generator):
            # Compute the output (logits)
            y_pred = classifier(x_in=batch_dict['x_data'].float())
            # Compute the loss and fold it into a running mean
            loss = loss_func(y_pred, batch_dict['y_target'].float())
            loss_t = loss.item()
            running_loss += (loss_t - running_loss) / (batch_index + 1)
            # Running-mean accuracy
            acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
            running_acc += (acc_t - running_acc) / (batch_index + 1)

    train_state['test_loss'] = running_loss
    train_state['test_acc'] = running_acc

    print("Test loss:{:.3f}".format(train_state['test_loss']))
    print("Test Accuracy:{:.2f}".format((train_state['test_acc'])))