
import random
import time
import numpy as np
import argparse
import os

import torch
import torch.optim as optim
import torch.nn as nn


from torchtext import data
from torchtext import datasets
from torchtext import vocab

from Utils.utils import word_tokenize, get_device, epoch_time, classifiction_metric
from Utils.PMR_utils import load_pm_refind

from train_eval import train, evaluate

def main(config):
    """Train (optionally) and evaluate a text-classification model.

    The model architecture is selected by ``config.model_name`` (TextCNN,
    TextRNN, LSTMATT, TextRCNN, TransformerText).  Data iterators and
    pretrained GloVe vectors are built by ``load_pm_refind``; training is
    delegated to ``train`` and final test metrics come from ``evaluate``.

    Args:
        config: argparse.Namespace carrying all hyperparameters parsed in
            the ``__main__`` block (paths, seed, model/optimizer settings).

    Raises:
        ValueError: if ``config.model_name`` is not one of the known models.
    """
    # Ensure output directories exist (exist_ok avoids the check-then-create race).
    os.makedirs(config.model_dir, exist_ok=True)
    os.makedirs(config.log_dir, exist_ok=True)

    print("the model name is {}".format(config.model_name))
    device, n_gpu = get_device()

    # Seed every RNG source exactly once for reproducibility.
    # (The original seeded torch twice and never seeded `random`.)
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(config.seed)
        # Force cuDNN to use deterministic algorithms so runs are repeatable.
        torch.backends.cudnn.deterministic = True

    # --- Data preparation ---
    text_field = data.Field(tokenize='spacy', lower=True, include_lengths=True,
                            fix_length=config.sequence_length)
    label_field = data.LabelField(dtype=torch.long)

    train_iterator, dev_iterator, test_iterator = load_pm_refind(
        config.data_path, text_field, label_field, config.batch_size,
        device, config.glove_word_file, config.cache_path)

    # --- Pretrained word vectors (vocab is built inside load_pm_refind) ---
    pretrained_embeddings = text_field.vocab.vectors

    # --- Model selection ---
    if config.model_name == "TextCNN":
        from TextCNN import TextCNN
        filter_sizes = [int(val) for val in config.filter_sizes.split()]
        model = TextCNN.TextCNN(config.glove_word_dim, config.filter_num, filter_sizes,
                                config.label_nums, config.dropout, pretrained_embeddings)
    elif config.model_name == "TextRNN":
        from TextRNN import TextRNN
        model = TextRNN.TextRNN(config.glove_word_dim, config.label_nums,
                                config.hidden_size, config.num_layers,
                                config.bidirectional, config.dropout, pretrained_embeddings)
    elif config.model_name == "LSTMATT":
        from LSTM_ATT import LSTMATT
        model = LSTMATT.LSTMATT(config.glove_word_dim, config.label_nums,
                                config.hidden_size, config.num_layers,
                                config.bidirectional, config.dropout, pretrained_embeddings)
    elif config.model_name == 'TextRCNN':
        from TextRCNN import TextRCNN
        model = TextRCNN.TextRCNN(config.glove_word_dim, config.label_nums,
                                  config.hidden_size, config.num_layers,
                                  config.bidirectional, config.dropout, pretrained_embeddings)
    elif config.model_name == "TransformerText":
        from TransformerText import TransformerText
        model = TransformerText.TransformerText(config.head_num, config.encode_layer,
                                                config.glove_word_dim, config.d_model,
                                                config.d_ff, config.label_nums,
                                                config.dropout, pretrained_embeddings)
    else:
        # Fail loudly instead of hitting a NameError on `model` below.
        raise ValueError("unknown model_name: {}".format(config.model_name))

    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    criterion = nn.CrossEntropyLoss()

    model = model.to(device)
    criterion = criterion.to(device)

    # Class labels as strings, as expected by the metric/report helpers.
    label_list = [str(i) for i in range(config.label_nums)]

    model_file = os.path.join(
        config.model_dir,
        "model_batch-{}_lr-{}.bin".format(config.batch_size, config.learning_rate))

    if config.do_train:
        train(config.epoch_num, model, train_iterator, dev_iterator, optimizer,
              criterion, label_list, model_file, config.log_dir, config.print_step, 'word')

    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    model.load_state_dict(torch.load(model_file, map_location=device))

    test_loss, test_acc, test_report, test_report_print = evaluate(
        model, test_iterator, criterion, label_list, 'word')
    print("-------------- Test -------------")
    print("\t Loss: {} | Acc: {}".format(test_loss, test_acc))

    print(test_report['weighted avg']['f1-score'])
    print(test_report_print)

    # Append the test report to the run log.
    with open(os.path.join(config.log_dir, 'log.txt'), 'a') as f:
        f.write("Test: \n")
        f.write(test_report_print)



if __name__ == "__main__":

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("--model_name", default="TextCNN", type=str, help="这批参数所属的模型的名字")
    parser.add_argument("--seed", default=3456, type=int, help="随机种子")

    # data_util
    parser.add_argument(
        "--data_path", default="data/sst-2", type=str, help="sst2 数据集位置")
    parser.add_argument(
        "--cache_path", default=".cache", type=str, help="数据缓存地址"
    )
    parser.add_argument(
        "--sequence_length", default=60, type=int, help="句子长度"
    )

    # output
    parser.add_argument(
        "--model_dir", default="outputs/", type=str, help="输出模型的保存地址"
    )
    parser.add_argument(
        "--log_dir", default="logs/", type=str, help="日志文件地址"
    )
    parser.add_argument("--print_step", default=100,
                        type=int, help="多少步存储一次模型")

    # train
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")

    # optimizer
    parser.add_argument("--batch_size", default=64, type=int)
    parser.add_argument("--epoch_num", default=4, type=int)
    parser.add_argument("--dropout", default=0.4, type=float)
    parser.add_argument("--learning_rate", default=1e-3, type=float)

    # model
    parser.add_argument("--label_nums", default=2, type=int)

    # word Embedding
    parser.add_argument(
        '--glove_word_file',
        default='glove.840B.300d.txt',
        type=str, help='path of word embedding file')
    parser.add_argument(
        '--glove_word_size',
        default=int(2.2e6), type=int,
        help='Corpus size for Glove')
    parser.add_argument(
        '--glove_word_dim',
        default=300, type=int,
        help='word embedding size (default: 300)')

    # First pass: only the shared flags are registered so far, so use
    # parse_known_args -- a plain parse_args() would abort with
    # "unrecognized arguments" on any model-specific flag (e.g.
    # --filter_sizes) before it gets registered below.
    args, _ = parser.parse_known_args()

    # Register the model-specific arguments on the same parser.
    if args.model_name == "TextCNN":
        from configs import textcnn_args
        textcnn_args(parser)
    elif args.model_name == "TextRNN":
        from configs import textrnn_args
        textrnn_args(parser)
    elif args.model_name == "LSTMATT":
        from configs import lstmatt_args
        lstmatt_args(parser)
    elif args.model_name == "TextRCNN":
        from configs import textrcnn_args
        textrcnn_args(parser)
    elif args.model_name == "TransformerText":
        from configs import transformer_args
        transformer_args(parser)
    else:
        parser.error("unknown --model_name: {}".format(args.model_name))

    # Second pass: full parse now that every flag is known.
    config = parser.parse_args()

    main(config)