# coding: UTF-8
import time, os
import torch
import numpy as np
from importlib import import_module
import argparse

def _str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is
    ``True`` because any non-empty string is truthy, so ``--word False``
    silently enabled word-level mode. This converter accepts the usual
    spellings and raises a proper argparse error otherwise.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', '1', 'yes', 'y', 't'):
        return True
    if lowered in ('false', '0', 'no', 'n', 'f', ''):
        return False
    raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')


parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')
parser.add_argument('--embedding', default='embedding_SougouNews.npz', type=str, help='random or pre_trained')
# NOTE: was ``type=bool`` — any non-empty value (including "False") parsed as True.
parser.add_argument('--word', default=False, type=_str2bool, help='True for word, False for char')
parser.add_argument('--dataset', default='THUCNews', type=str, help='数据文件夹')

# Shared hyper-parameters (all models)
parser.add_argument('--dropout', default=0.2, type=float, help='')
parser.add_argument('--require_improvement', default=1000, type=int, help='若超过1000batch效果还没提升，则提前结束训练')
parser.add_argument('--num_epochs', default=20, type=int, help='')
parser.add_argument('--batch_size', default=128, type=int, help='')
parser.add_argument('--pad_size', default=32, type=int, help='')
parser.add_argument('--learning_rate', default=1e-3, type=float, help='Transformer默认5e-4')

# DPCNN / TextCNN
parser.add_argument('--num_filters', default=256, type=int, help='卷积核数量.DPCNN/TextCNN')

# FastText
parser.add_argument('--hidden_size', default=256, type=int, help='FastText/TextRCNN/TextRNN')
parser.add_argument('--n_gram_vocab', default=250499, type=int, help='ngram 词表大小/FastText')

# TextRCNN
parser.add_argument('--num_layers', default=1, type=int, help='TextRCNN/TextRNN_Att是2')

# Transformer
parser.add_argument('--dim_model', default=300, type=int, help='')
parser.add_argument('--hidden', default=1024, type=int, help='')
parser.add_argument('--last_hidden', default=512, type=int, help='')
parser.add_argument('--num_head', default=5, type=int, help='')
parser.add_argument('--num_encoder', default=2, type=int, help='')

# TextRNN_Att
parser.add_argument('--hidden_size2', default=64, type=int, help='TextRNN_Att')

# BERT family
# NOTE: the default previously ended with a trailing space ("...chinese "),
# which breaks path resolution — removed.
parser.add_argument('--bert_path', default="./pretrain_models/bert-base-chinese", type=str, help='')
parser.add_argument('--freeze_to_layer', default=None, type=str, help='例如 bert.pooler.dense.bias')

# Runtime options
# Help text fixed: it was a copy-paste of --freeze_to_layer's example.
parser.add_argument('--load_from', default=None, type=str, help='已训练模型权重的加载路径')
# NOTE: was ``type=bool`` — same truthiness trap as --word, fixed the same way.
parser.add_argument('--infer_mode', default=False, type=_str2bool, help='')

args = parser.parse_args()

if __name__ == '__main__':
    # Validate the requested model explicitly: ``assert`` is stripped under
    # ``python -O`` and would silently let an unknown model through.
    supported = ["TextCNN", "TextRNN", "FastText", "TextRCNN", "TextRNN_Att",
                 "DPCNN", "Transformer",
                 "bert", "bert_CNN", "bert_DPCNN", "bert_RCNN", "bert_RNN"]
    if args.model not in supported:
        raise ValueError(f"Unknown model {args.model!r}; choose one of {supported}")
    if not os.path.exists(f"{args.dataset}"):
        raise FileNotFoundError("Data folder must exist!")
    # exist_ok avoids the check-then-create race of the previous
    # ``if not exists: os.mkdir`` pair.
    os.makedirs(f"{args.dataset}/saved_dict", exist_ok=True)

    # Select the train loop and data utilities matching the model family.
    # Embeddings: Sougou news = embedding_SougouNews.npz,
    # Tencent = embedding_Tencent.npz, random init = 'random'.
    if args.model == 'FastText':
        from train_eval import train, init_network
        import utils_fasttext as utils
        args.embedding = 'random'  # FastText always uses random init here
    elif 'bert' in args.model:
        from train_eval_bert import train, init_network
        import utils_bert as utils
    else:
        from train_eval import train, init_network
        import utils as utils

    import_model = import_module('models.' + args.model)
    config = import_model.Config(args)
    # Fixed seeds so repeated runs give identical results.
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True

    start_time = time.time()
    print("Loading data...")
    vocab, train_data, dev_data, test_data = utils.build_dataset(config, args)
    train_iter = utils.build_iterator(train_data, config)
    dev_iter = utils.build_iterator(dev_data, config)
    test_iter = utils.build_iterator(test_data, config)
    time_dif = utils.get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    config.n_vocab = len(vocab)
    model = import_model.Model(config).to(config.device)
    if args.model != 'Transformer':
        init_network(model)  # Transformer relies on its own default init
    # Was ``print(model.parameters)`` — that prints a bound-method repr,
    # not the model; ``print(model)`` shows the architecture as intended.
    print(model)
    train(config, model, train_iter, dev_iter, test_iter, args)