import time
import sys
import argparse
import random
import torch
import pickle
import os
import numpy as np
from utils.data import Data
from pathes import *
from utils.main_utils import str2bool,data_initialization,load_model_decode,print_args,train

# Main entry point: select a dataset, parse CLI arguments, then dispatch to
# train / test / decode.
if __name__ == '__main__':
    # ---- dataset selection ----
    dataset = "python"
    if dataset == "python":
        # directory holding the data splits (variable from pathes.py)
        dataset_path = python_ner_path
        # directory where results are written
        dataset_saved_path = 'result/python'
        # split file names (appended to dataset_path)
        dataset_train = '/PY.train'
        dataset_dev = '/PY.dev'
        dataset_test = '/PY.test'
        # tagging scheme of the annotations
        encoding_type = 'bio'
    elif dataset == "resume":
        dataset_path = resume_ner_path  # from pathes.py
        dataset_saved_path = 'result/resume'
        dataset_train = '/train.char.bmes'
        dataset_dev = '/dev.char.bmes'
        dataset_test = '/test.char.bmes'
        encoding_type = 'bmeso'
    elif dataset == "weibo":
        dataset_path = weibo_ner_path  # from pathes.py
        dataset_saved_path = 'result/weibo'
        dataset_train = '/weiboNER_2nd_conll.train'
        dataset_dev = '/weiboNER_2nd_conll.dev'
        dataset_test = '/weiboNER.conll.test'
        encoding_type = 'bio'
    elif dataset == "weibo_old":
        dataset_path = weibo_ner_old_path  # from pathes.py
        dataset_saved_path = 'result/weibo_old'
        dataset_train = '/weiboNER.conll.train'
        dataset_dev = '/weiboNER.conll.dev'
        dataset_test = '/weiboNER.conll.test'
        encoding_type = 'bio'
    elif dataset == "msra":
        dataset_path = msra_ner_cn_path  # from pathes.py
        dataset_saved_path = 'result/msra'
        dataset_train = '/train.char.bio'
        # NOTE(review): dev reuses the test split here — confirm intentional
        dataset_dev = '/test.char.bio'
        dataset_test = '/test.char.bio'
        encoding_type = 'bio'
    elif dataset == "ontonote4":
        dataset_path = ontonote4ner_cn_path  # from pathes.py
        dataset_saved_path = 'result/ontonote4'
        dataset_train = '/train.char.bio'
        # NOTE(review): dev reuses the test split here — confirm intentional
        dataset_dev = '/test.char.bio'
        dataset_test = '/test.char.bio'
        encoding_type = 'bio'
    else:
        # fail fast with a clear message instead of a NameError on
        # dataset_path further below
        sys.exit("Unknown dataset: " + dataset)

    # ---- device detection ----
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        use_gpu = True
    else:
        device = torch.device("cpu")
        use_gpu = False
    print("设备:", device)


    parser = argparse.ArgumentParser()
    parser.add_argument('--status', choices=['train', 'test', 'decode'], help='Function status.', default='train')
    # ---- model choice ----
    parser.add_argument('--model', default='GraphTrans', help='Graph|GraphBiLSTM|GraphTrans')
    # ---- data set splits ----
    parser.add_argument('--train', help='Training set.', default=dataset_path + dataset_train)
    parser.add_argument('--dev', help='Developing set.', default=dataset_path + dataset_dev)
    # BUG FIX: the --test default previously pointed at the dev file
    parser.add_argument('--test', help='Testing set.', default=dataset_path + dataset_test)
    parser.add_argument('--raw', help='Raw file for decoding.')
    parser.add_argument('--output', help='Output results for decoding.')
    # BUG FIX: default now follows the tagging scheme chosen by the dataset
    # selection above; the previous hard-coded "BIO" silently discarded it
    parser.add_argument('--encoding_type', help='数据集标注标注.', default=encoding_type)  # BIO BMES
    # ---- experiment outputs ----
    #   1. cache of the preprocessed data
    parser.add_argument('--saved_set', help='Path of saved data set.', default=dataset_saved_path + '/saved.dset')
    #   2. model checkpoints
    parser.add_argument('--saved_model', help='Path of saved model.', default=dataset_saved_path + "/saved_model")
    # ---- training hyper-parameters ----
    #   1. epochs + batch size
    parser.add_argument('--batch_size', help='Batch size.', default=16, type=int)
    parser.add_argument('--num_epoch', default=200, type=int, help="Epoch number.")
    #   2. learning rate
    parser.add_argument('--lr', type=float, default=2e-05)
    parser.add_argument('--lr_decay', type=float, default=0)
    parser.add_argument('--weight_decay', type=float, default=0)
    # ---- embedding layer ----
    # paths to the pretrained character / word embedding files
    parser.add_argument('--char_emb', help='Path of character embedding file.',
                        default=yangjie_rich_pretrain_unigram_path)
    parser.add_argument('--word_emb', help='Path of word embedding file.', default=yangjie_rich_pretrain_word_path)
    # ---- graph ----
    # 1) label count: filled in automatically after the data is loaded
    # 2) max words per character: set inside the model
    # 3) optional model components
    parser.add_argument('--use_crf', type=str2bool, default=True)
    parser.add_argument('--use_edge', type=str2bool, default=True, help='If use lexicon embeddings (edge embeddings).')
    parser.add_argument('--use_global', type=str2bool, default=True, help='If use the global node.')
    parser.add_argument('--bidirectional', type=str2bool, default=True, help='If use bidirectional digraph.')
    # 4) attention parameters (local + global aggregation)
    parser.add_argument('--hidden_dim', default=50, type=int, help='Hidden state size.')
    parser.add_argument('--num_head', default=10, type=int, help='Number of transformer head.')
    parser.add_argument('--head_dim', default=20, type=int, help='Head dimension of transformer.')
    parser.add_argument('--tf_drop_rate', default=0.1, type=float)  # attention dropout
    # 5) number of graph iterations
    parser.add_argument('--iters', default=4, type=int, help='The number of Graph iterations.')
    # 6) regularization
    parser.add_argument('--emb_drop_rate', default=0.5, type=float)  # after the embedding layer
    parser.add_argument('--cell_drop_rate', default=0.2, type=float)  # dropout on HTML-CELL inputs in the update module (its own default is 0.2)
    parser.add_argument('--output_dropout_rate', default=0.2, type=float)  # after the graph layer
    # ---- transformer layer ----
    parser.add_argument('--num_layers', type=int, default=4)
    parser.add_argument('--n_head', type=int, default=6)
    parser.add_argument('--d_model', type=int, default=120)
    parser.add_argument('--att_dropout_rate', type=float, default=0.15)
    parser.add_argument('--trans_dropout_rate', type=float, default=0.4)
    # parameters filled in automatically once the data set is loaded
    parser.add_argument('--char_dim', type=int, help='Char embedding size.')
    parser.add_argument('--word_dim', type=int, help='Word embedding size.')
    parser.add_argument('--word_alphabet_size', type=int, help='Word alphabet size.')
    parser.add_argument('--char_alphabet_size', type=int, help='Char alphabet size.')
    parser.add_argument('--label_alphabet_size', type=int, help='Label alphabet size.')
    # random seed
    parser.add_argument('--seed', help='Random seed', default=1023, type=int)
    args = parser.parse_args()
    args.use_gpu = use_gpu
    status = args.status.lower()
    # seed every RNG in use for reproducibility
    seed_num = args.seed
    random.seed(seed_num)
    torch.manual_seed(seed_num)
    np.random.seed(seed_num)
    if use_gpu:
        # explicitly seed the CUDA RNGs as well; torch.manual_seed alone does
        # not cover them on some older torch versions
        torch.cuda.manual_seed_all(seed_num)
    # resolve paths from the parsed arguments
    train_file = args.train
    dev_file = args.dev
    test_file = args.test
    raw_file = args.raw
    output_file = args.output
    saved_set_path = args.saved_set
    saved_model_path = args.saved_model
    char_file = args.char_emb
    word_file = args.word_emb
    encoding_type = args.encoding_type
    #
    if status == 'train':
        # reuse the preprocessed data if a cache exists, otherwise build it
        if os.path.exists(saved_set_path):
            print('Loading saved data set...')
            with open(saved_set_path, 'rb') as f:
                data = pickle.load(f)
        else:
            data = Data()
            data_initialization(data, word_file, train_file, dev_file, test_file)
            data.generate_instance_with_words(train_file, 'train')
            data.generate_instance_with_words(dev_file, 'dev')
            data.generate_instance_with_words(test_file, 'test')
            data.build_char_pretrain_emb(char_file)
            data.build_word_pretrain_emb(word_file)
            if saved_set_path is not None:
                print('Dumping data...')
                with open(saved_set_path, 'wb') as f:
                    pickle.dump(data, f)
        data.show_data_summary()
        # propagate data-dependent sizes into the arguments
        args.word_alphabet_size = data.word_alphabet.size()
        args.char_alphabet_size = data.char_alphabet.size()
        args.label_alphabet_size = data.label_alphabet.size()
        args.char_dim = data.char_emb_dim
        args.word_dim = data.word_emb_dim
        print_args(args)
        train(args.model, data, args, saved_model_path, encoding_type)
    # evaluate a saved model on the test split
    elif status == 'test':
        # explicit check instead of assert (asserts vanish under python -O)
        if test_file is None:
            sys.exit("A test file is required for status 'test'.")
        if os.path.exists(saved_set_path):
            print('Loading saved data set...')
            with open(saved_set_path, 'rb') as f:
                data = pickle.load(f)
        else:
            print("Cannot find saved data set: ", saved_set_path)
            sys.exit(1)  # BUG FIX: was exit(0) — a missing data set is an error
        data.generate_instance_with_words(test_file, 'test')
        # restore the hyper-parameters the model was trained with
        with open(saved_model_path + "_best_HP.config", "rb") as f:
            args = pickle.load(f)
        data.show_data_summary()
        print_args(args)
        load_model_decode(saved_model_path, data, args, "test", encoding_type)
    # predict labels for a raw file and write the results
    elif status == 'decode':
        # explicit check instead of assert (asserts vanish under python -O)
        if raw_file is None or output_file is None:
            sys.exit("Both --raw and --output are required for status 'decode'.")
        if os.path.exists(saved_set_path):
            print('Loading saved data set...')
            with open(saved_set_path, 'rb') as f:
                data = pickle.load(f)
        else:
            print("Cannot find saved data set: ", saved_set_path)
            sys.exit(1)  # BUG FIX: was exit(0) — a missing data set is an error
        data.generate_instance_with_words(raw_file, 'raw')
        # restore the hyper-parameters the model was trained with
        with open(saved_model_path + "_best_HP.config", "rb") as f:
            args = pickle.load(f)
        data.show_data_summary()
        print_args(args)
        decode_results = load_model_decode(saved_model_path, data, args, "raw", encoding_type)
        data.write_decoded_results(output_file, decode_results, 'raw')
    else:
        print("Invalid argument! Please use valid arguments! (train/test/decode)")