import random
from tkinter.tix import Tree
import numpy
from ner.ner_case import NerCase
from ner.ner_model import NerModel
from ner.dataset import Dataset
from ner.trainer import Trainer
import torch
from word_vec_encoders.char_word_vec_encoder import CharWordVecEncoder
from word_vec_encoders.glyph_word_vec_encoder import GlyphWordVecEncoder
from word_vec_encoders.pinyin_word_vec_encoder import PinyinWordVecEncoder
from word_vec_encoders.stroke_word_vec_encoder import StrokeWordVecEncoder
from word_vec_encoders.word_vec_encoder_set import WordVecEncoderSet
from word_vec_encoders.wubi_word_vec_encoder import WubiWordVecEncoder
from word_vec_encoders.zhengma_word_vec_encoder import ZhengmaWordVecEncoder
from word_vec_encoders.knowlege_injection_encoder import KnowlegeInjectionWordVecEncoder


def do_experiment(bilstm: bool, dynamic_fusion: bool, stroke: bool, wubi: bool,
                  zhengma: bool, glyph: bool, char: bool, pinyin: bool,
                  knowlege: bool, dataset_name: str, batch_size: int = 32,
                  pretrained_model_name: str = 'hfl/chinese-roberta-wwm-ext-large',
                  num_epochs: "int | None" = None):
    """Run a single NER training + evaluation experiment.

    Args:
        bilstm: add a BiLSTM layer on top of the pretrained encoder.
        dynamic_fusion: fuse the pretrained model's layers with learned weights.
        stroke, wubi, zhengma, glyph, char, pinyin, knowlege: enable the
            corresponding extra word-vector feature encoder. ("knowlege"
            spelling is kept as-is for backward compatibility with callers.)
        dataset_name: sub-directory of ./data/ containing train/ and test/.
        batch_size: mini-batch size for training.
        pretrained_model_name: HuggingFace id of the backbone model.
        num_epochs: number of training epochs. When None (the default), a
            per-dataset value is chosen: 35 for 'ccks', 25 otherwise.
            BUGFIX: previously this parameter was unconditionally overwritten
            by the per-dataset value, so explicit caller values were ignored;
            an explicitly passed value is now respected.
    """
    torch.cuda.empty_cache()  # release cached GPU memory from any previous run
    train_path = f'./data/{dataset_name}/train/'
    test_path = f'./data/{dataset_name}/test/'
    corpus_type = dataset_name  # corpus type is keyed directly by dataset name
    if num_epochs is None:
        # Per-dataset epoch budget (only when the caller did not specify one).
        num_epochs = 35 if dataset_name == 'ccks' else 25

    # Assemble the set of enabled extra-feature encoders (order preserved).
    word_vector_encoder_set = WordVecEncoderSet()
    feature_encoders = (
        (stroke, StrokeWordVecEncoder),
        (wubi, WubiWordVecEncoder),
        (zhengma, ZhengmaWordVecEncoder),
        (glyph, GlyphWordVecEncoder),
        (char, CharWordVecEncoder),
        (pinyin, PinyinWordVecEncoder),
        (knowlege, KnowlegeInjectionWordVecEncoder),
    )
    for enabled, encoder_cls in feature_encoders:
        if enabled:
            word_vector_encoder_set.add_word_vec_encoder(encoder_cls())

    case = NerCase(pretrained_model_name = pretrained_model_name, # pretrained language model backbone
                    use_dynamic_fusion = dynamic_fusion, # dynamic layer fusion
                    num_dynamic_fusion_layers = 1, # layers in the fusion weight network
                    word_vector_encoder_set = word_vector_encoder_set, # extra features
                    use_bilstm = bilstm, # use BiLSTM
                    num_bilstm_layers = 1, # number of BiLSTM layers
                    use_crf = True, # use CRF
                    )
    case.train(num_epochs = num_epochs, # number of training epochs
                test_every_n_epoch = 1, # evaluate every N epochs
                save_every_n_epoch = 10, # checkpoint every N epochs
                dataset_name=dataset_name,
                train_corpus_path = train_path, # training-set location
                test_corpus_path = test_path, # test-set location
                corpus_type = corpus_type, # corpus type
                batch_size = batch_size, # batch size
                data_loader_num_workers = 0, # load data in the main thread
                learning_rate = 1e-4,
                )


if __name__ == '__main__':

    # Fix every RNG seed so experiment results are reproducible.
    random.seed(1)
    numpy.random.seed(1)
    torch.manual_seed(1)

    datasets = ['ccks', 'aiaiyi']

    # Flag order: bilstm, dynamic_fusion, stroke, wubi, zhengma, glyph,
    #             char, pinyin, knowlege.
    # Single-feature ablations, each run once per dataset, in this order.
    # (Earlier multi-feature and alternative-backbone configurations were
    # kept here as commented-out calls; see version-control history.)
    ablation_runs = [
        (True, False, False, False, True, False, False, False, False),  # + Zhengma
        (True, False, False, True, False, False, False, False, False),  # + Wubi
        (True, False, True, False, False, False, False, False, False),  # + stroke
        (True, False, False, False, False, False, False, True, False),  # + pinyin
    ]
    for corpus_name in datasets:
        for flags in ablation_runs:
            do_experiment(*flags, corpus_name)
