from models.TENER import TENER
from fastNLP import cache_results
from fastNLP import Trainer, GradientClipCallback, WarmupCallback
from torch import optim
from fastNLP import SpanFPreRecMetric, BucketSampler
from fastNLP.embeddings import StaticEmbedding
from modules.pipe import CNNERPipe

import argparse
from modules.callbacks import EvaluateCallback


def main():
    """Train a TENER NER model on one of several Chinese NER corpora.

    Parses a single ``--dataset`` command-line argument, loads (and disk-caches)
    the corresponding corpus plus pretrained unigram/bigram static embeddings,
    builds the TENER model, and runs fastNLP's ``Trainer`` with gradient
    clipping, linear learning-rate warmup, and test-set evaluation callbacks.
    """
    # Device to train on ('cpu' here; fastNLP also accepts e.g. 'cuda:0').
    device = "cpu"
    # Root of the data directories; switch per machine.
    root_path = '/Users/zhangwentao'
    # root_path = '/home/ubuntu/zwt'
    # root_path = 'E:'
    parser = argparse.ArgumentParser()
    # Dataset selection.
    parser.add_argument('--dataset', type=str, default='python',
                        choices=['weibo', 'resume', 'ontonotes', 'msra', 'python'])

    args = parser.parse_args()

    dataset = args.dataset
    encoding_type = 'bmeso'
    attn_type = 'adatrans'
    n_epochs = 300
    batch_size = 16

    # Default transformer hyper-parameters.
    # BUG FIX: previously only the 'resume' and 'python' branches assigned
    # n_heads / head_dims / num_layers / lr, so picking 'weibo', 'ontonotes'
    # or 'msra' (all advertised in --dataset choices) crashed with a NameError
    # at the d_model computation below. These defaults make every choice
    # runnable; 'resume' and 'python' still override them exactly as before.
    n_heads = 4
    head_dims = 64
    num_layers = 2
    lr = 0.0007

    # Hyper-parameters for the RESUME dataset.
    if dataset == 'resume':
        n_heads = 4
        head_dims = 64
        num_layers = 2
        lr = 0.0007
        encoding_type = 'bmeso'
    # Hyper-parameters for the PYTHON dataset.
    elif dataset == 'python':
        num_layers = 2
        n_heads = 6
        head_dims = 80
        lr = 0.0007
        # lr = 0.001
        encoding_type = 'bio'

    # No additional positional embedding (the adatrans attention handles position).
    pos_embed = None

    # Fraction of total steps used for learning-rate warmup (see WarmupCallback).
    warmup_steps = 0.01
    after_norm = 1
    model_type = 'transformer'
    normalize_embed = True

    dropout = 0.15
    fc_dropout = 0.4

    # Cache file name encodes every setting that affects preprocessing, so a
    # stale cache is never reused for a different configuration.
    name = 'caches/{}_{}_{}_{}.pkl'.format(dataset, model_type, encoding_type, normalize_embed)
    # Transformer dimensions derived from the head configuration.
    d_model = n_heads * head_dims
    dim_feedforward = int(2 * d_model)

    # Load the corpus and embeddings, caching the result on disk under `name`.
    @cache_results(name, _refresh=False)
    def load_data():
        """Return (data_bundle, unigram_embed, bigram_embed) for `dataset`."""
        # Per-dataset corpus file paths and embedding min-frequency threshold.
        if dataset == 'ontonotes':
            paths = {'train': root_path + '/data/corpus/sequence_labelling/chinese_ner/OntoNote4NER/train.char.bmes',
                     "dev": root_path + '/data/corpus/sequence_labelling/chinese_ner/OntoNote4NER/dev.char.bmes',
                     "test": root_path + '/data/corpus/sequence_labelling/chinese_ner/OntoNote4NER/test.char.bmes'}
            min_freq = 2
        elif dataset == 'weibo':
            paths = {
                'train': root_path + '/data/corpus/sequence_labelling/chinese_ner/WeiboNER/weiboNER_2nd_conll.train',
                'dev': root_path + '/data/corpus/sequence_labelling/chinese_ner/WeiboNER/weiboNER_2nd_conll.dev',
                'test': root_path + '/data/corpus/sequence_labelling/chinese_ner/WeiboNER/weiboNER_2nd_conll.test'}
            min_freq = 1
        elif dataset == 'resume':
            paths = {'train': root_path + '/data/corpus/sequence_labelling/chinese_ner/ResumeNER/train.char.bmes',
                     'dev': root_path + '/data/corpus/sequence_labelling/chinese_ner/ResumeNER/dev.char.bmes',
                     'test': root_path + '/data/corpus/sequence_labelling/chinese_ner/ResumeNER/test.char.bmes'}
            min_freq = 1
        elif dataset == 'msra':
            # NOTE(review): 'dev' points at the test split here — MSRA ships no
            # dev set, so the test file doubles as dev; confirm this is intended.
            paths = {'train': root_path + '/data/corpus/sequence_labelling/chinese_ner/MSRANER/train.char.bio',
                     'dev': root_path + '/data/corpus/sequence_labelling/chinese_ner/MSRANER/test.char.bio',
                     'test': root_path + '/data/corpus/sequence_labelling/chinese_ner/MSRANER/test.char.bio'}
            min_freq = 1
        elif dataset == 'python':
            paths = {'train': root_path + '/data/corpus/sequence_labelling/chinese_ner/PythonNER/PY.train',
                     'dev': root_path + '/data/corpus/sequence_labelling/chinese_ner/PythonNER/PY.dev',
                     'test': root_path + '/data/corpus/sequence_labelling/chinese_ner/PythonNER/PY.test'}
            min_freq = 1
        # Read and preprocess the corpus into a fastNLP DataBundle
        # (chars + bigrams + targets in the chosen encoding).
        data_bundle = CNNERPipe(bigrams=True, encoding_type=encoding_type).process_from_file(paths)
        # Pretrained character (unigram) embedding.
        #   min_freq: words whose vocabulary frequency is below this threshold
        #   are mapped to the unk token.
        # NOTE(review): this uses a hard-coded min_freq=1 rather than the
        # dataset-specific `min_freq` computed above (which the bigram
        # embedding does use) — confirm that is intentional.
        embed = StaticEmbedding(data_bundle.get_vocab('chars'),
                                model_dir_or_name=root_path + '/data/pretrain/chinese/gigaword_chn.all.a2b.uni.ite50.vec',
                                min_freq=1, only_norm_found_vector=normalize_embed, word_dropout=0.01, dropout=0.3)

        # Pretrained bigram embedding.
        bi_embed = StaticEmbedding(data_bundle.get_vocab('bigrams'),
                                   model_dir_or_name=root_path + '/data/pretrain/chinese/gigaword_chn.all.a2b.bi.ite50.vec',
                                   word_dropout=0.02, dropout=0.3, min_freq=min_freq,
                                   only_norm_found_vector=normalize_embed, only_train_min_freq=True)

        return data_bundle, embed, bi_embed

    data_bundle, embed, bi_embed = load_data()
    print(data_bundle)

    # Build the TENER model; `scale` is only enabled for vanilla transformer
    # attention (adatrans uses unscaled attention).
    model = TENER(tag_vocab=data_bundle.get_vocab('target'), embed=embed,
                  num_layers=num_layers, d_model=d_model, n_head=n_heads,
                  feedforward_dim=dim_feedforward, dropout=dropout,
                  after_norm=after_norm, attn_type=attn_type,
                  bi_embed=bi_embed,
                  fc_dropout=fc_dropout,
                  pos_embed=pos_embed,
                  scale=attn_type == 'transformer')

    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    # Customize the training loop with callbacks.
    callbacks = []
    #   1. Clip each parameter's gradient before every backward step.
    #       clip_value (float) -- gradients are limited to [-clip_value, clip_value];
    #                             must be positive.
    #       clip_type (str) --
    #                           'norm': rescale the gradient norm into [-clip_value, clip_value];
    #                           'value': clamp gradients into [-clip_value, clip_value]
    #                                    (values below -clip_value become -clip_value,
    #                                    values above clip_value become clip_value).
    clip_callback = GradientClipCallback(clip_type='value', clip_value=5)
    #   2. Evaluate on the test set during training.
    evaluate_callback = EvaluateCallback(data_bundle.get_dataset('test'))
    #   3. Ramp the learning rate from 0 up to the configured value.
    #       warmup (int, float) --
    #                       int: apply the schedule until that step;
    #                       float (e.g. 0.1): the first 10% of steps follow the schedule.
    #       schedule (str) -- how to adjust the learning rate:
    #                       'linear': rise to the optimizer's lr during warmup, then decay to 0;
    #                       'constant': rise to the lr during warmup, then hold it constant.
    if warmup_steps > 0:
        warmup_callback = WarmupCallback(warmup_steps, schedule='linear')
        callbacks.append(warmup_callback)
    callbacks.extend([clip_callback, evaluate_callback])

    trainer = Trainer(data_bundle.get_dataset('train'), model, optimizer, batch_size=batch_size,
                      sampler=BucketSampler(),
                      num_workers=0, n_epochs=n_epochs, dev_data=data_bundle.get_dataset('dev'),
                      metrics=SpanFPreRecMetric(tag_vocab=data_bundle.get_vocab('target'), encoding_type=encoding_type),
                      dev_batch_size=batch_size, callbacks=callbacks, device=device, test_use_tqdm=False,
                      use_tqdm=True, save_path=None)
    trainer.train(load_best_model=False)


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
