import sys
sys.path.append("/data/python_project/qiufengfeng/nlp_tools")
from nlp_tools.corpus.ner.corpus_loader import JsonNerCorpus,ChineseDailyNerCorpus
from nlp_tools.tasks.labeling import BiLSTM_CRF_Model
from nlp_tools.tasks.labeling.span_classify_model import SpanClassifyModel
from nlp_tools.embeddings import BertEmbedding
from nlp_tools.processors import SequenceProcessor
from nlp_tools.processors.ner.span_sentence_label_processor import SpanSentenceLabelProcessor
from nlp_tools.callbacks.ner.global_pointer_callback import GlobalPointEvaluator
from tensorflow import keras
import os



model_type = 'bert'
# Use startswith('win') rather than `'win' in sys.platform`: the substring
# test also matches macOS (sys.platform == 'darwin' contains 'win') and
# would wrongly select the Windows checkpoint paths there.
if sys.platform.startswith('win'):
    bert_model_path = r"F:\pretrain_model\bert\chinese_L-12_H-768_A-12"
    save_path = r'E:\model_output\ner'
else:
    # Alternative pretrained checkpoints, kept commented for quick switching:
    # bert base
    #bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_L-12_H-768_A-12/chinese_L-12_H-768_A-12/'

    # roberta large wwm
    #bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16'
    # roberta base
    #bert_model_path = r"/home/qiufengfeng/nlp/pre_trained_model/roberta_zh_l12"

    # roberta wwm (currently active)
    bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_roberta_wwm_ext_L-12_H-768_A-12'
    # roberta wwm, further pretrained on the Tianchi data
    #bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/tianci1'

    # electra (requires model_type = 'electra' below as well)
    #bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_electra_base_L-12_H-768_A-12'
    #model_type = 'electra'

    save_path = '/home/qiufengfeng/nlp/train_models/ner/company_name/bilstm_crf_data_agument'







# Tianchi CCKS address-NER dataset in CoNLL column format.
ccks_train = '/home/fanfanfeng/bak/nlp_data/competion/天池ner地址识别/train.conll'
ccks_dev = '/home/fanfanfeng/bak/nlp_data/competion/天池ner地址识别/dev.conll'
train_data = ChineseDailyNerCorpus.load_data(ccks_train)
valid_data = ChineseDailyNerCorpus.load_data(ccks_dev)

# NOTE(review): this overrides the platform-specific save_path configured
# above with a filesystem-root path — confirm this is intentional and the
# process has write permission there.
save_path = '/roberta_crf_residual_with_pretrained'

# Fix: the original line was truncated mid-string ("'vocab." with no closing
# quote), a syntax error. BERT checkpoints ship their vocabulary as vocab.txt.
bert_model_token_path = os.path.join(bert_model_path, 'vocab.txt')

from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer

# Tokenizer backed by the pretrained checkpoint's vocabulary file.
tokenizer = BertTokenizer(
    token_dict=bert_model_token_path,
    simplified=True,
    do_lower_case=True,
)

# Text and label processors feeding the span-classification task.
text_processor = SequenceProcessor(text_tokenizer=tokenizer)
label_processor = SpanSentenceLabelProcessor()

# Embedding layer loaded from the pretrained checkpoint folder.
embedding = BertEmbedding(model_folder=bert_model_path, model_type=model_type)

# Span-based NER model wiring embedding and processors together.
model = SpanClassifyModel(
    embedding=embedding,
    text_processor=text_processor,
    label_processor=label_processor,
)

# Training callbacks. NOTE(review): the LR scheduler below is constructed
# but never passed to fit() — preserved as-is from the original; confirm
# whether it was meant to be in the callbacks list.
early_stop = keras.callbacks.EarlyStopping(patience=20)
reduse_lr_callback = keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=3)
f1_checkpoint = GlobalPointEvaluator(model, model_save_path=save_path, valid_data=valid_data)

model.fit(
    train_data,
    valid_data,
    epochs=50,
    callbacks=[early_stop, f1_checkpoint],
    batch_size=50,
)


# bilstm people2014 0.93645


# bilstm  roberta  no multilayer  0.90644
# bilstm  roberta-w multilayer 0.91025
# bilstm roberta-w FGM 0.91681