import sys
sys.path.append("/data/python_project/qiufengfeng/nlp_tools")
from tensorflow import keras
import os

from nlp_tools.corpus.ner.global_pointer.cluener_global_pointer_loader import CLUENERGlobalPointerLoader
from nlp_tools.corpus.ner.corpus_loader import JsonNerCorpus,ChineseDailyNerCorpus
from nlp_tools.tasks.labeling.global_point_model import GlobalPointModel
from nlp_tools.embeddings import BertEmbedding
from nlp_tools.processors import SequenceProcessor
from nlp_tools.processors.ner.global_pointer_label_processor import GlobalPointerLabelProcessor
from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer
from nlp_tools.callbacks.ner.global_pointer_callback import GlobalPointEvaluator
from nlp_tools.generators import BatchGenerator

import tensorflow as tf
#tf.config.experimental_run_functions_eagerly(True)

# Select pretrained-model / output / corpus paths per platform.
# BUGFIX: the original test `'win' in sys.platform` also matches macOS,
# whose sys.platform is 'darwin' — use an explicit prefix check instead
# (Windows reports 'win32'; 'cygwin' is also treated as Windows here,
# matching the original substring behavior).
if sys.platform.startswith(('win', 'cygwin')):
    bert_model_path = r"F:\pretrain_model\bert\chinese_L-12_H-768_A-12"
    save_path = r'E:\model_output\ner'
    corpus_path = ""
else:
    # bert base
    # bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_L-12_H-768_A-12/chinese_L-12_H-768_A-12/'

    # roberta large wwm
    # bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16'
    # roberta base
    # bert_model_path = r"/home/qiufengfeng/nlp/pre_trained_model/roberta_zh_l12"

    # roberta wwm (currently active checkpoint)
    bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_roberta_wwm_ext_L-12_H-768_A-12'
    # roberta wwm, further pretrained (tianci1)
    #bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/tianci1'
    save_path = '/home/qiufengfeng/nlp/train_models/ner/company_name/globalpoint_1'
    corpus_path = '/home/qiufengfeng/nlp/nlp_data/ner/cluener'
# BERT vocabulary file shipped alongside the checkpoint.
bert_model_token_path = os.path.join(bert_model_path, 'vocab.txt')



# Alternative corpus sources, kept for reference (commented out):
# 1) glob-based JSON NER corpora:
# glob_path_test = '/home/qiufengfeng/nlp/nlp_project/github_third/TransformersNer/ner_train/data/*test.json'
# glob_path_train = '/home/qiufengfeng/nlp/nlp_project/github_third/TransformersNer/ner_train/data/*train.json'
# glob_path_dev = '/home/qiufengfeng/nlp/nlp_project/github_third/TransformersNer/ner_train/data/*dev.json'
# train_data = JsonNerCorpus(end_position_plus=True).load_data(glob_path_train)
# dev_data = JsonNerCorpus().load_data(glob_path_dev)
# test_data = JsonNerCorpus().load_data(glob_path_test)


# 2) CLUENER training-data loader:
# train_data = CLUENERGlobalPointerLoader.load_data(corpus_path=corpus_path,subset_name='train')
# dev_data = CLUENERGlobalPointerLoader.load_data(corpus_path=corpus_path,subset_name='dev')

# Active corpus: CCKS2021 Chinese address-element parsing (Tianchi
# competition), CoNLL-formatted train/dev splits.
ccks_train = '/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/data/train.conll'
ccks_dev = '/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/data/dev.conll'
train_data = ChineseDailyNerCorpus.load_data(ccks_train)
dev_data = ChineseDailyNerCorpus.load_data(ccks_dev)
# NOTE(review): this deliberately overrides the `save_path` chosen in the
# platform block above, redirecting model output to the competition folder.
save_path = '/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/globalpoint_fgm_rdrop_pretained'



# Build the tokenizer from the pretrained BERT vocabulary.
# NOTE(review): assumes `simplified=True` prunes the vocab to tokens
# actually used — confirm against nlp_tools' BertTokenizer docs.
bert_tokenizer = BertTokenizer(token_dict=bert_model_token_path, do_lower_case=True, simplified=True)


# Text and label processors for the GlobalPointer labeling task.
sequence_processor = SequenceProcessor(text_tokenizer=bert_tokenizer)
label_processor = GlobalPointerLabelProcessor()

# Embedding layer backed by the pretrained BERT checkpoint.
bert_embedding = BertEmbedding(model_folder=bert_model_path)



# Build the GlobalPointer model with FGM adversarial training and R-Drop.
model = GlobalPointModel(embedding=bert_embedding, text_processor=sequence_processor,
                         label_processor=label_processor, use_FGM=True, use_rdrop=True)

early_stop = keras.callbacks.EarlyStopping(patience=20)
reduce_lr_callback = keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=5)
# Saves the best checkpoint to `save_path` based on dev-set entity F1.
ner_f1_save_callback = GlobalPointEvaluator(model, save_path, dev_data)
# BUGFIX: the ReduceLROnPlateau callback was constructed but never passed
# to fit(), so it silently had no effect — it is now wired in.
model.fit(train_data, validate_data=dev_data, epochs=50,
          callbacks=[early_stop, reduce_lr_callback, ner_f1_save_callback],
          generator=BatchGenerator, batch_size=50)


# people 2014 best f1: 0.76694


# global point, roberta: 0.91773
# global point, roberta + FGM + data augmentation: 0.92167