# -- Path setup: make the project packages importable when run as a script --
import sys
import os
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_path)  # allow importing nlp_tools (and output.py below) from the script's directory
from nlp_tools.corpus.ner.corpus_loader import JsonNerCorpus,ChineseDailyNerCorpus
from nlp_tools.tasks.labeling import BiLSTM_CRF_Model
from nlp_tools.tasks.labeling.bi_lstm_crf_add_residual_model import BiLstmCrfAddResidualModel
from nlp_tools.embeddings import BertEmbedding
from nlp_tools.processors import SequenceProcessor
from nlp_tools.processors.ner.ner_sentence_label_processor import NerSentenceLabelProcessor
from nlp_tools.callbacks.ner.global_pointer_callback import GlobalPointEvaluator
from tensorflow import keras
from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer





# -- Configuration: pretrained weights, corpus locations, and output targets --
model_type = 'bert'
# Pretrained Chinese RoBERTa-wwm-ext checkpoint directory (BERT-compatible format).
bert_model_path = os.path.join(current_path,"pretrained_model/chinese_roberta_wwm_ext_L-12_H-768_A-12")
# Directory where the best model is saved by the F1 evaluation callback below.
model_save_path = os.path.join(current_path,'bilstm_crf_residual')
# Train/dev splits; .conll extension suggests CoNLL-style column format — loaded
# via ChineseDailyNerCorpus below.
ccks_train = os.path.join(current_path,"train_data/train.conll")
ccks_dev =  os.path.join(current_path,"train_data/dev.conll")
# NOTE(review): absolute path — presumably the mount point inside a competition
# evaluation container; confirm before running locally.
test_data_path = r"/tcdata/final_test.txt"
# Submission file written by make_output_file at the end of the script.
submit_path = os.path.join(current_path,"result.txt")



# Load train/dev data; corpus_path="" because full file paths are supplied above.
train_data = ChineseDailyNerCorpus.load_data(ccks_train,corpus_path="")
valid_data = ChineseDailyNerCorpus.load_data(ccks_dev,corpus_path="")
bert_model_token_path = os.path.join(bert_model_path,'vocab.txt')  # BERT vocabulary file







# Initialize the tokenizer (simplified vocab + lowercasing to match the checkpoint vocab).
bert_tokenizer = BertTokenizer(token_dict=bert_model_token_path,simplified=True,do_lower_case=True)

# Initialize the sentence and label processors.
sequence_processor = SequenceProcessor(text_tokenizer=bert_tokenizer)
label_processor = NerSentenceLabelProcessor()

# Initialize the embedding layer backed by the pretrained BERT weights.
bert_embedding = BertEmbedding(model_folder=bert_model_path,model_type=model_type)


# Build the BiLSTM-CRF model with residual connections.
model = BiLstmCrfAddResidualModel(embedding=bert_embedding,text_processor=sequence_processor,label_processor=label_processor)

# Training callbacks: early stopping, LR reduction on plateau, and F1-based best-model saving.
early_stop = keras.callbacks.EarlyStopping(patience=15)
reduce_lr_callback = keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=3)
ner_f1_save_callback = GlobalPointEvaluator(model,model_save_path=model_save_path,valid_data=valid_data)
# BUG FIX: the ReduceLROnPlateau callback was constructed but never passed to fit(),
# so the learning-rate schedule had no effect. It is now included in the callbacks list.
model.fit(train_data,valid_data,epochs=20,callbacks=[early_stop,reduce_lr_callback,ner_f1_save_callback],batch_size=128)
#model.evaluate(x_test,y_test)


# Historical experiment log (validation F1 scores from previous runs):
# bilstm people2014 0.93645


# bilstm  roberta  no multilayer  0.90644
# bilstm  roberta-w multiplayer 0.91025
# bilstm roberta-w FGM 0.91681
# Deferred import: output.py lives next to this script and is importable thanks to
# the sys.path setup at the top of the file.
from output import make_output_file
# Run the saved best model over the test set and write the submission file.
make_output_file(model_save_path,test_data_path,submit_path)