"""Train a GlobalPointer NER model on a BERT backbone and emit a submission file.

Flat training script: loads CoNLL-style NER corpora, fine-tunes a
GlobalPointer labeling model on top of a RoBERTa-wwm-ext checkpoint,
then (via the `output` module, imported at the bottom) writes predictions
for the competition test set.
"""
import sys
import os
# Make this script's own directory importable so the sibling packages
# (nlp_tools, output) resolve regardless of the current working directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_path)
from nlp_tools.corpus.ner.corpus_loader import JsonNerCorpus,ChineseDailyNerCorpus
from nlp_tools.tasks.labeling.global_point_model import GlobalPointModel
from nlp_tools.embeddings import BertEmbedding
from nlp_tools.processors import SequenceProcessor
from nlp_tools.processors.ner.global_pointer_label_processor import GlobalPointerLabelProcessor
from nlp_tools.callbacks.ner.global_pointer_callback import GlobalPointEvaluator
from tensorflow import keras
from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer





# --- Model / path configuration -----------------------------------------

# Backbone architecture identifier understood by BertEmbedding.
model_type = 'bert'

# Pretrained RoBERTa-wwm-ext checkpoint directory.
bert_model_path = os.path.join(
    current_path, "pretrained_model/chinese_roberta_wwm_ext_L-12_H-768_A-12")

# Where the fine-tuned GlobalPointer model is saved to / loaded from.
model_save_path = os.path.join(current_path, 'global_pointer_model')

# CoNLL-formatted train / dev corpora.
ccks_train = os.path.join(current_path, "train_data/train.conll")
ccks_dev = os.path.join(current_path, "train_data/dev.conll")

# Competition-mounted test file and the submission output location.
test_data_path = r"/tcdata/final_test.txt"
submit_path = os.path.join(current_path, "result.txt")




if not os.path.exists(model_save_path):
    # Train only when no saved model exists yet; otherwise skip straight to
    # inference, which loads the checkpoint from model_save_path.
    train_data = ChineseDailyNerCorpus.load_data(ccks_train, corpus_path="")
    valid_data = ChineseDailyNerCorpus.load_data(ccks_dev, corpus_path="")
    bert_model_token_path = os.path.join(bert_model_path, 'vocab.txt')

    # Tokenizer built from the checkpoint vocabulary; simplified=True and
    # do_lower_case=True mirror the pretrained model's preprocessing.
    bert_tokenizer = BertTokenizer(token_dict=bert_model_token_path, simplified=True, do_lower_case=True)

    # Text and label processors for the GlobalPointer head.
    sequenceProcessor = SequenceProcessor(text_tokenizer=bert_tokenizer)
    labelProcessor = GlobalPointerLabelProcessor()

    # Embedding layer backed by the pretrained BERT checkpoint.
    bert_embedding = BertEmbedding(model_folder=bert_model_path, model_type=model_type)

    # Assemble the GlobalPointer labeling model.
    model = GlobalPointModel(embedding=bert_embedding, text_processor=sequenceProcessor, label_processor=labelProcessor)

    early_stop = keras.callbacks.EarlyStopping(patience=15)
    # Evaluates entity-level F1 on valid_data and persists the best model
    # to model_save_path — presumably on each improvement; confirm in
    # GlobalPointEvaluator.
    ner_f1_save_callback = GlobalPointEvaluator(model, model_save_path=model_save_path, valid_data=valid_data)
    # NOTE(review): the original code constructed a
    # keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=3) here but
    # never passed it to fit(), so it had no effect; the dead object was
    # removed. Re-add it to `callbacks` below if LR decay is actually wanted.
    model.fit(train_data, valid_data, epochs=25, callbacks=[early_stop, ner_f1_save_callback], batch_size=128)



# Imported late so sys.path is already patched above; `output` is a sibling
# module next to this script.
from output import make_output_file
# Presumably loads the saved model from model_save_path, runs inference on
# test_data_path and writes predictions to submit_path — TODO confirm
# against output.py.
make_output_file(model_save_path,test_data_path,submit_path)