from nlp_tools.corpus.lcqmc import LCQMC

import  os
from tensorflow import keras

from nlp_tools.tasks.simalary.simple_bert_model import SimpleBertModel
from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer
from nlp_tools.processors.simalary.bert_simalary_sequence_processor import BertSimalarySequenceProcessor
from nlp_tools.processors.class_processor import ClassificationProcessor
from nlp_tools.embeddings import BertEmbedding

# LCQMC sentence-pair similarity dataset: one split file per phase.
_lcqmc_dir = r'/home/qiufengfeng/nlp/nlp_data/encoder/lcqmc'
train_data = LCQMC.load_data(os.path.join(_lcqmc_dir, 'train.txt'))
valid_data = LCQMC.load_data(os.path.join(_lcqmc_dir, 'dev.txt'))


# Pre-trained Chinese RoBERTa-wwm-ext checkpoint directory and its vocab file.
bert_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_roberta_wwm_ext_L-12_H-768_A-12'
bert_model_token_path = os.path.join(bert_model_path, 'vocab.txt')
# with_pool=True: request the pooled sentence-level output from the BERT
# embedding (presumably the [CLS] pooled vector — confirm in BertEmbedding).
bert_embedding = BertEmbedding(model_folder=bert_model_path, with_pool=True)

# Initialize the tokenizer from the checkpoint vocab.
# NOTE(review): simplified=True / do_lower_case=True semantics come from
# BertTokenizer — presumably vocab simplification and lower-casing; verify.
bert_tokenizer = BertTokenizer(token_dict=bert_model_token_path, simplified=True, do_lower_case=True)

# Processors convert raw dataset rows into model inputs and label targets.
sequence_processor = BertSimalarySequenceProcessor(text_tokenizer=bert_tokenizer)
label_processor = ClassificationProcessor()

model = SimpleBertModel(embedding=bert_embedding,
                        text_processor=sequence_processor,
                        label_processor=label_processor)

# NOTE(review): with epochs=1 below, neither callback can ever trigger
# (patience=10 / patience=5) — confirm whether epochs=1 is a smoke-test value.
early_stop = keras.callbacks.EarlyStopping(patience=10)
reduce_lr_callback = keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=5)

model.fit(train_data, validate_data=valid_data,
          epochs=1,
          callbacks=[early_stop, reduce_lr_callback],
          batch_size=32)



