import os

from tensorflow.keras.callbacks import TensorBoard
from transformers import AdamWeightDecay  # NOTE(review): imported but never used in this script — confirm before removing

from nlp_tools.callbacks.classification.f1score_save_callback import F1SaveCallback
from nlp_tools.corpus import SMP2018ECDTCorpus, DialogueCorpusLoader
from nlp_tools.embeddings.bert_embedding import BertEmbedding
from nlp_tools.metrics.classification import F1CategoryCallback
from nlp_tools.processors.class_processor import ClassificationProcessor
from nlp_tools.processors.dialogue_sequence_processor import DialogueSequenceProcessor
from nlp_tools.tasks.classification import ClassificationCnn
from nlp_tools.tasks.classification.cnn_attention_model import CNN_Attention_Model
from nlp_tools.tasks.classification.entity_level_model import ClassificationEntityLevel
from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer


"""Fine-tune an entity-level BERT classifier on a 3-way dialogue-intent task.

Pipeline: load train/valid CSVs -> build BERT embedding + tokenizer ->
wrap them in sequence/label processors -> train ClassificationEntityLevel
with an F1-based checkpoint callback and TensorBoard logging.
"""

# Single authoritative save location.  (A previous absolute path was assigned
# here and then silently overwritten with 'train_model/' further down — only
# the relative path was ever effective, so keep exactly one assignment.)
model_save_path = 'train_model/'

train_data = DialogueCorpusLoader.load_data("/home/fanfanfeng/working_data/nlp_data/working/classify/train.csv")
valid_data = DialogueCorpusLoader.load_data("/home/fanfanfeng/working_data/nlp_data/working/classify/test.csv")

bert_model_path = "/home/fanfanfeng/working_data/pretrained_models/tensorflow/chinese_wwm_ext_L-12_H-768_A-12"
# Labels: "yes" / "no" / "uncertain" (kept verbatim — they must match the data).
label_list = ["是", "不是", "不确定"]
label_dict = {key: index for index, key in enumerate(label_list)}

embedding = BertEmbedding(bert_model_path)

text_tokenizer = BertTokenizer(token_dict=os.path.join(bert_model_path, 'vocab.txt'))
# By default no extra segmentation/pre-processing of the training data is
# needed; to customize, override text_tokenizer and the matching processor.
sequenceProcessor = DialogueSequenceProcessor(text_tokenizer=text_tokenizer, return_ner_masking=True)
labelProcessor = ClassificationProcessor(label2id=label_dict)

model = ClassificationEntityLevel(
    embedding=embedding,
    text_processor=sequenceProcessor,
    label_processor=labelProcessor,
    use_rdrop=False,
    use_FGM=False,
)

# Saves the best checkpoint (by per-class F1 on valid_data) under model_save_path.
f1_callback = F1SaveCallback(model, model_save_path, valid_data, label_names=label_list)
tensorboard_callback = TensorBoard(
    log_dir=os.path.join(model_save_path, 'tensorboard_logs'),
    write_graph=True,
    write_images=True,
)

model.fit(
    train_data,
    validate_data=valid_data,
    epochs=60,
    callbacks=[f1_callback, tensorboard_callback],
    batch_size=32,
)

