#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   sentence_bert_training.py    
@Contact :   544855237@qq.com
@License :   (C)Copyright 2017-2018, Liugroup-NLPR-CASIA

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/4/18 下午8:23   qiufengfeng      1.0         None
'''

# import lib

import random
import numpy as np
import tensorflow as tf
import os
from example.train_setting import train_settings


from nlp_tools.utils import seed_tensorflow
from nlp_tools.corpus.lcqmc import LCQMC
from nlp_tools.tokenizer.hugging_tokenizer import HuggingTokenizer
from nlp_tools.embeddings.hugginface.autoembedding import AutoEmbedding
from nlp_tools.processors.sequence_processor import SequenceProcessor
from nlp_tools.processors.class_processor import ClassificationProcessor
from nlp_tools.tasks.encode.cosine_sentence_encode_model import CosineSentenceEncodeModel
from nlp_tools.callbacks.embedding.corrcoef_callback import EncoderCorrcoefCallback
from nlp_tools.generators import EncoderGenerator




# --- Experiment configuration ---
task_name = 'lcqmc'                     # task name (key into train_settings)
seed_tensorflow(2021)                   # pin random seeds for reproducibility
bert_model_name = "bert-base-chinese"   # BERT checkpoint; alternative: "hfl/chinese-bert-wwm-ext"
model_save_path = 'train_model/'
max_sentence_length = 128               # maximum sequence length fed to the model

# Label vocabulary: map the string form of each label to its index.
label_list = [0, 1]
label_dict = dict(zip(map(str, label_list), range(len(label_list))))

# Load the LCQMC train / validation / test splits from the configured paths.
train_data = LCQMC.load_data(train_settings[task_name]['train'])
valid_data = LCQMC.load_data(train_settings[task_name]['valid'])
test_data = LCQMC.load_data(train_settings[task_name]['test'])





# Build the tokenizer and the matching embedding layer from the BERT checkpoint.
text_tokenizer = HuggingTokenizer(bert_model_name)
embedding = AutoEmbedding(
    bert_model_name,
    text_tokenizer.tokenizer.model_input_names,
)

# By default no extra tokenization or preprocessing of the training data is
# required; to customize, override text_tokenizer and the matching processor.
sequenceProcessor = SequenceProcessor(text_tokenizer=text_tokenizer)
labelProcessor = ClassificationProcessor(label2id=label_dict)

# Sentence-pair encoder trained with a cosine-similarity objective.
model = CosineSentenceEncodeModel(
    embedding=embedding,
    text_processor=sequenceProcessor,
    label_processor=labelProcessor,
    use_rdrop=False,   # R-Drop regularization disabled
    use_FGM=False,     # FGM adversarial training disabled
    max_sequence_length=max_sentence_length)

# Evaluation callback: presumably computes a correlation metric on test_data
# and checkpoints to model_save_path — confirm against EncoderCorrcoefCallback.
f1_callback = EncoderCorrcoefCallback(model, model_save_path, test_data)

# FIX: the original imported TensorBoard from the private `keras.api._v2`
# module path, which is an internal layout that breaks across Keras/TF
# versions. `tf` is already imported at the top of the file, so use the
# stable public API instead.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
    log_dir=os.path.join(model_save_path, 'tensorboard_logs'))

model.fit(
    train_data,
    validate_data=valid_data,
    epochs=60,
    callbacks=[f1_callback, tensorboard_callback],
    batch_size=32,
    generator=EncoderGenerator)
