#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/7/9 12:17 PM
# @Author  : qiufengfeng
# @File    : new_training.py
# @Description : Training script for span-based Chinese NER (Tianchi address dataset).

from nlp_tools.utils import seed_tensorflow

from nlp_tools.corpus.ner.corpus_loader import ChineseDailyNerCorpus
from nlp_tools.processors import SequenceProcessor
from nlp_tools.processors.ner.span_sentence_label_processor import SpanSentenceLabelProcessor
from nlp_tools.tasks.labeling.span_classify_model import SpanClassifyModel
from nlp_tools.tokenizer.hugging_tokenizer import HuggingTokenizer
from nlp_tools.embeddings.hugginface.autoembedding import AutoEmbedding
from nlp_tools.callbacks.ner.global_pointer_callback import GlobalPointEvaluator


import tensorflow as tf


# Fix all random seeds so training runs are reproducible.
seed_tensorflow(2022)

# Pretrained checkpoint (HuggingFace hub id) and directory for saved models.
bert_model_path = "hfl/chinese-bert-wwm-ext"
save_path = "tmp/ner"

# Load CoNLL-format train/dev splits.
# NOTE(review): paths are machine-specific; consider promoting them to CLI arguments.
train_data = ChineseDailyNerCorpus.load_data(
    corpus_path="/home/fanfanfeng/bak/nlp_data/competion/天池ner地址识别/train.conll",
    subset_name="",
)
valid_data = ChineseDailyNerCorpus.load_data(
    corpus_path="/home/fanfanfeng/bak/nlp_data/competion/天池ner地址识别/dev.conll",
    subset_name="",
)
print(train_data[:5])
print(valid_data[:5])

# Tokenizer and embedding layer built from the same pretrained checkpoint so
# their vocabularies and expected model inputs agree.
text_tokenizer = HuggingTokenizer(bert_model_path)
embedding = AutoEmbedding(bert_model_path, text_tokenizer.tokenizer.model_input_names)

# Initialize the sentence and label processors.
sequence_processor = SequenceProcessor(text_tokenizer=text_tokenizer)
label_processor = SpanSentenceLabelProcessor()

# Build the span-classification model with R-Drop regularization and FGM
# adversarial training enabled.
model = SpanClassifyModel(
    embedding=embedding,
    text_processor=sequence_processor,
    label_processor=label_processor,
    use_rdrop=True,
    use_FGM=True,
)

# Callback evaluating on the dev set and saving the best model to `save_path`.
ner_f1_save_callback = GlobalPointEvaluator(model, model_save_path=save_path, valid_data=valid_data)
model.fit(train_data, valid_data, epochs=50, callbacks=[ner_f1_save_callback], batch_size=50)


