import sys
sys.path.append("/data/python_project/qiufengfeng/nlp_tools")

from nlp_tools.tasks.labeling.kfold_crf_model import KfoldCrfModel
from nlp_tools.processors.ner.kfold_ner_sentence_label_processor import KfoldNerSentenceLabelProcessor
from nlp_tools.callbacks.ner.global_pointer_callback import GlobalPointEvaluator
from tensorflow import keras
from tensorflow.keras.preprocessing.sequence import  pad_sequences


from nlp_tools.corpus.ner.corpus_loader import ChineseDailyNerCorpus
# Paths to the CCKS2021 Chinese address-element parsing dataset (CoNLL format).
ccks_train = '/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/data/train.conll'
ccks_dev = '/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/data/dev.conll'
# Loader presumably yields (text, labels) pairs — the rewrap loops below index data[0]/data[1].
train_data = ChineseDailyNerCorpus.load_data(ccks_train)
valid_data = ChineseDailyNerCorpus.load_data(ccks_dev)
# Directory where the best CRF model checkpoints are written by the evaluator callback.
save_path = '/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/crf'

def _to_kfold_sample(sample):
    """Rewrap one corpus entry into the structure the k-fold pipeline expects.

    ``sample`` is indexed as ``(text, labels)``; the result is
    ``[[text, [labels]], labels]`` — the model input bundles the label
    sequence as well (FolderBatchGenerator reads it back via ``item[1]``).
    """
    return [[sample[0], [sample[1]]], sample[1]]


# The two splits previously used identical copy-paste loops; rewrap both
# through the shared helper instead.
train_data = [_to_kfold_sample(sample) for sample in train_data]
valid_data = [_to_kfold_sample(sample) for sample in valid_data]




from nlp_tools.tokenizer.donothing_tokenizer import DoNothingTokenizer


# Initialize the sentence/label processor for k-fold NER tagging; it is shared
# by the model (L99) and the batch generator (transform / transform_kfold).
labelProcessor = KfoldNerSentenceLabelProcessor()



from nlp_tools.generators import DataGenerator
class FolderBatchGenerator(DataGenerator):
    """Batch generator for the k-fold CRF model.

    Each sample arrives as ``([text, [labels]], labels)``.  Batches are padded
    to the longest sequence in the batch and yielded as
    ``((y_folder, masking), y_tensor)``, where ``y_folder`` is the k-fold
    label encoding of the bundled labels, ``masking`` marks real (non-padding)
    positions with 1, and ``y_tensor`` is the target label tensor.

    The original implementation duplicated the whole batch-encoding sequence
    between the full-batch branch and the final partial-batch flush; that
    logic now lives once in ``_encode_batch``.
    """

    def __init__(self, data, text_processor, label_processor, seq_length=None,
                 batch_size=64, buffer_size=None, **kwargs):
        """Store processors and forward the generic arguments to DataGenerator.

        Args:
            data: iterable of ``([text, [labels]], labels)`` samples.
            text_processor: accepted for interface compatibility with the
                framework; not consulted by this generator.
            seq_length: stored but not used — padding length is computed
                per batch in ``_encode_batch``.
            batch_size: number of samples per full batch.
            buffer_size: shuffle buffer size forwarded to DataGenerator.
        """
        super(FolderBatchGenerator, self).__init__(data, batch_size, buffer_size)
        # Previously dropped on the floor; kept so callers can still reach it.
        self.text_processor = text_processor
        self.label_processor = label_processor
        self.seq_length = seq_length

    def _encode_batch(self, batch_x, batch_y):
        """Pad and label-encode one batch (full or tail) into model tensors."""
        sentences = [item[0] for item in batch_x]
        batch_y_fold = [item[1] for item in batch_x]
        max_sequence_len = max(len(seq) for seq in sentences)
        # 1 marks a real token; positions added by padding stay 0.
        masking = pad_sequences([[1] * len(seq) for seq in sentences],
                                max_sequence_len, padding='post', truncating='post')
        y_folder = self.label_processor.transform_kfold(batch_y_fold,
                                                        seq_length=max_sequence_len)
        y_tensor = self.label_processor.transform(batch_y,
                                                  seq_length=max_sequence_len)
        return (y_folder, masking), y_tensor

    def __iter__(self, random=False):
        """Yield ``((y_folder, masking), y_tensor)`` batches.

        The trailing partial batch is flushed too, so no sample is dropped.
        ``random`` is kept for signature compatibility; ``self.sample()`` is
        called without it, matching the original behavior.
        """
        batch_x, batch_y = [], []
        for x, y in self.sample():
            batch_x.append(x)
            batch_y.append(y)
            if len(batch_x) == self.batch_size:
                yield self._encode_batch(batch_x, batch_y)
                batch_x, batch_y = [], []
        if batch_x:
            yield self._encode_batch(batch_x, batch_y)



# Build the k-fold CRF model around the shared label processor.
model = KfoldCrfModel(
    embedding=None,
    text_processor=None,
    label_processor=labelProcessor,
    use_rdrop=False,
    use_FGM=False,
)

stop_on_plateau = keras.callbacks.EarlyStopping(patience=20)
# NOTE(review): constructed but never passed to fit() below — confirm whether
# it was meant to be included in the callbacks list.
lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=3)
# Tracks NER F1 on the validation split and checkpoints the best model.
f1_checkpoint = GlobalPointEvaluator(model, model_save_path=save_path, valid_data=valid_data)

model.fit(
    train_data,
    valid_data,
    epochs=50,
    callbacks=[stop_on_plateau, f1_checkpoint],
    batch_size=50,
    generator=FolderBatchGenerator,
)