from bert4keras.snippets import DataGenerator
from bert4keras.snippets import sequence_padding
import numpy as np
import re


from utils.word_vocab import convert_sentence_to_ids


class BertDataGenerator(DataGenerator):
    """Batch generator over (text, label) pairs for models whose loss is
    computed inside the graph: yields ([token_ids, segment_ids, labels], None).
    """

    def __init__(self, data, tokenizer, maxlen, batch_size=32, buffer_size=None):
        self.tokenizer = tokenizer
        self.maxlen = maxlen  # truncation length passed to tokenizer.encode
        super(BertDataGenerator, self).__init__(data, batch_size, buffer_size)

    def __iter__(self, random=False):
        token_buf, segment_buf, label_buf = [], [], []
        for is_end, (text, label) in self.sample(random):
            tokens, segments = self.tokenizer.encode(text, maxlen=self.maxlen)
            token_buf.append(tokens)
            segment_buf.append(segments)
            label_buf.append([label])
            # flush a full batch, or whatever is left at the end of the epoch
            if is_end or len(token_buf) == self.batch_size:
                yield [
                    sequence_padding(token_buf),
                    sequence_padding(segment_buf),
                    sequence_padding(label_buf),
                ], None
                token_buf, segment_buf, label_buf = [], [], []

class SimBertDataGenerator(DataGenerator):
    """Batch generator for SimBERT-style training on (text1, text2, label) pairs.

    Only positive pairs (label != 0) are used. Each pair contributes TWO
    samples -- [CLS]text1[SEP]text2[SEP] and the swapped order -- so that
    similar sentences sit adjacent in the batch. Targets are None because the
    seq2seq + contrastive loss is computed inside the model.
    """

    def __init__(self, data, tokenizer, maxlen, batch_size=32, buffer_size=None):
        self.tokenizer = tokenizer
        self.maxlen = maxlen
        # running collection of positive source sentences seen so far
        self.some_samples = []
        super(SimBertDataGenerator, self).__init__(data, batch_size, buffer_size)

    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids = [], []
        for is_end, (text1, text2, label) in self.sample(random):
            # skip negative pairs: only similar sentences are trained on
            if int(label) == 0:
                continue
            token_ids, segment_ids = self.tokenizer.encode(text1, text2, maxlen=self.maxlen * 2)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            self.some_samples.append(text1)
            # also add the pair in swapped order
            token_ids, segment_ids = self.tokenizer.encode(text2, text1, maxlen=self.maxlen * 2)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)

            # Bug fix: two samples are appended per pair, so the buffer grows
            # in steps of 2 and could jump past an odd batch_size without ever
            # being equal to it, leaving the batch unflushed until is_end.
            # '>=' guarantees the batch is emitted as soon as it is full.
            if len(batch_token_ids) >= self.batch_size or is_end:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                yield [batch_token_ids, batch_segment_ids], None
                batch_token_ids, batch_segment_ids = [], []




class BertClassifyDataGenerator(DataGenerator):
    """Batch generator for supervised classification: yields
    ([token_ids, segment_ids], labels) so Keras computes the loss externally.
    """

    def __init__(self, data, tokenizer, maxlen, batch_size=32, buffer_size=None):
        self.tokenizer = tokenizer
        self.maxlen = maxlen  # truncation length passed to tokenizer.encode
        super(BertClassifyDataGenerator, self).__init__(data, batch_size, buffer_size)

    def __iter__(self, random=False):
        tokens_acc, segments_acc, labels_acc = [], [], []
        for is_end, (text, label) in self.sample(random):
            ids, segs = self.tokenizer.encode(text, maxlen=self.maxlen)
            tokens_acc.append(ids)
            segments_acc.append(segs)
            labels_acc.append([label])
            # emit a full batch, or the trailing partial batch at epoch end
            if is_end or len(tokens_acc) == self.batch_size:
                padded_tokens = sequence_padding(tokens_acc)
                padded_segments = sequence_padding(segments_acc)
                padded_labels = sequence_padding(labels_acc)
                yield [padded_tokens, padded_segments], padded_labels
                tokens_acc, segments_acc, labels_acc = [], [], []


class BertGenTextDataGenerator(DataGenerator):
    """Batch generator for language-model text-generation training.

    Yields ([token_ids, segment_ids], None); the LM loss is computed inside
    the model.
    """

    def __init__(self, data, tokenizer, maxlen, batch_size=32, buffer_size=None):
        self.tokenizer = tokenizer
        self.maxlen = maxlen  # truncation length for encoded texts
        super(BertGenTextDataGenerator, self).__init__(data, batch_size, buffer_size)

    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids = [], []
        for is_end, text in self.sample(random):
            # Bug fix: maxlen was stored in __init__ but never used, so long
            # texts were not truncated (unlike every sibling generator in this
            # file); pass it through to encode().
            token_ids, segment_ids = self.tokenizer.encode(text, maxlen=self.maxlen)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            if len(batch_token_ids) == self.batch_size or is_end:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                yield [batch_token_ids, batch_segment_ids], None
                batch_token_ids, batch_segment_ids = [], []



class QADataGenerator(DataGenerator):
    """Single-sample layout: [CLS]passage[SEP]question[SEP]answer[SEP]"""

    def __init__(self, data, tokenizer, max_p_len, max_qa_len, batch_size=32, buffer_size=None):
        self.tokenizer = tokenizer
        self.max_p_len = max_p_len    # token budget for the passage part
        self.max_qa_len = max_qa_len  # token budget for question + answer
        super(QADataGenerator, self).__init__(data, batch_size, buffer_size)

    def __iter__(self, random=False):
        batch_tokens, batch_segments = [], []
        for is_end, D in self.sample(random):
            question = D['question']
            answers = [p['answer'] for p in D['passages'] if p['answer']]
            # sample one candidate passage per epoch pass
            passage = np.random.choice(D["passages"])['passage']
            # normalise separators so answer tokens can be matched literally
            passage = re.sub(' |、|；|，', ',', passage)
            # keep the first answer whose every space-separated piece occurs
            # within the part of the passage that survives truncation
            final_answer = ''
            visible = passage[:self.max_p_len - 2]
            for answer in answers:
                if all(piece in visible for piece in answer.split(" ")):
                    final_answer = answer.replace(" ", ",")
                    break
            qa_token_ids, qa_segment_ids = self.tokenizer.encode(
                question, final_answer, maxlen=self.max_qa_len + 1
            )
            p_token_ids, p_segment_ids = self.tokenizer.encode(
                passage, maxlen=self.max_p_len
            )
            # concatenate, dropping the duplicate [CLS] of the Q/A segment
            batch_tokens.append(p_token_ids + qa_token_ids[1:])
            batch_segments.append(p_segment_ids + qa_segment_ids[1:])

            if is_end or len(batch_tokens) == self.batch_size:
                yield [
                    sequence_padding(batch_tokens),
                    sequence_padding(batch_segments),
                ], None
                batch_tokens, batch_segments = [], []

class Seq2seqDataGenerator(DataGenerator):
    """Batch generator for word-level seq2seq training.

    Each (source, target) pair becomes three id sequences: encoder input,
    decoder input ('<SOS> ' + target) and decoder target (decoder input
    followed by ' <EOS>').
    """

    def __init__(self, data, vocab, maxlen, batch_size=32, buffer_size=None):
        self.vocab = vocab
        self.maxlen = maxlen
        super(Seq2seqDataGenerator, self).__init__(data, batch_size, buffer_size)

    def _encode_pair(self, text_pair):
        # Convert one (source, target) pair into (encoder, decoder, target) ids.
        source, target = text_pair
        decoder_in = '<SOS> ' + target
        decoder_out = decoder_in + ' <EOS>'
        return (
            convert_sentence_to_ids(source, self.vocab),
            convert_sentence_to_ids(decoder_in, self.vocab),
            convert_sentence_to_ids(decoder_out, self.vocab),
        )

    def __iter__(self, random=False):
        enc_buf, dec_buf, out_buf = [], [], []
        for is_end, text_pair in self.sample(random):
            enc_ids, dec_ids, out_ids = self._encode_pair(text_pair)
            enc_buf.append(enc_ids)
            dec_buf.append(dec_ids)
            out_buf.append(out_ids)
            if is_end or len(enc_buf) == self.batch_size:
                yield [
                    sequence_padding(enc_buf),
                    sequence_padding(dec_buf),
                    sequence_padding(out_buf),
                ]
                enc_buf, dec_buf, out_buf = [], [], []

    def get_all_data(self, random=False):
        """Materialise the entire dataset as three padded arrays."""
        enc_buf, dec_buf, out_buf = [], [], []
        for is_end, text_pair in self.sample(random):
            enc_ids, dec_ids, out_ids = self._encode_pair(text_pair)
            enc_buf.append(enc_ids)
            dec_buf.append(dec_ids)
            out_buf.append(out_ids)
            if is_end:
                break
        return (
            sequence_padding(enc_buf),
            sequence_padding(dec_buf),
            sequence_padding(out_buf),
        )


