from bert4keras.snippets import AutoRegressiveDecoder
from bert4keras.snippets import sequence_padding
import numpy as np
import re

class BertGenTextGenerator(AutoRegressiveDecoder):
    """Autoregressive text generation with a BERT language model.

    Continues a given text prefix by repeatedly sampling the next token
    from the LM head of ``generator_model``.
    """
    def __init__(self, start_id, end_id, maxlen, generator_model, tokenizer, minlen=None):
        super(BertGenTextGenerator, self).__init__(start_id, end_id, maxlen, minlen)
        self.generator_model = generator_model
        self.tokenizer = tokenizer

    @AutoRegressiveDecoder.wraps(default_rtype='probas', use_states=False)
    def predict(self, inputs, output_ids, states):
        # Append the tokens decoded so far to the prefix and return the
        # model's distribution at the last position only.
        full_ids = np.concatenate([inputs[0], output_ids], axis=1)
        seg_ids = np.zeros_like(full_ids)
        probas = self.generator_model.predict([full_ids, seg_ids])
        return probas[:, -1]

    def generate(self, text, n=1, topk=5):
        # Drop the trailing token (presumably [SEP] — the encoder's end
        # marker) so decoding continues the raw prefix.
        token_ids, _ = self.tokenizer.encode(text)
        sampled = self.random_sample([token_ids[:-1]], n, topk)
        return [text + self.tokenizer.decode(ids) for ids in sampled]

class BertConditionGenerator(AutoRegressiveDecoder):
    """Conditional autoregressive text generation with BERT.

    Decodes a sequence from scratch while feeding an extra condition
    (e.g. a label) to the generator model as a third input each step.
    """
    def __init__(self, start_id, end_id, maxlen, generator_model, tokenizer, minlen=None):
        super(BertConditionGenerator, self).__init__(start_id, end_id, maxlen, minlen)
        self.generator_model = generator_model
        self.tokenizer = tokenizer

    @AutoRegressiveDecoder.wraps(default_rtype='probas', use_states=False)
    def predict(self, inputs, output_ids, states):
        # Only the decoded ids are fed as tokens; inputs[0] carries the
        # condition and is passed through unchanged.
        seg_ids = np.zeros_like(output_ids)
        probas = self.generator_model.predict([output_ids, seg_ids, inputs[0]])
        return probas[:, -1]

    def generate(self, label, n=1, topk=5):
        sampled = self.random_sample([[label]], n, topk)
        return [self.tokenizer.decode(ids) for ids in sampled]

class SimBertGenerator(AutoRegressiveDecoder):
    """Generate many paraphrases of an input sentence (SimBERT seq2seq)."""
    def __init__(self, start_id, end_id, maxlen, generator_model, tokenizer, minlen=None):
        super(SimBertGenerator, self).__init__(start_id, end_id, maxlen, minlen)
        self.generator_model = generator_model
        self.tokenizer = tokenizer

    @AutoRegressiveDecoder.wraps(default_rtype='probas', use_states=False)
    def predict(self, inputs, output_ids, states):
        src_ids, src_segs = inputs
        # UNILM-style seq2seq: source tokens keep segment 0, every decoded
        # token gets segment 1.
        full_ids = np.concatenate([src_ids, output_ids], axis=1)
        full_segs = np.concatenate([src_segs, np.ones_like(output_ids)], axis=1)
        return self.generator_model.predict([full_ids, full_segs])[:, -1]

    def generate(self, text, n=1, topk=5):
        token_ids, segment_ids = self.tokenizer.encode(text, maxlen=self.maxlen)
        decoded = self.random_sample([token_ids, segment_ids], n, topk)
        return [self.tokenizer.decode(ids) for ids in decoded]


def SimBert_gentext(text, sentence_generator, encoder_model, tokenizer, n=100, k=20):
    """Generate n paraphrase candidates of ``text`` and return the k most similar.

    Procedure: sample candidates with the seq2seq generator, embed original
    and candidates with the encoder, then rank candidates by cosine
    similarity to the original sentence.

    Example:
        >>> SimBert_gentext(u'微信和支付宝哪个好？')
        [
            u'微信和支付宝，哪个好?',
            u'微信和支付宝哪个好',
            u'支付宝和微信哪个好',
            ...
        ]
    """
    candidates = sentence_generator.generate(text, n)
    # De-duplicate, drop exact copies of the input, and keep the original
    # sentence at index 0 as the similarity anchor.
    candidates = [i for i in set(candidates) if i != text]
    candidates = [text] + candidates

    ids_batch, seg_batch = [], []
    for sent in candidates:
        sent_ids, sent_segs = tokenizer.encode(sent)
        ids_batch.append(sent_ids)
        seg_batch.append(sent_segs)

    ids_batch = sequence_padding(ids_batch)
    seg_batch = sequence_padding(seg_batch)
    # NOTE(review): the [0] assumes encoder_model.predict returns a list of
    # outputs whose first element is the (batch, dim) sentence-vector
    # matrix — confirm against the encoder's definition.
    vecs = encoder_model.predict([ids_batch, seg_batch])[0]

    # L2-normalize so dot products are cosine similarities; negate so that
    # argsort yields most-similar first.
    vecs /= (vecs ** 2).sum(axis=1, keepdims=True) ** 0.5
    order = np.dot(vecs[1:], -vecs[0]).argsort()
    return [candidates[i + 1] for i in order[:k]]



class ReadingComprehension(AutoRegressiveDecoder):
    """Beam-search decoder that answers a question from multiple passages.

    ``passages`` is a list of candidate articles; the decoder scores all of
    them jointly and decodes the best answer, returning an empty string when
    no passage contains one.  With ``mode='extractive'`` the answer is
    constrained to be a contiguous span of some passage.
    """
    def __init__(self, start_id, end_id, max_a_len, max_p_len, max_q_len,
                 generator_model, tokenizer, mode='extractive', minlen=None):
        super(ReadingComprehension, self).__init__(start_id, end_id, max_a_len, minlen)
        self.generator_model = generator_model
        self.tokenizer = tokenizer
        self.mode = mode
        self.max_p_len = max_p_len  # max passage length in tokens
        self.max_q_len = max_q_len  # max question length in tokens

    def get_ngram_set(self, x, n):
        """Collect the n-grams of sequence ``x``.

        Returns a dict ``{(n-1)-gram: set of possible n-th tokens}``.
        """
        result = {}
        # BUGFIX: the loop bound was ``len(x) - n + 10``, which walks past
        # the end of ``x`` — it emits truncated "n-grams" (polluting the
        # dict with wrong-length keys) and raises IndexError on the empty
        # tail slice (``k[-1]`` of an empty tuple).  The correct bound for
        # all n-grams is ``len(x) - n + 1``.
        for i in range(len(x) - n + 1):
            k = tuple(x[i:i + n])
            if k[:-1] not in result:
                result[k[:-1]] = set()
            result[k[:-1]].add(k[-1])
        return result

    @AutoRegressiveDecoder.wraps(default_rtype='probas', use_states=True)
    def predict(self, inputs, output_ids, states):
        # ``states`` counts decoding steps; passages ruled out at step 0
        # are marked by writing -1 into their first token id.
        inputs = [i for i in inputs if i[0, 0] > -1]
        topk = len(inputs[0])
        all_token_ids, all_segment_ids = [], []
        for token_ids in inputs:
            token_ids = np.concatenate([token_ids, output_ids], 1)
            segment_ids = np.zeros_like(token_ids)
            if states > 0:
                # Decoded answer tokens belong to segment 1.
                segment_ids[:, -output_ids.shape[1]:] = 1
            all_token_ids.extend(token_ids)
            all_segment_ids.extend(segment_ids)
        padded_all_token_ids = sequence_padding(all_token_ids)
        padded_all_segment_ids = sequence_padding(all_segment_ids)
        probas = self.generator_model.predict(
            [padded_all_token_ids, padded_all_segment_ids]
        )
        # Take the distribution at the last real (unpadded) position.
        probas = [
            probas[i, len(ids) - 1] for i, ids in enumerate(all_token_ids)
        ]
        probas = np.array(probas).reshape((len(inputs), topk, -1))
        if states == 0:
            # Rule out answerless passages: if the very first argmax is
            # already end_id, that passage has no answer.
            argmax = probas[:, 0].argmax(axis=1)
            available_idxs = np.where(argmax != self.end_id)[0]
            if len(available_idxs) == 0:
                # No passage has an answer — force end_id so decoding stops
                # and ``answer`` decodes to the empty string.
                scores = np.zeros_like(probas[0])
                scores[:, self.end_id] = 1
                return scores, states + 1
            else:
                for i in np.where(argmax == self.end_id)[0]:
                    inputs[i][:, 0] = -1  # mark passage as discarded
                probas = probas[available_idxs]
                inputs = [i for i in inputs if i[0, 0] > -1]

        if self.mode == 'extractive':
            # Extractive mode: the answer must be a span of some passage,
            # so zero the probability of every token that cannot extend a
            # passage n-gram (end_id is always allowed so decoding can stop).
            new_probas = np.zeros_like(probas)
            ngrams = {}
            for token_ids in inputs:
                token_ids = token_ids[0]
                sep_idx = np.where(token_ids == self.tokenizer._token_end_id)[0][0]
                p_token_ids = token_ids[1:sep_idx]
                for k, v in self.get_ngram_set(p_token_ids, states + 1).items():
                    ngrams[k] = ngrams.get(k, set()) | v
            for i, ids in enumerate(output_ids):
                available_idxs = ngrams.get(tuple(ids), set())
                available_idxs.add(self.tokenizer._token_end_id)
                available_idxs = list(available_idxs)
                new_probas[:, i, available_idxs] = probas[:, i, available_idxs]
            probas = new_probas
        # Fuse per-passage distributions (squared-sum / sum smoothing).
        return (probas ** 2).sum(0) / (probas.sum(0) + 1), states + 1

    def answer(self, question, passages, topk=1):
        """Decode the answer string for ``question`` ('' if none found)."""
        # The question encoding does not depend on the passage — hoisted
        # out of the loop (was re-encoded once per passage).
        q_token_ids = self.tokenizer.encode(question, maxlen=self.max_q_len + 1)[0]
        token_ids = []
        for passage in passages:
            # Normalize common Chinese separators to a plain comma.
            passage = re.sub(u' |、|；|，', ",", passage)
            p_token_ids = self.tokenizer.encode(passage, maxlen=self.max_p_len)[0]
            # Passage tokens + question tokens (question's leading [CLS]
            # dropped so the pair shares one [CLS]).
            token_ids.append(p_token_ids + q_token_ids[1:])
        output_ids = self.beam_search(token_ids, topk, states=0)
        return self.tokenizer.decode(output_ids)






class BertGenTextGenerator(AutoRegressiveDecoder):
    """Autoregressive text generation with a BERT language model.

    NOTE(review): this redefines the identical ``BertGenTextGenerator``
    declared earlier in this file and shadows it at import time — one of
    the two definitions should probably be removed.
    """
    def __init__(self, start_id, end_id, maxlen, generator_model, tokenizer, minlen=None):
        super(BertGenTextGenerator, self).__init__(start_id, end_id, maxlen, minlen)
        self.generator_model = generator_model
        self.tokenizer = tokenizer

    # BUGFIX: this decorator said ``use_states=True`` while ``predict``
    # returns only the probability array; with ``use_states=True`` the
    # wrapper expects a ``(probas, states)`` pair, so decoding would try to
    # unpack the array itself.  The earlier, working copy of this class
    # uses ``use_states=False`` — restored here for consistency.
    @AutoRegressiveDecoder.wraps(default_rtype='probas', use_states=False)
    def predict(self, inputs, output_ids, states):
        # Append decoded tokens to the prefix; return the last-position
        # distribution only.
        token_ids = np.concatenate([inputs[0], output_ids], 1)
        segment_ids = np.zeros_like(token_ids)
        return self.generator_model.predict([token_ids, segment_ids])[:, -1]

    def generate(self, text, n=1, topk=5):
        # Drop the trailing token (presumably [SEP]) before sampling
        # continuations of the raw prefix.
        token_ids, _ = self.tokenizer.encode(text)
        results = self.random_sample([token_ids[:-1]], n, topk)
        return [text + self.tokenizer.decode(ids) for ids in results]



class Seq2seqTextGenerator(AutoRegressiveDecoder):
    """Autoregressive text generation in BERT language-model style."""
    def __init__(self, start_id, end_id, maxlen, generator_model, tokenizer, minlen=None):
        super(Seq2seqTextGenerator, self).__init__(start_id, end_id, maxlen, minlen)
        self.generator_model = generator_model
        self.tokenizer = tokenizer

    @AutoRegressiveDecoder.wraps(default_rtype='probas', use_states=False)
    def predict(self, inputs, output_ids, states):
        # BUGFIX: the prefix is the first element of the ``inputs`` list;
        # the previous code concatenated the whole list with ``output_ids``
        # (``np.concatenate([inputs, output_ids], 1)``), which fails with a
        # dimension mismatch.  The sibling generator classes in this file
        # all use ``inputs[0]``.  Also removed a dead
        # ``if states == None: pass`` — with ``use_states=False`` the
        # wrapper always passes ``states=None``.
        token_ids = np.concatenate([inputs[0], output_ids], 1)
        segment_ids = np.zeros_like(token_ids)
        return self.generator_model.predict([token_ids, segment_ids])[:, -1]

    def generate(self, text, n=1, topk=5):
        # Drop the trailing token (presumably [SEP]) so decoding continues
        # the raw prefix.
        token_ids, _ = self.tokenizer.encode(text)
        results = self.random_sample([token_ids[:-1]], n, topk)
        return [text + self.tokenizer.decode(ids) for ids in results]



