# -*- coding: utf-8 -*-  
'''
Baidu NLU question dataset.

Created on 2021-10-03
@author: luoyi
'''
import tensorflow as tf

import utils.conf as conf
from utils.dictionary import Dictionaries
from utils.relationship import Relationship
from data.dataset_baidu_nlu_generator import QuestionIterator

#    Question dataset: yields (x, y) training tensors from the Baidu NLU corpus
class DatasetQuestions:
    """Builds a tf.data pipeline over the Baidu NLU question corpus.

    Each sample is a pair of (2, max_sen_len) int64 tensors:
      x = stack([word ids, segment ids])
      y = stack([relation-id row, sequence-label ids])
    """

    def __init__(self,
                 dictionary=Dictionaries.instance(),
                 relationship=Relationship.instance(),
                 batch_size=conf.DATASET_BAIDU.get_batch_size(),
                 epochs=conf.DATASET_BAIDU.get_epochs(),
                 shuffle_buffer_rate=conf.DATASET_BAIDU.get_shuffle_buffer_rate(),
                 max_sen_len=conf.NLU.get_max_sen_len()):
        """
            @param dictionary: word <-> id dictionary (project singleton by default)
            @param relationship: relation / slot-tag <-> id mapping (project singleton by default)
            @param batch_size: batch size; <= 0 disables batching
            @param epochs: repeat count; <= 0 disables repeating
            @param shuffle_buffer_rate: shuffle buffer = batch_size * rate; <= 0 disables shuffling
            @param max_sen_len: fixed sequence length every sample is padded/truncated to
        """
        self._batch_size = batch_size
        self._epochs = epochs
        self._shuffle_buffer_rate = shuffle_buffer_rate

        self._dictionary = dictionary
        self._relationship = relationship

        self._max_sen_len = max_sen_len

    #    Iterate questions as (x, y) tensor pairs
    def question_tensor_iterator(self,
                                 fpath=conf.DATASET_BAIDU.get_question_train_data_path(),
                                 max_sen_len=conf.NLU.get_max_sen_len(),
                                 count=-1):
        """
            @param fpath: path of the question data file
            @param max_sen_len: fixed sequence length (pad/truncate target)
            @param count: max number of samples to read; -1 means all

            @yield x: Tensor(2, max_sen_len) = stack([wids, senids])
            @yield y: Tensor(2, max_sen_len) = stack([rid row, pids])
        """
        #    Raw question iterator over the corpus file
        qi = QuestionIterator()
        q_iter = qi.data_iterator(fpath=fpath, count=count)
        for sen, predicate, subject_, object_ in q_iter:
            wids, senids, rid, pids = self.parse_sample(sen, predicate, subject_, object_)

            #    wids: truncate to max_sen_len, pad with 0
            if (len(wids) > max_sen_len): wids = wids[:max_sen_len]
            elif (len(wids) < max_sen_len): wids = wids + [0] * (max_sen_len - len(wids))
            #    senids: truncate to max_sen_len, pad with 0
            if (len(senids) > max_sen_len): senids = senids[:max_sen_len]
            elif (len(senids) < max_sen_len): senids = senids + [0] * (max_sen_len - len(senids))

            #    rid expanded to [max_sen_len]: relation id in slot 0, -1 elsewhere
            rid = [rid] + [-1] * (max_sen_len - 1)
            #    pids: truncate to max_sen_len, pad with -1.
            #    FIX: the original only padded, so sentences longer than max_sen_len - 2
            #    yielded an over-long pids row and broke the declared output_shapes.
            if (len(pids) > max_sen_len): pids = pids[:max_sen_len]
            else: pids = pids + [-1] * (max_sen_len - len(pids))

            #    wids + senids form the X sample    Tensor(2, max_sen_len)
            x = tf.stack([tf.convert_to_tensor(wids),
                          tf.convert_to_tensor(senids)], axis=0)
            #    rid + pids form the Y sample       Tensor(2, max_sen_len)
            y = tf.stack([tf.convert_to_tensor(rid),
                          tf.convert_to_tensor(pids)], axis=0)

            yield x, y

    #    Parse one raw sample into id sequences
    def parse_sample(self, sen, predicate, subject_, object_):
        '''
            @param sen: question sentence (character sequence)
            @param predicate: relation name
            @param subject_: (word, loc may be empty, type, q)
            @param object_: (word, loc may be empty, type, q)

            @return wids, senids, rid, pids
        '''
        #    Encode sentence character by character, wrapped with [CLS] / [EOS]
        sen = list(sen)
        wids = self._dictionary.word2idx_slist(['[CLS]'] + sen + ['[EOS]'])
        senids = [1] * len(wids)

        #    Relation name -> relation id
        rid = self._relationship.rel_to_id(predicate)

        #    subject / object -> sequence-labeling ids
        pids = [0] * len(sen)                      #    default label: the one mapped to [PAD]
        #    If the question is NOT about the subject, label the subject span
        if (not subject_[3]):
            word_s = subject_[0]
            loc_s = subject_[1]
            st = subject_[2]
            stids = self._relationship.st_to_id(word_s, st)
            #    NOTE(review): assumes len(stids) == span length, otherwise pids
            #    length changes under slice assignment — confirm with st_to_id
            pids[loc_s[0] : loc_s[1]+1] = stids
        #    If the question is NOT about the object, label the object span
        elif (not object_[3]):
            word_o = object_[0]
            loc_o = object_[1]
            ot = object_[2]
            otids = self._relationship.ot_to_id(word_o, ot)
            pids[loc_o[0] : loc_o[1]+1] = otids
        #    Prepend/append the labels corresponding to [CLS] / [EOS]
        pids = [0] + pids + [5]

        return wids, senids, rid, pids

    #    Build the tf.data.Dataset
    def tensor_db(self,
                  fpath=conf.DATASET_BAIDU.get_question_train_data_path(),
                  count=-1
                  ):
        """
            @param fpath: path of the question data file
            @param count: max number of samples to read; -1 means all

            @return tf.data.Dataset of ((2, max_sen_len), (2, max_sen_len)) int64 pairs,
                    optionally shuffled / batched / repeated per the constructor config
        """
        x_shape = tf.TensorShape([2, self._max_sen_len])
        y_shape = tf.TensorShape([2, self._max_sen_len])
        db = tf.data.Dataset.from_generator(generator=lambda :self.question_tensor_iterator(fpath=fpath,
                                                                                            max_sen_len=self._max_sen_len,
                                                                                            count=count),
                                            output_types=(tf.int64, tf.int64),
                                            output_shapes=(x_shape, y_shape))
        if (self._shuffle_buffer_rate > 0): db = db.shuffle(buffer_size=self._batch_size * self._shuffle_buffer_rate)
        if (self._batch_size > 0): db = db.batch(batch_size=self._batch_size, drop_remainder=True)
        if (self._epochs > 0): db = db.repeat(self._epochs)
        return db
