# -*- coding: utf-8 -*-  
'''
baidu数据集转为tfrecord相关工具

@author: luoyi
Created on 2021年6月29日
'''
import glob
import os
import tensorflow as tf
import numpy as np

import utils.conf as conf
import utils.dictionaries as dicts
import utils.relationships as rel
import utils.poses as poses
import data.dataset_baidu as ds_baidu
from models.tplinker.preprocess import et2et_vidx, sht2sht_vidx



################################################################################################
#
#   TPLinker模型相关
#
################################################################################################
#    tfrecord_writer
class TPLinkerTFRecordWriter:
    '''Converts raw baidu dataset samples into sharded TFRecord files for the
    TPLinker model. Shards are named out_file_path.format(shard_index).
    '''

    def __init__(self,
                 original_file_path, out_file_path, 
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(), 
                 record_count=conf.DATASET_BAIDU.get_record_count(),
                 ):
        '''
            @param original_file_path: path of the raw baidu dataset file
            @param out_file_path: output path template containing a '{}'
                                  placeholder for the shard index
            @param max_sen_len: maximum sentence length, including [CLS]/[EOS]
            @param record_count: number of records written per shard file
        '''
        self._original_file_path = original_file_path
        self._out_file_path = out_file_path
        
        #    the effective sentence length does not include [CLS][EOS]
        self._max_sen_len = max_sen_len - 2
        self._tfrecord_count = record_count
        pass
    
    #    generate a dataset readable by tplinker
    def save_to_tplinker_dataset(self,
                                 show_cross=False):
        '''Record layout:
            X ; Y_et2et ; Y_sh2sh ; Y_st2st
            e.g.: 12,32,12...;0,2,5...;43,21,42...;43,11,25...
            
            @param show_cross: if True, print progress after every written record
        '''
        #    counters
        count_w = 0         #    total records written so far
        count_fw = 0        #    records written into the current shard
        idx_f = 0           #    index of the current shard file
        
        conf.mkfiledir_ifnot_exises(self._out_file_path)
        #    BUG FIX: open shards lazily so no empty trailing file is created
        #    when the total record count is an exact multiple of the shard size
        fw = None
        
        #    iterator over the raw dataset file
        ods_iter = ds_baidu.dataset_iterator(self._original_file_path)
        for sample in ods_iter:
            #    skip sentences longer than the maximum sentence length
            if (len(sample.text) > self._max_sen_len): continue
            
            tfrecord = self.sample_to_tfrecord(sample, max_sen_len=self._max_sen_len)
            
            #    write into the current record file
            if (fw is None): fw = tf.io.TFRecordWriter(self._out_file_path.format(idx_f))
            fw.write(tfrecord)
            
            #    update counters
            count_fw += 1
            count_w += 1
    
            #    print progress
            if (show_cross): print('当前已写入:{} 当前文件已写入:{} 当前文件容量:{}'.format(count_w, count_fw, self._tfrecord_count))
            
            #    shard is full: close it and move to the next index
            if (count_fw >= self._tfrecord_count):
                print('写满一个文件. idx_f:{} 文件记录数:{} 当前总记录数:{}'.format(idx_f, count_fw, count_w))
                
                fw.close()
                fw = None
                count_fw = 0
                idx_f += 1
                pass
            pass
        
        if (fw is not None): fw.close()
        #    idx_f full shards were finished; the last partial shard only counts
        #    if it actually received records
        num_files = idx_f + (1 if count_fw > 0 else 0)
        print('全部记录已写完. 总记录数:{} 总文件数:{}'.format(count_w, num_files))
        pass
    
    #    serialize one sample into a tfrecord byte string
    def sample_to_tfrecord(self, sample, max_sen_len=conf.TPLINKER.get_max_sentence_len()):
        '''
            @param sample: one parsed baidu sample (text / postag / spo_list)
            @param max_sen_len: maximum sentence length passed to the handshake
                                index builders
            @return: serialized tf.train.Example byte string
        '''
        #    raw text
        text = sample.text
        #    entity (part-of-speech) annotations
        postag = sample.postag
        #    relation triples
        spo_list = sample.spo_list
        
        #    build the X data: prepend [CLS], append [EOS], then map to ids
        X = dicts.word2idx_slist(['[CLS]'] + list(text) + ['[EOS]'])
        
        #    et_to_et handshake indices (entity head-to-tail)
        idx_et2et, idx_no_et = et2et_vidx(text, postag, max_sen_len)
        
        #    sht_to_sht handshake indices (subject/object head and tail pairs)
        idx_sh2sh_1, idx_sh2sh_2, idx_sh2sh_no, \
            idx_st2st_1, idx_st2st_2, idx_st2st_no,\
            rel_no_val, rel_has_1, rel_has_2 = sht2sht_vidx(text, spo_list, len(rel.id_rel), max_sen_len)
            
        feature = self.feature_write(X, 
                                     idx_et2et, idx_no_et, 
                                     idx_sh2sh_1, idx_sh2sh_2, idx_sh2sh_no,
                                     idx_st2st_1, idx_st2st_2, idx_st2st_no,
                                     rel_no_val, rel_has_1, rel_has_2)
        tfrecord = tf.train.Example(features=tf.train.Features(feature=feature))
        tfrecord = tfrecord.SerializeToString()
        return tfrecord
    
    #    assemble the tfrecord feature dict
    def feature_write(self, 
                       X, idx_et2et, idx_no_et,
                       idx_sh2sh_1, idx_sh2sh_2, idx_sh2sh_no,
                       idx_st2st_1, idx_st2st_2, idx_st2st_no,
                       rel_no_val, rel_has_1, rel_has_2):
        '''Build the feature dict for a tf.train.Example. Matrix-valued fields
        are flattened and stored together with their shape so the reader can
        restore them.
        
        NOTE(review): rel_no_val / rel_has_1 / rel_has_2 are accepted but never
        serialized — presumably dropped on purpose in this minimal format;
        confirm against TPLinkerTFRecordReader before relying on them.
        '''
        #    feature dict of the minimal format
        feature = {'X': tf.train.Feature(int64_list=tf.train.Int64List(value=X)),                               #    X data
                   
                   'Y_idx_et2et': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_et2et)),             #    entity head-to-tail handshake indices
                   'Y_idx_no_et': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_no_et)),

                   'Y_shape_sh2sh_1': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_sh2sh_1.shape)),
                   'Y_vec_sh2sh_1': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_sh2sh_1, [-1]))),         #    sh2sh==1 handshake indices of all relations
                   
                   'Y_shape_sh2sh_2': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_sh2sh_2.shape)),
                   'Y_vec_sh2sh_2': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_sh2sh_2, [-1]))),         #    sh2sh==2 handshake indices of all relations
                   
                   'Y_shape_sh2sh_no': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_sh2sh_no.shape)),
                   'Y_vec_sh2sh_no': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_sh2sh_no, [-1]))),
                   
                   'Y_shape_st2st_1': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_st2st_1.shape)),
                   'Y_vec_st2st_1': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_st2st_1, [-1]))),         #    st2st==1 handshake indices of all relations
                   
                   'Y_shape_st2st_2': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_st2st_2.shape)),
                   'Y_vec_st2st_2': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_st2st_2, [-1]))),         #    st2st==2 handshake indices of all relations
                   
                   'Y_shape_st2st_no': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_st2st_no.shape)),
                   'Y_vec_st2st_no': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_st2st_no, [-1]))),
                   }
        return feature
    
    pass


#    tfrecord_reader
class TPLinkerTFRecordReader:
    '''Reads the sharded TFRecord files written by TPLinkerTFRecordWriter and
    exposes them as a tf.data pipeline of (X, Y) tensor pairs.
    '''

    def __init__(self,
                 tplinker_dataset_path=conf.DATASET_BAIDU.get_train_tplinker_dataset_path(), max_file_idx=-1,
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 rel_size=len(rel.id_rel),
                 ):
        '''
            @param tplinker_dataset_path: tfrecord path template ('{}' is the shard index)
            @param max_file_idx: highest shard index to read; negative means glob
                                 every *.tfrecord file in the directory
            @param max_sen_len: padded sentence length, including [CLS]/[EOS]
            @param rel_size: number of relation types
        '''
        self._tplinker_dataset_path = tplinker_dataset_path
        self._max_file_idx = max_file_idx
        self._max_sen_len = max_sen_len
        self._rel_size = rel_size
        
        self.init_features()
        pass
    
    #    initialize the tfrecord parsing schema
    def init_features(self):
        #    field definitions; must mirror TPLinkerTFRecordWriter.feature_write
        features = {
                'X': tf.io.VarLenFeature(tf.int64),
                
                'Y_idx_et2et': tf.io.VarLenFeature(tf.int64),
                'Y_idx_no_et': tf.io.VarLenFeature(tf.int64),
                
                'Y_shape_sh2sh_1': tf.io.VarLenFeature(tf.int64),
                'Y_vec_sh2sh_1': tf.io.VarLenFeature(tf.int64),
                'Y_shape_sh2sh_2': tf.io.VarLenFeature(tf.int64),
                'Y_vec_sh2sh_2': tf.io.VarLenFeature(tf.int64),
                'Y_shape_sh2sh_no': tf.io.VarLenFeature(tf.int64),
                'Y_vec_sh2sh_no': tf.io.VarLenFeature(tf.int64),
                
                'Y_shape_st2st_1': tf.io.VarLenFeature(tf.int64),
                'Y_vec_st2st_1': tf.io.VarLenFeature(tf.int64),
                'Y_shape_st2st_2': tf.io.VarLenFeature(tf.int64),
                'Y_vec_st2st_2': tf.io.VarLenFeature(tf.int64),
                'Y_shape_st2st_no': tf.io.VarLenFeature(tf.int64),
                'Y_vec_st2st_no': tf.io.VarLenFeature(tf.int64),
            }
        
        self._features = features
        pass
    
    #    parse one serialized example (minimal format)
    def tfrecord_read(self,
                           serialized_example, 
                           max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                           max_vec_len=512,
                           rel_size=len(rel.id_rel)):
        '''
            @param serialized_example: one serialized tf.train.Example
            @param max_sen_len: padded sentence length of X, including [CLS]/[EOS]
            @param max_vec_len: accepted for interface compatibility; unused here
            @param rel_size: accepted for interface compatibility; unused here
            @return: (X, Y)
                X: Tensor(2, max_sen_len) — token ids stacked with a position row
                Y: Tensor(8, max_sen_len - 2, 2) — the handshake index matrices
        '''
        parsed = tf.io.parse_single_example(serialized_example, features=self._features)
        
        #    restore a flattened matrix field and pad its first axis to max_pad
        def parse_mat(parsed, field, max_pad):
            shape = tf.sparse.to_dense(parsed['Y_shape_' + field])
            vec = tf.sparse.to_dense(parsed['Y_vec_' + field])
            data = tf.reshape(vec, shape=shape)
            data = tf.pad(data, paddings=[[0, max_pad - shape[0]], [0, 0]], constant_values=-1)
            return data
        
        #    parse X, append a sentence-position row, then pad with [PAD] up to max_sen_len
        X = tf.sparse.to_dense(parsed['X'])
        X_sen = tf.ones_like(X)
        X = tf.stack([X, X_sen], axis=0)
        text_len = tf.shape(X)[-1]                                                                                      #    original sentence length
        X = tf.pad(X, paddings=[[0, 0], [0, max_sen_len - text_len]], constant_values=0)                                #    Tensor(2, max_sen_len)
        
        #    Y fields exclude the [CLS]/[EOS] positions
        max_sen_len = max_sen_len - 2
        
        Y_idx_et2et = tf.sparse.to_dense(parsed['Y_idx_et2et'])                                                         #    Tensor(max_sen_len, )
        Y_idx_et2et = tf.pad(Y_idx_et2et[:, tf.newaxis],                                                                #    expand to Tensor(max_sen_len, 2); tensors of different rank cannot form a RaggedTensor
                             paddings=[[0, max_sen_len - tf.shape(Y_idx_et2et)[0]], [0, 1]], 
                             constant_values=-1)        
        Y_idx_no_et = tf.sparse.to_dense(parsed['Y_idx_no_et'])
        Y_idx_no_et = tf.pad(Y_idx_no_et[:, tf.newaxis],                                                                #    expand to Tensor(max_sen_len, 2)
                             paddings=[[0, max_sen_len - tf.shape(Y_idx_no_et)[0]], [0, 1]], 
                             constant_values=-1)  
        
        Y_idx_sh2sh_1 = parse_mat(parsed, 'sh2sh_1', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_sh2sh_2 = parse_mat(parsed, 'sh2sh_2', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_sh_no = parse_mat(parsed, 'sh2sh_no', max_sen_len)
        
        Y_idx_st2st_1 = parse_mat(parsed, 'st2st_1', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_st2st_2 = parse_mat(parsed, 'st2st_2', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_st_no = parse_mat(parsed, 'st2st_no', max_sen_len)
        
        #    assemble Y
        Y = tf.concat([
                        Y_idx_et2et[tf.newaxis, :, :], 
                        Y_idx_no_et[tf.newaxis, :, :],
                            
                        Y_idx_sh2sh_1[tf.newaxis, :, :],
                        Y_idx_sh2sh_2[tf.newaxis, :, :],
                        Y_idx_sh_no[tf.newaxis, :, :],
                        
                        Y_idx_st2st_1[tf.newaxis, :, :],
                        Y_idx_st2st_2[tf.newaxis, :, :],
                        Y_idx_st_no[tf.newaxis, :, :]
                    ], axis=0)
        
        return X, Y

    #    tplinker tf.data pipeline
    def tensor_db(self, 
                  tplinker_dataset_path=conf.DATASET_BAIDU.get_train_tplinker_dataset_path(), max_file_idx=None, 
                  batch_size=conf.DATASET_BAIDU.get_batch_size(), 
                  epochs=conf.DATASET_BAIDU.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_BAIDU.get_shuffle_buffer_rate(),
                  tfrecord_buffer_rate=conf.DATASET_BAIDU.get_tfrecord_buffer_rate(),
                  ):
        '''
            @param tplinker_dataset_path: tfrecord path template
            @param max_file_idx: highest shard index to read; None falls back to
                   the constructor value, negative globs every *.tfrecord file
            @param batch_size: batch size
            @param epochs: number of training epochs (repeat count)
            @param shuffle_buffer_rate: shuffle buffer as a multiple of
                   batch_size; non-positive disables shuffling
            @param tfrecord_buffer_rate: accepted for interface compatibility; unused
            @return: tf.data.Dataset yielding (X, Y) batches
        '''
        #    BUG FIX: the original mixed self._max_file_idx (condition) with the
        #    parameter (range), which disagreed whenever the two differed;
        #    resolve once and use consistently
        if (max_file_idx is None): max_file_idx = self._max_file_idx
        if (max_file_idx < 0): files = glob.glob(os.path.dirname(tplinker_dataset_path) + '/*.tfrecord')
        else: files = [tplinker_dataset_path.format(i) for i in range(max_file_idx + 1)]
        
        #    sum of 1..(max_sen_len - 2): total handshake vector length
        max_vec_len = (self._max_sen_len - 2) * (self._max_sen_len - 1) // 2
        
        db = tf.data.TFRecordDataset(files)
        #    BUG FIX: the original called self.tfrecord_reader.tfrecord_read, but
        #    no tfrecord_reader attribute is ever assigned on this class
        db = db.map(lambda s:self.tfrecord_read(s, 
                                                max_sen_len=self._max_sen_len,
                                                max_vec_len=max_vec_len,
                                                rel_size=self._rel_size))
        
        if (shuffle_buffer_rate > 0): db = db.shuffle(buffer_size=shuffle_buffer_rate * batch_size)
        if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
        if (epochs > 0): db = db.repeat(epochs)
        
        return db

    pass




################################################################################################
#
#   CRFLinker模型相关（crf + tplinker混合模型）
#
################################################################################################
#    crf-linker 写入tfrecord
class CRFLinkerTFRecordWriter:
    '''Writes baidu dataset samples as sharded TFRecord files for the
    CRFLinker model (crf + tplinker hybrid).
    '''

    def __init__(self,
                 original_file_path=conf.DATASET_BAIDU.get_train_data_path(), 
                 out_file_path=conf.DATASET_BAIDU.get_train_crflinker_dataset_path(), 
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(), 
                 rel_size=len(rel.id_rel),
                 record_count=conf.DATASET_BAIDU.get_record_count(),
                 ):
        '''
            @param original_file_path: path of the raw baidu dataset file
            @param out_file_path: output path template ('{}' is the shard index)
            @param max_sen_len: maximum sentence length, including [CLS]/[EOS]
            @param rel_size: number of relation types
            @param record_count: number of records per shard file
        '''
        self._original_file_path = original_file_path
        self._out_file_path = out_file_path
        
        #    the effective sentence length does not include [CLS][EOS]
        self._max_sen_len = max_sen_len - 2
        self._rel_size = rel_size
        
        self._tfrecord_count = record_count
        pass
    
    #    generate a dataset readable by crflinker
    def save_to_tfrecord(self, show_cross=False):
        '''
            @param show_cross: if True, print progress after every written record
        '''
        #    counters
        count_w = 0         #    total records written so far
        count_fw = 0        #    records written into the current shard
        idx_f = 0           #    index of the current shard file
        
        conf.mkfiledir_ifnot_exises(self._out_file_path)
        fw = tf.io.TFRecordWriter(self._out_file_path.format(idx_f))
        
        #    iterate the raw file
        for sample in ds_baidu.dataset_iterator(file_path=self._original_file_path):
            #    skip sentences longer than the maximum sentence length
            if (len(sample.text) > self._max_sen_len): continue
            
            #    skip sentences whose entity word lengths do not add up to the
            #    sentence length (only 1 such record exists)
            pos_len = 0
            for pos in sample.postag: pos_len += len(pos['word'])
            if (pos_len != len(sample.text)): continue
            
            feature = self.sample_to_tfrecord(sample, max_sen_len=self._max_sen_len, rel_size=self._rel_size)
            #    BUG FIX: sample_to_tfrecord returns None for unknown pos tags;
            #    skip the sample instead of crashing in tf.train.Features
            if (feature is None): continue
            tfrecord = tf.train.Example(features=tf.train.Features(feature=feature))
            tfrecord = tfrecord.SerializeToString()
            #    write into the record file
            fw.write(tfrecord)
            
            #    update counters
            count_fw += 1
            count_w += 1
    
            #    print progress
            if (show_cross): print('当前已写入:{} 当前文件已写入:{} 当前文件容量:{}'.format(count_w, count_fw, self._tfrecord_count))
            
            #    shard is full: close it and start the next one
            if (count_fw >= self._tfrecord_count):
                print('写满一个文件. idx_f:{} 文件记录数:{} 当前总记录数:{}'.format(idx_f, count_fw, count_w))
                
                fw.close()
                count_fw = 0
                idx_f += 1
                fw = tf.io.TFRecordWriter(self._out_file_path.format(idx_f))
                pass
            pass
        
        #    BUG FIX: the original never closed the last writer, which could
        #    leave the final shard unflushed
        fw.close()
        print('全部记录已写完. 总记录数:{} 总文件数:{}'.format(count_w, idx_f + 1))
        pass
    
    #    convert one sample into a tfrecord feature dict
    def sample_to_tfrecord(self, sample, max_sen_len=conf.TPLINKER.get_max_sentence_len(), rel_size=len(rel.id_rel)):
        '''
            @param sample: one parsed baidu sample (text / postag / spo_list)
            @param max_sen_len: maximum sentence length passed to the handshake builder
            @param rel_size: number of relation types
            @return: feature dict for tf.train.Features, or None when the sample
                     contains an unknown part-of-speech tag
        '''
        #    unpack the sample
        text = sample.text
        pos_list = sample.postag
        spo_list = sample.spo_list
        
        #    X data: prepend [CLS], append [EOS], then map characters to ids
        x = dicts.word2idx_slist(['[CLS]'] + list(text) + ['[EOS]'])
        x = np.array(x)
        
        #    Y data
        #    BIO sequence labelling ids; None when an unknown pos tag occurs
        id_pos = self.id_pos(pos_list)
        #    BUG FIX: the original fed a possible None into np.array/feature_write
        if (id_pos is None): return None
        id_pos = np.array(id_pos)
        #    sh/st handshake indices
        idx_sh2sh_1, idx_sh2sh_2, idx_sh2sh_no,\
            idx_st2st_1, idx_st2st_2, idx_st2st_no,\
            _, _, _ = sht2sht_vidx(text, spo_list, rel_size, max_sen_len)
        
        return self.feature_write(x, 
                                  id_pos, 
                                  idx_sh2sh_1, idx_sh2sh_2, idx_sh2sh_no, 
                                  idx_st2st_1, idx_st2st_2, idx_st2st_no)
    
    #    build the BIO sequence labelling ids
    def id_pos(self, pos_list):
        '''
            @param pos_list: list of {'word': ..., 'pos': ...} annotations
            @return: list of BIO tag ids framed by [CLS]/[EOS] ids, or None when
                     an unknown pos tag is encountered
        '''
        bio_list = []
        for pos in pos_list:
            word = pos['word']
            p = pos['pos']
            p = p.lower()
            
            #    BIO tag sequence for this word
            bio = poses.bio_sen(word, p)
            #    an unknown pos tag invalidates the whole sample
            if (bio is None): 
                print('出现未知词性. word:', word, ' pos:', p)
                return None
            
            bio_list += bio
            pass
        #    map BIO tags to ids
        y_pos = poses.bio_tag_id_list(bio_list)
        #    frame with the [CLS]/[EOS] tag ids
        y_pos = [poses.bio_tag_id('[CLS]')] + y_pos + [poses.bio_tag_id('[EOS]')]
        return y_pos
    
    #    assemble the tfrecord feature dict
    def feature_write(self, 
                       X, id_pos,
                       idx_sh2sh_1, idx_sh2sh_2, idx_sh2sh_no,
                       idx_st2st_1, idx_st2st_2, idx_st2st_no):
        '''Build the feature dict for a tf.train.Example. Matrix-valued fields
        are flattened and stored together with their shape so the reader can
        restore them.
        '''
        #    feature dict of the minimal format
        feature = {'X': tf.train.Feature(int64_list=tf.train.Int64List(value=X)),                               #    X data
                   
                   'Y_id_pos': tf.train.Feature(int64_list=tf.train.Int64List(value=id_pos)),                   #    BIO sequence labelling ids

                   'Y_shape_sh2sh_1': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_sh2sh_1.shape)),
                   'Y_vec_sh2sh_1': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_sh2sh_1, [-1]))),         #    sh2sh==1 handshake indices of all relations
                   
                   'Y_shape_sh2sh_2': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_sh2sh_2.shape)),
                   'Y_vec_sh2sh_2': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_sh2sh_2, [-1]))),         #    sh2sh==2 handshake indices of all relations
                   
                   'Y_shape_sh2sh_no': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_sh2sh_no.shape)),
                   'Y_vec_sh2sh_no': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_sh2sh_no, [-1]))),
                   
                   'Y_shape_st2st_1': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_st2st_1.shape)),
                   'Y_vec_st2st_1': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_st2st_1, [-1]))),         #    st2st==1 handshake indices of all relations
                   
                   'Y_shape_st2st_2': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_st2st_2.shape)),
                   'Y_vec_st2st_2': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_st2st_2, [-1]))),         #    st2st==2 handshake indices of all relations
                   
                   'Y_shape_st2st_no': tf.train.Feature(int64_list=tf.train.Int64List(value=idx_st2st_no.shape)),
                   'Y_vec_st2st_no': tf.train.Feature(int64_list=tf.train.Int64List(value=np.reshape(idx_st2st_no, [-1]))),
                   }
        return feature
    
    #    tf.data pipeline over the written tfrecord files
    def tensor_db(self,
                  tplinker_dataset_path=conf.DATASET_BAIDU.get_train_tplinker_dataset_path(), max_file_idx=-1,
                  max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                  rel_size=len(rel.id_rel),
                  batch_size=conf.DATASET_BAIDU.get_batch_size(), 
                  epochs=conf.DATASET_BAIDU.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_BAIDU.get_shuffle_buffer_rate(),
                  ):
        '''
            @param tplinker_dataset_path: tfrecord path template
            @param max_file_idx: highest shard index to read; negative globs
                   every *.tfrecord file in the directory
            @param max_sen_len: padded sentence length passed to the reader
            @param rel_size: number of relation types passed to the reader
            @param batch_size: batch size
            @param epochs: number of training epochs (repeat count)
            @param shuffle_buffer_rate: shuffle buffer as a multiple of
                   batch_size; non-positive disables shuffling
            @return: tf.data.Dataset yielding (X, Y) batches
        '''
        #    collect the shard files
        if (max_file_idx < 0): files = glob.glob(os.path.dirname(tplinker_dataset_path) + '/*.tfrecord')
        else: files = [tplinker_dataset_path.format(i) for i in range(max_file_idx + 1)]
        
        #    BUG FIX: the original called self.tfrecord_reader.tfrecord_read, but
        #    this class never defines a tfrecord_reader attribute; delegate the
        #    parsing to a CRFLinkerTFRecordReader instead
        reader = CRFLinkerTFRecordReader(max_sen_len=max_sen_len, rel_size=rel_size)
        
        db = tf.data.TFRecordDataset(files)
        db = db.map(lambda s:reader.tfrecord_read(s))
        
        if (shuffle_buffer_rate > 0): db = db.shuffle(buffer_size=shuffle_buffer_rate * batch_size)
        if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
        if (epochs > 0): db = db.repeat(epochs)
        
        return db
    pass


#    crf-linker的tfrecord读取器
class CRFLinkerTFRecordReader:
    '''Reads the TFRecord files written by CRFLinkerTFRecordWriter and exposes
    them as a tf.data pipeline of (X, Y) tensor pairs.
    '''

    def __init__(self,
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 rel_size=len(rel.id_rel),
                 ):
        '''
            @param max_sen_len: padded sentence length
            @param rel_size: number of relation types
        '''
        self.init_features()
        
        self._max_sen_len = max_sen_len
        self._rel_size = rel_size
        pass
    
    #    initialize the tfrecord parsing schema
    def init_features(self):
        #    field definitions; must mirror CRFLinkerTFRecordWriter.feature_write
        features = {
                'X': tf.io.VarLenFeature(tf.int64),
                
                'Y_id_pos': tf.io.VarLenFeature(tf.int64),
                
                'Y_shape_sh2sh_1': tf.io.VarLenFeature(tf.int64),
                'Y_vec_sh2sh_1': tf.io.VarLenFeature(tf.int64),
                'Y_shape_sh2sh_2': tf.io.VarLenFeature(tf.int64),
                'Y_vec_sh2sh_2': tf.io.VarLenFeature(tf.int64),
                'Y_shape_sh2sh_no': tf.io.VarLenFeature(tf.int64),
                'Y_vec_sh2sh_no': tf.io.VarLenFeature(tf.int64),
                
                'Y_shape_st2st_1': tf.io.VarLenFeature(tf.int64),
                'Y_vec_st2st_1': tf.io.VarLenFeature(tf.int64),
                'Y_shape_st2st_2': tf.io.VarLenFeature(tf.int64),
                'Y_vec_st2st_2': tf.io.VarLenFeature(tf.int64),
                'Y_shape_st2st_no': tf.io.VarLenFeature(tf.int64),
                'Y_vec_st2st_no': tf.io.VarLenFeature(tf.int64),
            }
        
        self._features = features
        pass
    
    #    parse one serialized example (minimal format)
    def tfrecord_read(self,
                      serialized_example, 
                      ):
        '''
            @param serialized_example: one serialized tf.train.Example
            @return: (X, Y)
                X: Tensor(2, max_sen_len) — token ids stacked with a position row
                Y: Tensor(7, max_sen_len, 2) — BIO ids plus handshake matrices
        '''
        parsed = tf.io.parse_single_example(serialized_example, features=self._features)
        
        #    restore a flattened matrix field and pad its first axis to max_pad
        def parse_mat(parsed, field, max_pad):
            shape = tf.sparse.to_dense(parsed['Y_shape_' + field])
            vec = tf.sparse.to_dense(parsed['Y_vec_' + field])
            data = tf.reshape(vec, shape=shape)
            data = tf.pad(data, paddings=[[0, max_pad - shape[0]], [0, 0]], constant_values=-1)
            #    this reshape only exists so the static shape is known at build time
            data = tf.reshape(data, shape=(max_pad, 2))
            return data
        
        #    parse X, append a sentence-position row, then pad with [PAD] up to max_sen_len
        X = tf.sparse.to_dense(parsed['X'])
        X_sen = tf.ones_like(X)
        X = tf.stack([X, X_sen], axis=0)
        text_len = tf.shape(X)[-1]                                                                                      #    original sentence length
        X = tf.pad(X, paddings=[[0, 0], [0, self._max_sen_len - text_len]], constant_values=0)                                #    Tensor(2, max_sen_len)
        
        #    NOTE(review): unlike TPLinkerTFRecordReader no 2 is subtracted here;
        #    the original comments claim [CLS]/[EOS] are already excluded —
        #    verify against the writer before changing
        max_sen_len = self._max_sen_len
        
        Y_id_pos = tf.sparse.to_dense(parsed['Y_id_pos'])                                                               #    Tensor(max_sen_len, )
        Y_id_pos = tf.pad(Y_id_pos[:, tf.newaxis],                                                                      #    expand to Tensor(max_sen_len, 2); tensors of different rank cannot form a RaggedTensor
                             paddings=[[0, max_sen_len - tf.shape(Y_id_pos)[0]], [0, 1]],
                             constant_values=-1)        
        Y_id_pos = tf.reshape(Y_id_pos, shape=(max_sen_len, 2))
        
        Y_idx_sh2sh_1 = parse_mat(parsed, 'sh2sh_1', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_sh2sh_2 = parse_mat(parsed, 'sh2sh_2', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_sh_no = parse_mat(parsed, 'sh2sh_no', max_sen_len)
        
        Y_idx_st2st_1 = parse_mat(parsed, 'st2st_1', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_st2st_2 = parse_mat(parsed, 'st2st_2', max_sen_len)                                                       #    Tensor(max_sen_len, 2)
        Y_idx_st_no = parse_mat(parsed, 'st2st_no', max_sen_len)
        
        #    assemble Y
        Y = tf.stack([
                        Y_id_pos, 
                            
                        Y_idx_sh2sh_1,
                        Y_idx_sh2sh_2,
                        Y_idx_sh_no,
                        
                        Y_idx_st2st_1,
                        Y_idx_st2st_2,
                        Y_idx_st_no,
                    ], axis=0)
        
        return X, Y
    
    #    tf.data pipeline
    def tensor_db(self,
                  dataset_path=conf.DATASET_BAIDU.get_train_crflinker_dataset_path(), max_file_idx=-1,
                  batch_size=conf.DATASET_BAIDU.get_batch_size(), 
                  epochs=conf.DATASET_BAIDU.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_BAIDU.get_shuffle_buffer_rate(),
                  ):
        '''
            @param dataset_path: tfrecord path template ('{}' is the shard index)
            @param max_file_idx: highest shard index to read; negative globs
                   every *.tfrecord file in the directory
            @param batch_size: batch size
            @param epochs: number of training epochs (repeat count)
            @param shuffle_buffer_rate: shuffle buffer as a multiple of
                   batch_size; non-positive disables shuffling
            @return: tf.data.Dataset yielding (X, Y) batches
        '''
        #    collect the shard files
        if (max_file_idx < 0): files = glob.glob(os.path.dirname(dataset_path) + '/*.tfrecord')
        else: files = [dataset_path.format(i) for i in range(max_file_idx + 1)]
        
        #    (the original computed an unused max_vec_len here; removed as dead code)
        db = tf.data.TFRecordDataset(files)
        db = db.map(lambda s:self.tfrecord_read(s))
        
        if (shuffle_buffer_rate > 0): db = db.shuffle(buffer_size=shuffle_buffer_rate * batch_size)
        if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
        if (epochs > 0): db = db.repeat(epochs)
        
        return db
    pass





