# -*- coding: utf-8 -*-  
'''
cpt2.0的数据集
    
    预训练数据集：
        - tfrecord文件格式
        - 以字为单位
        - x开头追加<GO>
        - y末尾追加<EOS>
        - 数据保存进*.tfrecord文件

@author: luoyi
Created on 2021年4月7日
'''
import tensorflow as tf
import numpy as np
import glob

import data.dataset as ds
import utils.dictionaries as dictionaries
import utils.conf as conf


#    Pre-processing iterator for the pre-training dataset.
def pre_training_iterator(inputs_path, 
                          labels_path, 
                          count):
    '''Yield one (x, y) pair of int64 index arrays per couplet.
        x: word indices of the first line, prefixed with <GO>
        y: word indices of the second line, suffixed with <EOS>
        @param inputs_path: path of the first-line corpus file
        @param labels_path: path of the second-line corpus file
        @param count: number of couplets to read
    '''
    #    Walk every (first line, second line) couplet pair.
    for first, second in ds.read_couplet_iterator(in_path=inputs_path, out_path=labels_path, count=count):
        #    Map words to vocabulary indices and attach start / end markers.
        head = dictionaries.word2idx_slist(['<GO>'] + first)
        tail = dictionaries.word2idx_slist(second + ['<EOS>'])
        yield np.array(head, dtype=np.int64), np.array(tail, dtype=np.int64)


#    Iterator over the pre-training dataset yielding fixed-length tensors.
def pre_tensor_db_iterator(inputs_path, 
                           labels_path, 
                           count,
                           sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen()):
    '''Yield (x, y) tensors of exactly sentence_maxlen elements each.
        Sequences longer than sentence_maxlen are truncated; shorter ones
        are right-padded with zeros.
        @param sentence_maxlen: fixed output length per sentence
    '''
    def _to_fixed_len(arr):
        #    Truncate first, then zero-pad on the right when too short.
        arr = arr[:sentence_maxlen]
        if len(arr) < sentence_maxlen:
            arr = np.pad(arr, pad_width=(0, sentence_maxlen - len(arr)))
        return tf.convert_to_tensor(arr)
    
    for x, y in pre_training_iterator(inputs_path, labels_path, count):
        yield _to_fixed_len(x), _to_fixed_len(y)


#    Tensor data source built directly from the raw couplet files.
def pre_tensor_db(inputs_path, 
                  labels_path, 
                  count,
                  sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                  batch_size=conf.DATASET.get_batch_size(),
                  epochs=conf.DATASET.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET.get_shuffle_buffer_rate(),):
    '''Wrap pre_tensor_db_iterator in a tf.data.Dataset.
        Optional shuffle / batch / repeat stages are applied only when the
        corresponding parameter is > 0.
        @return: tf.data.Dataset of (x, y) int64 pairs
    '''
    def _generator():
        return pre_tensor_db_iterator(inputs_path, labels_path, count, sentence_maxlen)
    
    fixed_shape = tf.TensorShape((sentence_maxlen,))
    dataset = tf.data.Dataset.from_generator(generator=_generator,
                                             output_types=(tf.int64, tf.int64),
                                             output_shapes=(fixed_shape, fixed_shape))
    
    if shuffle_buffer_rate > 0:
        dataset = dataset.shuffle(shuffle_buffer_rate * batch_size)
    if batch_size > 0:
        dataset = dataset.batch(batch_size, drop_remainder=True)
    if epochs > 0:
        dataset = dataset.repeat(epochs)
    return dataset


#    Save the pre-training dataset into tfrecord files.
def save_pre_training_dataset(pret_iterator, 
                              tfrecord_dir,
                              limit=50000):
    '''Serialize every (x, y) pair from pret_iterator into tfrecord files,
        rolling over to a new file every `limit` records.
        @param pret_iterator: iterable of (x, y) int64 index sequences
        @param tfrecord_dir: output directory for the tfrecord files
        @param limit: maximum number of records per tfrecord file
    '''
    fcount = 0      #    index of the current output file
    wcount = 0      #    number of records written into the current file
    #    The writer is opened lazily so that hitting `limit` on the last
    #    record (or an empty iterator) does not create an empty file.
    fw = None
    try:
        #    Walk the pre-training data.
        for x, y in pret_iterator:
            if fw is None:
                fpath = tfrecord_dir + '/cpt2_pre_training_{}.tfrecord'.format(fcount)
                conf.mkfiledir_ifnot_exises(fpath)
                fw = tf.io.TFRecordWriter(fpath)
            feature = {'x': tf.train.Feature(int64_list=tf.train.Int64List(value=x)),
                       'y': tf.train.Feature(int64_list=tf.train.Int64List(value=y))}
            example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
            fw.write(example_proto.SerializeToString())
            wcount += 1
            
            #    Roll over to a new file once the record limit is reached.
            if wcount >= limit:
                fw.close()
                fw = None
                wcount = 0
                fcount += 1
    finally:
        #    BUG FIX: the original never closed the last writer, which could
        #    leave the final tfrecord file unflushed / truncated.
        if fw is not None:
            fw.close()


#    Feature schema of the records written by save_pre_training_dataset.
data_fields = {"x": tf.io.VarLenFeature(tf.int64), "y": tf.io.VarLenFeature(tf.int64)}
#    Parse one serialized example into fixed-length (x, y) tensors.
def parse_xy(serialized_example, sentence_maxlen):
    '''Deserialize a single tfrecord example.
        Each sequence is truncated to sentence_maxlen, and right-padded
        with zeros when shorter.
        @param serialized_example: one serialized tf.train.Example
        @param sentence_maxlen: fixed output length per sentence
        @return: (x, y) int64 tensors of shape (sentence_maxlen,)
    '''
    parsed = tf.io.parse_single_example(serialized_example, features=data_fields)
    
    def _to_fixed_len(sparse):
        #    Densify, truncate to sentence_maxlen, then right-pad with zeros.
        dense = tf.sparse.to_dense(sparse)[:sentence_maxlen]
        padding = tf.zeros((sentence_maxlen - tf.shape(dense)[0],), dtype=tf.int64)
        return tf.concat([dense, padding], axis=-1)
    
    return _to_fixed_len(parsed['x']), _to_fixed_len(parsed['y'])
#    Read the dataset back from the tfrecord files.
def tensor_db(tfrecord_dir,
              sentence_maxlen,
              batch_size=conf.GPT2.get_batch_size(),
              epochs=conf.GPT2.get_epochs(),
              shuffle_buffer_rate=conf.GPT2.get_shuffle_buffer_rate(),
              tfrecord_buffer_rate=conf.GPT2.get_tfrecord_buffer_rate()
              ):
    '''Build a tf.data.Dataset from every *.tfrecord file in tfrecord_dir.
        Optional shuffle / batch / repeat stages are applied only when the
        corresponding parameter is > 0.
        @param tfrecord_dir: directory containing the *.tfrecord files
        @param sentence_maxlen: fixed length each sentence is padded/truncated to
        @return: tf.data.Dataset of (x, y) int64 pairs
    '''
    fpaths = glob.glob(tfrecord_dir + '/*.tfrecord')
    db = tf.data.TFRecordDataset(fpaths, buffer_size=tfrecord_buffer_rate * batch_size)
    #    Parse examples in parallel; AUTOTUNE picks the parallelism level.
    db = db.map(lambda s: parse_xy(s, sentence_maxlen),
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
    
    if (shuffle_buffer_rate > 0): db = db.shuffle(shuffle_buffer_rate * batch_size)
    if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
    if (epochs > 0): db = db.repeat(epochs)
    
    #    FIX: prefetch moved to the end of the pipeline — the original called
    #    it before shuffle/batch, so fully-prepared batches were not the unit
    #    being prefetched; the tf.data performance guide recommends prefetch
    #    as the final transformation so batch preparation overlaps training.
    db = db.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return db