# -*- coding: utf-8 -*-  
'''
Dataset loading and preprocessing for couplet (对联) seq2seq training.

@author: luoyi
Created on 2021-03-17
'''
import tensorflow as tf

import utils.conf as conf
import utils.dictionaries as dictionaries


#    Read couplet pairs from two parallel files.
def read_couplet_iterator(in_path, 
                          out_path,
                          count=10000):
    """Yield (in_tokens, out_tokens) pairs read line-by-line from two parallel files.

    Each line is expected to be space-separated tokens with a trailing " \\n",
    so split(' ')[:-1] drops the final newline token. Iteration stops at
    `count` pairs or when either file is exhausted, whichever comes first.

    :param in_path:  path of the file holding the first line of each couplet
    :param out_path: path of the file holding the second line of each couplet
    :param count:    maximum number of pairs to yield; a negative value means
                     read until either file runs out
    """
    # `with` guarantees both files are closed even when the consumer abandons
    # the generator early or an exception propagates — the original left the
    # handles open in those cases.
    with open(file=in_path, mode='r', encoding='utf-8') as in_fr, \
         open(file=out_path, mode='r', encoding='utf-8') as out_fr:
        i = 0
        while (count < 0 or i < count):
            i += 1
            in_line = in_fr.readline()
            if (in_line == ''): break
            in_line = in_line.split(' ')[:-1]           #    drop the trailing '\n' token
            
            out_line = out_fr.readline()
            if (out_line == ''): break
            out_line = out_line.split(' ')[:-1]         #    drop the trailing '\n' token
            
            yield in_line, out_line


#    Data iterator (includes the preprocessing work).
def tensor_data_iterator(in_path, 
                         out_path,
                         count=10000,
                         max_len=conf.TEXT.get_max_len(),
                         x_preprocess=None,
                         y_preprocess=None):
    """Yield (X, Y) training tensors built from couplet pairs.

    X stacks the padded encoder inputs on top of the padded decoder inputs
    (shape (2, max_len), int32); Y is the padded decoder outputs with the
    batch dimension removed (shape (max_len,), int32).

    :param in_path:       first-line file, forwarded to read_couplet_iterator
    :param out_path:      second-line file, forwarded to read_couplet_iterator
    :param count:         maximum number of pairs to read
    :param max_len:       pad/truncate every sequence to this length
    :param x_preprocess:  optional callable applied to X before yielding
    :param y_preprocess:  optional callable applied to Y before yielding
    """
    def _encode_and_pad(tokens):
        #    token list -> id list -> post-padded/truncated row of length max_len
        ids = [dictionaries.word2idx_slist(tokens)]
        return tf.keras.preprocessing.sequence.pad_sequences(ids, padding='post', maxlen=max_len)

    for first_line, second_line in read_couplet_iterator(in_path, out_path, count):
        #    encoder input: first line wrapped in <GO> ... <EOS>
        encoder_inputs = _encode_and_pad(['<GO>'] + first_line + ['<EOS>'])
        #    decoder input: <GO> prepended to the second line
        decoder_inputs = _encode_and_pad(['<GO>'] + second_line)
        #    decoder output: <EOS> appended to the second line
        decoder_outputs = _encode_and_pad(second_line + ['<EOS>'])

        #    X = encoder inputs stacked on decoder inputs, as int32
        X = tf.cast(tf.concat([encoder_inputs, decoder_inputs], axis=0), dtype=tf.int32)
        if (x_preprocess): X = x_preprocess(X)

        #    Y = decoder outputs alone, batch dimension squeezed away
        Y = tf.squeeze(tf.convert_to_tensor(decoder_outputs, dtype=tf.int32), axis=0)
        if (y_preprocess): Y = y_preprocess(Y)

        yield X, Y


#    Tensor dataset.
def tensor_db(in_path,
              out_path,
              count,
              max_len=conf.TEXT.get_max_len(),
              x_preprocess=None,
              y_preprocess=None,
              batch_size=conf.DATASET.get_batch_size(),
              epochs=conf.DATASET.get_epochs(),
              shuffle_buffer_rate=conf.DATASET.get_shuffle_buffer_rate()):
    """Wrap tensor_data_iterator in a tf.data.Dataset with optional
    shuffle / batch / repeat stages.

    :param batch_size:           batch size; <= 0 disables batching
    :param epochs:               repeat count; <= 0 disables repeating
    :param shuffle_buffer_rate:  shuffle buffer = batch_size * rate; <= 0 disables shuffling
    :return: tf.data.Dataset yielding (X, Y) with X (2, max_len) and Y (max_len,)
    """
    def _make_generator():
        #    from_generator calls this to get a fresh generator per iteration
        return tensor_data_iterator(in_path=in_path,
                                    out_path=out_path,
                                    count=count,
                                    max_len=max_len,
                                    x_preprocess=x_preprocess,
                                    y_preprocess=y_preprocess)

    dataset = tf.data.Dataset.from_generator(generator=_make_generator,
                                             output_types=(tf.int32, tf.int32),
                                             output_shapes=(tf.TensorShape([2, max_len]),
                                                            tf.TensorShape((max_len, ))))
    #    each pipeline stage is optional; a non-positive setting skips it
    if (shuffle_buffer_rate > 0):
        dataset = dataset.shuffle(buffer_size=batch_size * shuffle_buffer_rate)
    if (batch_size > 0):
        dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
    if (epochs > 0):
        dataset = dataset.repeat(epochs)
    return dataset


