import tensorflow as tf
from tensorflow.python.data import TextLineDataset, make_initializable_iterator, Iterator



OP_LIB_PATH = "./ops/op_lib/extracts_op.so"
CUSTOM_LIB = tf.load_op_library(OP_LIB_PATH)

def input_fn(data_files, hash_slots, continuous_slots=None, batch_size=128, epoch=1, key_max_size=500000000, buffer_size=40):
    """Build a tf.data input pipeline over text sample files and return one batch of tensors.

    Args:
        data_files: list of file paths; if the first path ends in ".gz" ALL files
            are read as GZIP-compressed text (mixed compression is not supported).
        hash_slots: iterable of slot ids; one SparseTensor feature is built per slot.
        continuous_slots: unused here — presumably consumed by a caller/variant;
            kept for interface compatibility (TODO confirm).
        batch_size: number of raw lines parsed per batch.
        epoch: number of passes over the data.
        key_max_size: forwarded to the custom parse op.
        buffer_size: unused here; kept for interface compatibility (TODO confirm).

    Returns:
        (features, labels, keys) tensors from a one-shot iterator, where
        `features` maps slot id -> tf.sparse.SparseTensor.

    Raises:
        TypeError: if `data_files` is not a list.
    """
    def _parse_batch(line_batch):
        # Parse a batch of raw lines with the custom op; it yields parallel
        # lists of sparse components (one entry per hash slot) plus labels/keys.
        parsed_slots = CUSTOM_LIB.ParseSampleV1(line=line_batch, hash_slots=hash_slots, hash_slots_size=len(hash_slots), key_max_size=key_max_size)
        label_slot = parsed_slots.label
        slot_values = parsed_slots.slot_value
        slot_indices = parsed_slots.slot_indices
        slot_shapes = parsed_slots.slot_shape
        keys = parsed_slots.key

        features = {}
        for slot_id, slot_value, slot_indice, slot_shape in zip(hash_slots, slot_values, slot_indices, slot_shapes):
            features[slot_id] = tf.sparse.SparseTensor(slot_indice, slot_value, slot_shape)

        return features, label_slot, keys

    # Validate eagerly instead of `assert`, which is stripped under `python -O`.
    if not isinstance(data_files, list):
        raise TypeError("data_files must be a list of file paths")

    if data_files[0][-3:] == '.gz':
        sample_lines = TextLineDataset(data_files, compression_type="GZIP")
    else:
        sample_lines = TextLineDataset(data_files)
    # https://www.cnblogs.com/huangyc/p/10340766.html    data pipeline optimization
    # BUG FIX: tf.data transformations return a NEW dataset; the original code
    # called `sample_lines.repeat(epoch)` without assigning the result, so the
    # `epoch` parameter had no effect. Reassign so repetition actually happens.
    sample_lines = sample_lines.repeat(epoch)
    sample_lines = sample_lines.prefetch(tf.data.experimental.AUTOTUNE)
    sample_batch = sample_lines.batch(batch_size)
    data_set = sample_batch.map(_parse_batch, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    data_set = data_set.prefetch(tf.data.experimental.AUTOTUNE)
    data_iter = tf.compat.v1.data.make_one_shot_iterator(data_set)
    return data_iter.get_next()

def test_line(data_files, hash_slots, key_max_size=500000000):
    """Debug helper: read `data_files` as plain text, take batches of 2 lines,
    and run the custom parse op directly on the iterator's next-batch tensor.

    Returns the same (features, label, keys) triple as the training pipeline's
    parse step, where `features` maps slot id -> tf.sparse.SparseTensor.
    """
    def _to_features(raw_lines):
        # Run the custom op and zip its parallel per-slot outputs into a
        # slot-id -> SparseTensor mapping.
        parsed = CUSTOM_LIB.ParseSampleV1(line=raw_lines, hash_slots=hash_slots, hash_slots_size=len(hash_slots), key_max_size=key_max_size)
        feature_map = {
            slot: tf.sparse.SparseTensor(indices, values, shape)
            for slot, values, indices, shape in zip(
                hash_slots, parsed.slot_value, parsed.slot_indices, parsed.slot_shape
            )
        }
        return feature_map, parsed.label, parsed.key

    dataset = TextLineDataset(data_files).batch(2)
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    return _to_features(iterator.get_next())