import tensorflow as tf
import codecs

def load_id2word(path='../data/simple_text/vocab.ch'):
    """Load a vocabulary file and return a mapping of line index -> word.

    Args:
        path: vocabulary file with one token per line, UTF-8 encoded.
            Defaults to the project's Chinese vocab file, so existing
            callers are unchanged.

    Returns:
        dict[int, str]: {0-based line number: stripped token}.
    """
    id2word = {}
    with codecs.open(path, 'r', 'utf8') as r:
        # Iterate the file lazily; enumerate replaces the manual counter.
        for idx, line in enumerate(r):
            id2word[idx] = line.strip()
    return id2word

def load_csv_dataset(batch_size=1, embeddings=None):
    """Build a shuffled, batched (input, target) tf.data pipeline.

    Reads '../data/simple_text/ch.index', where each row is 10 int64 token
    ids. When `embeddings` is given, inputs are embedding vectors looked up
    from the ids and targets are the stacked raw ids; the final map shifts
    them for next-token prediction (inputs x[:-1], targets y[1:]).

    Args:
        batch_size: batch size for the final .batch() call.
        embeddings: optional embedding matrix used with tf.gather;
            assumed shape (vocab_size, dim) — TODO confirm against caller.

    Returns:
        A tf.data.Dataset of (input, target) pairs.
    """
    # Each record is parsed as a tuple of 10 scalar int64 tensors.
    ds = tf.data.experimental.CsvDataset('../data/simple_text/ch.index', [tf.int64]*10)
    # Re-wrap the 10 scalars as one nested component.
    ds = ds.map(lambda *x:[x])
    if embeddings is not None:
        # Produce (embedded inputs, stacked raw ids).
        ds = ds.map(lambda x:(tf.gather(embeddings, x), tf.stack(x)))
    # NOTE(review): this map expects a two-component element (x, y). When
    # `embeddings` is None the element has a single component, so this line
    # likely raises at graph construction — confirm; load_csv_dataset_test
    # calls this function with the default embeddings=None.
    ds = ds.map(lambda x,y: (x[:-1], y[1:]))
    ds = ds.shuffle(buffer_size=100).batch(batch_size=batch_size)
    return ds

def load_csv_dataset_test():
    """Smoke test: print every batch produced by load_csv_dataset (TF1 API)."""
    dataset = load_csv_dataset()
    next_batch = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        # Drain the one-shot iterator until it signals exhaustion.
        while True:
            try:
                batch = sess.run(next_batch)
            except tf.errors.OutOfRangeError:
                break
            print(batch)

def load_dataset():
    """Read the raw text corpus, split each line on whitespace, and print
    the tokens re-joined with single spaces (TF1 session API)."""
    dataset = tf.data.TextLineDataset('../data/simple_text/ch')
    dataset = dataset.map(lambda s: tf.string_split([s]).values)
    dataset = dataset.batch(batch_size=1)
    next_elem = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        # Pull batches until the iterator is exhausted.
        while True:
            try:
                batch = sess.run(next_elem)
            except tf.errors.OutOfRangeError:
                break
            for token_row in batch:
                # Tokens come back as bytes; decode before printing.
                print(' '.join(tok.decode('utf8') for tok in token_row))

def load_vocab_tf():
    """Build a word -> id MutableHashTable from the vocab file.

    Returns:
        (table, insert_op): the table and the op that populates it; run
        `insert_op` in a session before performing lookups. Unknown words
        map to the default value 0.
    """
    from tensorflow.contrib.lookup.lookup_ops import MutableHashTable
    table = MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=0, name='vocalb_hash_table')
    words = []
    ids = []
    with codecs.open('../data/simple_text/vocab.ch', 'r', 'utf8') as vocab_file:
        # Line number doubles as the word id.
        for idx, line in enumerate(vocab_file):
            words.append(line.strip())
            ids.append(idx)
    insert_op = table.insert(words, ids)
    return table, insert_op

def load_vocab_tf_test():
    """Smoke test for load_vocab_tf: populate the table, look up three words,
    and print their int64 ids (0 for words missing from the vocab)."""
    table, insert_op = load_vocab_tf()
    r = table.lookup(tf.constant(value=['大', '传', '同'], dtype=tf.string))
    # Use a context manager so the session is always released; the original
    # created tf.Session() without ever closing it (resource leak).
    with tf.Session() as sess:
        sess.run(tf.initialize_all_tables())
        sess.run(insert_op)
        r_val = sess.run(r)
        print(r_val)

if __name__ == '__main__':
    # Entry point: print every batch of the CSV-index dataset.
    # load_vocab_tf_test()  # alternative smoke test for the vocab table
    load_csv_dataset_test()