#!/usr/bin/env python
# -*- coding: utf-8 -*-

import json

import tensorflow as tf

# Maximum number of token ids kept per training example; longer
# sentences are truncated in create_one_sample.
MAX_LEN = 20

# Special vocabulary tokens. These must match the entries present in
# the word_to_id JSON vocabulary file.
END_POS = "</s>"    # end-of-sentence marker appended to every target sequence
START_POS = "<s>"   # start-of-sentence marker
PAD_POS = "<pad>"   # padding token
UNK_POS = "<unk>"   # out-of-vocabulary fallback token

def load_word_to_id(file_path):
    """Load the token -> integer-id vocabulary mapping from a JSON file.

    Args:
        file_path: path to a JSON file containing a {token: id} object.

    Returns:
        dict mapping each token string to its integer id.
    """
    # Context manager ensures the file handle is closed promptly instead
    # of being left to the garbage collector (the original leaked it).
    with open(file_path) as f:
        return json.load(f)

def create_one_sample(words, word_to_id, writer):
    """Serialize one tokenized sentence as a tf.train.SequenceExample.

    The example carries three feature lists:
      - "inputs":  token ids, truncated to MAX_LEN
      - "targets": inputs shifted left by one, terminated with the </s> id
      - "lengths": a single element, the (truncated) sequence length

    Args:
        words: list of token strings for one sentence.
        word_to_id: dict mapping token string -> integer id.
        writer: an open tf.python_io.TFRecordWriter.
    """
    unk_id = word_to_id.get(UNK_POS)
    # Fall back to the <unk> id for out-of-vocabulary tokens; a plain
    # .get(w) would yield None and crash the int64_list append below.
    word_ids = [word_to_id.get(w, unk_id) for w in words]

    # Truncate to MAX_LEN tokens.
    length = min(len(word_ids), MAX_LEN)
    inputs = word_ids[:length]
    # Targets are the inputs shifted left by one, with </s> appended.
    targets = word_ids[1:length]
    targets.append(word_to_id.get(END_POS, unk_id))
    lengths = [length]

    example = tf.train.SequenceExample()

    fl_inputs = example.feature_lists.feature_list["inputs"]
    for token_id in inputs:
        fl_inputs.feature.add().int64_list.value.append(token_id)

    fl_targets = example.feature_lists.feature_list["targets"]
    for token_id in targets:
        fl_targets.feature.add().int64_list.value.append(token_id)

    fl_sent_len = example.feature_lists.feature_list["lengths"]
    for sent_len in lengths:
        fl_sent_len.feature.add().int64_list.value.append(sent_len)

    writer.write(example.SerializeToString())



def create_tfrecord(input_file, out_file, word_to_id_file):
    """Convert a whitespace-tokenized text corpus into a TFRecord file.

    Each line of `input_file` becomes one SequenceExample record in
    `out_file` (see create_one_sample for the feature layout).

    Args:
        input_file: path to a text file, one whitespace-tokenized sentence per line.
        out_file: destination path for the TFRecord file.
        word_to_id_file: path to the JSON {token: id} vocabulary.
    """
    word_to_id = load_word_to_id(word_to_id_file)
    # Context managers guarantee the input handle is released and, more
    # importantly, that the TFRecordWriter is closed so its buffered
    # records are actually flushed to disk (the original never closed it).
    with open(input_file) as in_f, tf.python_io.TFRecordWriter(out_file) as writer:
        for line in in_f:
            create_one_sample(line.split(), word_to_id, writer)

if __name__ == '__main__':
    # Build the training TFRecord from the cleaned comment corpus using
    # the pre-built JSON vocabulary.
    create_tfrecord(
        "/home/zhengwu/PycharmProjects/comment_bot/lm/comment_65_clean.txt",
        "./data/train.tfrecord",
        "word_to_id.json",
    )




