import tensorflow as tf
import numpy as np
import pickle
import os
from config import *
def dict_2_pkl_file(d, file_name):
    """Serialize *d* (the word-map pair) to *file_name* with pickle."""
    # Log message kept verbatim ("writing map file").
    print('写入 map file')
    with open(file_name, 'wb') as out_f:
        pickle.dump(d, out_f)


def dict_from_pkl_file(filename):
    """Load and return the pickled (word->id, id->word) dictionary pair."""
    # Log message kept verbatim ("reading map file").
    print('读取 map file')
    with open(filename, "rb") as fh:
        word2id, id2word = pickle.load(fh)
    return word2id, id2word


def get_dict(files):
    """Build word->id and id->word maps from whitespace-tokenized text files.

    Parameters
    ----------
    files : iterable of str
        File names located under the ``data/`` directory, read as UTF-8.

    Returns
    -------
    (dict, dict)
        ``w2i_dict`` maps token -> integer id (``PAD`` -> 0, ``BOS`` -> 1,
        remaining ids assigned in first-seen order); ``i2w_dict`` is the
        inverse mapping.
    """
    # Log message kept verbatim ("generating dictionary...").
    print('正在生成字典。。。')
    w2i_dict = {'PAD': 0, 'BOS': 1}
    for file in files:
        file_name = os.path.join("data", file)
        with open(file_name, encoding='utf8') as f:
            # Iterate the file directly instead of a manual readline() loop;
            # str.split() with no args already discards the trailing newline.
            for line in f:
                for w in line.split():
                    if w not in w2i_dict:
                        # np.int32 ids keep dtypes consistent with the
                        # tf.py_func(..., [tf.int32, ...]) consumers below.
                        w2i_dict[w] = np.int32(len(w2i_dict))
    # Invert once at the end (removed the dead i2w_dict = {} pre-assignment).
    i2w_dict = {i: w for w, i in w2i_dict.items()}
    print('w2i_dict:', w2i_dict)
    return w2i_dict, i2w_dict


def get_batch(batch_size):
    """Build a one-shot iterator over zipped (sentence, label) line batches.

    Parameters
    ----------
    batch_size : int
        Number of text lines per batch (previously ignored: the dataset was
        hard-coded to ``batch(batch_size=3)``).

    Returns
    -------
    The ``iterator.get_next()`` tensor tuple
    ``(sentences_padded, sentence_lengths, labels_padded, label_lengths)``.
    """

    def map_func_data(s, l):
        # Convert one batch of raw utf-8 lines into padded int32 id matrices
        # plus the true (pre-padding) length of every row.
        def my_func(sentences, labels):
            # NOTE(review): ``dict`` here resolves to the *builtin* dict type,
            # which is not subscriptable -- a word->id map must be bound in
            # scope (compare getBatch.get_batch, which binds self.w2i).
            # TODO: fix by passing/loading a vocabulary map.
            sentences_2id = np.asarray([[dict[word] for word in line.decode('utf8').split()] for line in sentences])
            labels_2id = np.asarray([[dict[word] for word in line.decode('utf8').split()] for line in labels])
            sentences_length = [len(a) for a in sentences_2id]
            labels_length = [len(a) for a in labels_2id]
            # Zero-pad every row (id 0 == PAD) up to the batch maximum.
            max_sentence_length = max(sentences_length)
            max_label_length = max(labels_length)
            sentences_padded = [np.pad(d, (0, max_sentence_length - len(d)), 'constant', constant_values=(0, 0))
                                for d in sentences_2id]
            labels_padded = [np.pad(d, (0, max_label_length - len(d)), 'constant', constant_values=(0, 0))
                             for d in labels_2id]
            return sentences_padded, np.int32(sentences_length), labels_padded, np.int32(labels_length)

        sentences_padded, sentences_length, labels_padded, labels_length = tf.py_func(
            my_func, [s, l], [tf.int32, tf.int32, tf.int32, tf.int32])
        return sentences_padded, sentences_length, labels_padded, labels_length

    sentences = tf.data.TextLineDataset(configer.sentence_name)
    labels = tf.data.TextLineDataset(configer.label_name)
    data = tf.data.Dataset.zip((sentences, labels))
    # BUG FIX: honor the caller-supplied batch_size (was hard-coded to 3).
    data = data.batch(batch_size=batch_size)
    data = data.map(map_func_data, num_parallel_calls=4)
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()
    return next_element


def test(batch_size):
    """Smoke test: print whether each batch's sentence-id sum equals its label-id sum."""
    batch = get_batch(batch_size)
    # batch[0] = padded sentence ids, batch[2] = padded label ids.
    sentence_sum = tf.reduce_sum(batch[0])
    label_sum = tf.reduce_sum(batch[2])
    sums_equal = tf.equal(sentence_sum, label_sum)
    with tf.Session() as sess:
        for _ in range(100000):
            print(sess.run(sums_equal))


class getBatch():
    """Batched data feeder: turns sentence/label text files into padded
    int32 id tensors using a pickled word->id map."""

    def __init__(self, map_name):
        # Load the (word->id, id->word) maps written by dict_2_pkl_file().
        self.w2i, self.i2w = dict_from_pkl_file(map_name)
        self.map_name = map_name

    def get_batch(self, batch_size, epoch):
        """Return the ``get_next()`` op of a one-shot iterator yielding
        ``(sentences_padded, sentence_lengths, labels_padded, label_lengths)``.

        Parameters
        ----------
        batch_size : int
            Text lines per batch.
        epoch : int
            Number of passes over the data. BUG FIX: this was previously
            ignored and the repeat count was hard-coded to 100.
        """
        # Renamed from ``dict`` to avoid shadowing the builtin.
        word2id = self.w2i

        def map_func_data(s, l):
            # Map a batch of raw utf-8 lines to padded int32 id matrices
            # plus the true (pre-padding) row lengths.
            def my_func(sentences, labels):
                sentences_2id = np.asarray([[word2id[word] for word in line.decode('utf8').split()] for line in sentences])
                labels_2id = np.asarray([[word2id[word] for word in line.decode('utf8').split()] for line in labels])
                sentences_length = [len(a) for a in sentences_2id]
                labels_length = [len(a) for a in labels_2id]
                # Zero-pad every row (id 0 == PAD) up to the batch maximum.
                max_sentence_length = max(sentences_length)
                max_label_length = max(labels_length)
                sentences_padded = [np.pad(d, (0, max_sentence_length - len(d)), 'constant', constant_values=(0, 0))
                                    for d in sentences_2id]
                labels_padded = [np.pad(d, (0, max_label_length - len(d)), 'constant', constant_values=(0, 0))
                                 for d in labels_2id]
                return np.int32(sentences_padded), np.int32(sentences_length), np.int32(labels_padded), np.int32(labels_length)

            sentences_padded_, sentences_length_, labels_padded_, labels_length_ = \
                tf.py_func(my_func, [s, l], [tf.int32, tf.int32, tf.int32, tf.int32])
            return sentences_padded_, sentences_length_, labels_padded_, labels_length_

        sentences_data = tf.data.TextLineDataset('data/' + configer.sentence_name)
        labels_data = tf.data.TextLineDataset('data/' + configer.label_name)
        data = tf.data.Dataset.zip((sentences_data, labels_data))
        data = data.batch(batch_size=batch_size)
        # BUG FIX: honor the caller's epoch count (was hard-coded to 100).
        data = data.repeat(count=epoch)
        data = data.map(map_func_data, num_parallel_calls=4)
        iterator = data.make_one_shot_iterator()
        next_element = iterator.get_next()
        return next_element

#maps = get_dict(["sentence.txt", "label.txt"])
#dict_2_pkl_file(maps, 'data/maps.pkl')

# if __name__ == '__main__':
#     maps = get_dict(["sentence.txt"])
#     dict_2_pkl_file(maps, 'data/maps.pkl')