#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections
import random

import numpy as np
import copy


def read_data(filename):
    """Read a UTF-8 text file and return its contents as a list of characters."""
    with open(filename, encoding="utf-8") as handle:
        text = handle.read()
    # A str is an iterable of characters, so list() splits it per character.
    return list(text)


def index_data(sentences, dictionary):
    """Map every token in `sentences` to its integer id via `dictionary`.

    Tokens that are not present in the dictionary fall back to the id of
    the 'UNK' entry. Returns an int32 array with the same shape as the
    input array.
    """
    original_shape = sentences.shape
    flat = sentences.reshape([-1])
    ids = np.zeros_like(flat, dtype=np.int32)
    for pos, token in enumerate(flat):
        try:
            ids[pos] = dictionary[token]
        except KeyError:
            # Unknown token: map to the UNK bucket.
            ids[pos] = dictionary['UNK']
    return ids.reshape(original_shape)


def get_train_data(vocabulary, batch_size, num_steps):
    """Yield stacked (input, label) training batches from a token sequence.

    The label sequence is the input shifted left by one token, with the
    first token wrapped around to the end (next-token prediction). Each
    yielded array has shape (2 * batch_size, num_steps): the top half is
    the input batch x, the bottom half the label batch y.

    Args:
        vocabulary: sequence of tokens; left unmodified (the original
            implementation mutated it in place).
        batch_size: number of rows per batch.
        num_steps: number of tokens per row.

    Yields:
        np.ndarray of shape (2 * batch_size, num_steps). Trailing tokens
        that do not fill a whole batch are dropped.
    """
    # Shallow copy is enough for a flat token list; never mutate the input.
    data = list(vocabulary)
    # Labels = inputs shifted by one position, wrapping the first token.
    label = data[1:] + data[:1]
    batch_length = batch_size * num_steps
    num_batches = len(data) // batch_length
    for i in range(num_batches):
        start, stop = i * batch_length, (i + 1) * batch_length
        # Bug fix: reshape with the actual parameters instead of the
        # hard-coded (16, 32) of the original.
        x = np.array(data[start:stop]).reshape(batch_size, num_steps)
        y = np.array(label[start:stop]).reshape(batch_size, num_steps)
        yield np.vstack([x, y])

	


def build_dataset(words, n_words):
    """Process raw inputs into a dataset.

    Keeps the (n_words - 1) most frequent words and maps everything else
    to the 'UNK' bucket at index 0.

    Returns:
        data: list of integer ids, one per input word.
        count: [word, frequency] pairs; count[0] is ['UNK', unk_count].
        dictionary: word -> id.
        reversed_dictionary: id -> word.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Ids follow frequency order; 'UNK' is id 0 by construction.
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    # Id 0 is exactly the UNK bucket, so counting zeros gives unk_count.
    count[0][1] = data.count(0)
    reversed_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reversed_dictionary

if __name__ == "__main__":
    import json
    # Load the raw corpus as a list of characters.
    vocabulary = read_data('QuanSongCi.txt')
    with open("reverse_dictionary.json", encoding='utf-8') as inf:
        # Bug fix: json.load() no longer accepts an `encoding` keyword
        # (removed in Python 3.9); the file object handles decoding.
        reverse_dictionary = json.load(inf)
    print(reverse_dictionary)
    # with open("dictionary.json", encoding='utf-8') as inf:
    #     dictionary = json.load(inf)
    # print(dictionary)
    # d = np.ndarray([32,32],np.int32)
    # for dl in get_train_data(vocabulary,16,32):
    #     for i in range(32):
    #         for j in range(32):
    #             chinese = dl[i][j]
    #             if chinese in dictionary.keys():
    #                 number = dictionary[chinese]
    #             else:
    #                 number = -1
    #             d[i][j] = number
    #     print(d[:16,:])
