#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections
import random

import numpy as np

import json  # stdlib import grouped here instead of after the side-effecting call below

from flags import parse_args

# Command-line flags are parsed at import time so the module-level
# configuration (dictionary file paths) is available to the code below.
FLAGS, unparsed = parse_args()

def read_data(filename):
    """Read a UTF-8 text file and return its contents as a list of characters."""
    with open(filename, encoding="utf-8") as source:
        text = source.read()
    return list(text)

#vocabulary = read_data(filename)
#print('Data size', len(vocabulary))

def index_data(sentences, dictionary):
    """Convert an array of tokens into an int32 array of vocabulary ids.

    Tokens not present in *dictionary* are mapped to dictionary['UNK'].
    The returned array has the same shape as *sentences*.
    """
    original_shape = sentences.shape
    flat = sentences.reshape([-1])
    ids = np.zeros_like(flat, dtype=np.int32)
    for pos, token in enumerate(flat):
        # Fall back to the UNK id for out-of-vocabulary tokens.
        ids[pos] = dictionary[token] if token in dictionary else dictionary['UNK']
    return ids.reshape(original_shape)

# Load the word->id and id->word mappings produced by the vocabulary builder.
# Bug fix: json.load() no longer accepts an `encoding` argument (deprecated in
# Python 3.1, removed in 3.9 — it raised TypeError); the stream is already
# decoded by open(..., encoding='utf-8').
with open(FLAGS.dictionary, encoding='utf-8') as inf:
    dictionary = json.load(inf)

with open(FLAGS.reverse_dictionary, encoding='utf-8') as inf:
    reverse_dictionary = json.load(inf)

def get_train_data(vocabulary, batch_size, num_steps, word_to_id=None):
    """Yield (x, y) minibatches for truncated backpropagation through time.

    Args:
        vocabulary: sequence of tokens (e.g. characters).
        batch_size: number of parallel sequences (rows) per batch.
        num_steps: unroll length; y is x shifted left by one position.
        word_to_id: optional token->id mapping; defaults to the module-level
            `dictionary`. Unknown tokens map to id 0 ('UNK').

    Yields:
        (x, y) tuples of int32 arrays, each shaped [batch_size, num_steps].
    """
    if word_to_id is None:
        word_to_id = dictionary  # module-level mapping loaded at import time
    data_length = len(vocabulary)
    batch_partition_length = data_length // batch_size
    data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    data_y = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    # Unknown words map to index 0 ('UNK').
    data = [word_to_id.get(word, 0) for word in vocabulary]
    for i in range(batch_size):
        start = batch_partition_length * i
        stop = start + batch_partition_length
        data_x[i] = data[start:stop]
        # Targets are the inputs shifted one step to the left.
        targets = data[start + 1:stop + 1]
        if len(targets) < batch_partition_length:
            # Bug fix: when data_length is an exact multiple of batch_size the
            # shifted slice for the final row is one element short and the row
            # assignment raised ValueError. Wrap around to the first token.
            targets.append(data[0])
        data_y[i] = targets
    # Divide each batch partition into num_steps-wide windows for truncated BPTT.
    epoch_size = batch_partition_length // num_steps
    for i in range(epoch_size):
        x = data_x[:, i * num_steps:(i + 1) * num_steps]
        y = data_y[:, i * num_steps:(i + 1) * num_steps]
        yield (x, y)


def build_dataset(words, n_words):
    """Build the word<->id mappings for the n_words most frequent words.

    Returns:
        data: the input words converted to integer ids (0 for rare words).
        count: [word, frequency] pairs, most frequent first; count[0] is
            ['UNK', <number of out-of-vocabulary occurrences>].
        dictionary: word -> id, where smaller ids mean higher frequency.
        reversed_dictionary: id -> word.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Rank 0 is reserved for 'UNK'; ids follow descending frequency order.
    dictionary = {word: rank for rank, (word, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    # Every id 0 in `data` is an out-of-vocabulary occurrence.
    count[0][1] = data.count(0)
    reversed_dictionary = {rank: word for word, rank in dictionary.items()}
    return data, count, dictionary, reversed_dictionary
"""
count是字典，分别是word,word出现的次数(词频)，顺序由出现最多的往最少的排列
dictionary:是字典，分别是word,序号(也就是在count中的标号)
data对应的是列表:元素是所有的数据对应的索引，也就是将word转换成了整数索引
reversed_dictionary:也是字典，由序号索引word
"""
