#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections
import random

import numpy as np
import json


def read_data(filename):
    """Read a UTF-8 text file and return its contents as a list of characters."""
    with open(filename, encoding="utf-8") as fh:
        return list(fh.read())


def index_data(sentences, dictionary):
    """Map every token in *sentences* to its integer vocabulary id.

    Args:
        sentences: numpy array of tokens, any shape.
        dictionary: mapping token -> int id; must contain an 'UNK' entry
            used as the fallback for out-of-vocabulary tokens.

    Returns:
        An int32 numpy array of ids with the same shape as *sentences*.
    """
    shape = sentences.shape
    flat = sentences.reshape([-1])
    # Hoist the fallback id once instead of raising/catching KeyError and
    # re-looking-up dictionary['UNK'] for every out-of-vocabulary token.
    unk = dictionary['UNK']
    index = np.zeros_like(flat, dtype=np.int32)
    for i, token in enumerate(flat):
        index[i] = dictionary.get(token, unk)

    return index.reshape(shape)


class data_producter:
    """Produce (input, label) training batches from a character corpus.

    Vocabulary mappings are loaded from 'dictionary.json' and
    'reverse_dictionary.json' in the current working directory; the corpus
    itself is read character-by-character from *filename*.
    """

    # Punctuation / whitespace characters dropped when ``filter`` is true.
    # Hoisted to a class constant: the original rebuilt this tuple on every
    # loop iteration.
    NONWORDS = (',', '.', ' ', '\n', '，', '。')

    def __init__(self, filename='QuanSongCi.txt', filter=False, offset=0):
        # NOTE: `filter` shadows the builtin; kept for caller compatibility.
        self.filename = filename
        with open('dictionary.json', encoding='utf-8') as inf:
            self.dictionary = json.load(inf)

        # BUG FIX: this file was opened without an explicit encoding, which
        # fails on platforms whose default encoding is not UTF-8.
        with open('reverse_dictionary.json', encoding='utf-8') as inf:
            self.reverse_dictionary = json.load(inf)

        self.data = []
        for word in read_data(filename):
            if filter and word in self.NONWORDS:
                continue
            # Out-of-vocabulary characters map to id 0 ('UNK').
            self.data.append(self.dictionary.get(word, 0))

        self.offset = offset
        self.data_index = offset

    def reset(self, offset=0):
        """Rewind the producer so iteration restarts at *offset*."""
        self.data_index = offset

    def get_train_data(self, batch_size, num_steps, limit=40000):
        """Yield (batch, labels) pairs of shape (batch_size, num_steps).

        *labels* is *batch* shifted one position ahead (next-character
        targets). Iteration advances ``self.data_index`` and stops once the
        next window would pass ``limit`` tokens.

        Args:
            batch_size: rows per batch.
            num_steps: sequence length per row.
            limit: cap on how far into the data to read (default 40000,
                matching the original hard-coded behavior).
        """
        span = batch_size * num_steps
        # BUG FIX / generalization: the cap was a hard-coded 40000, which
        # made the label slice run past the end of shorter corpora and crash
        # in reshape. Clamp to the actual data length.
        stop = min(limit, len(self.data))
        while True:
            end = self.data_index + span
            if end + 1 > stop:
                return
            batch = np.asarray(self.data[self.data_index:end]).reshape((-1, num_steps))
            labels = np.asarray(self.data[self.data_index + 1:end + 1]).reshape((-1, num_steps))
            self.data_index = end
            yield batch, labels


def build_dataset(words, n_words):
    """Process raw inputs into a dataset.

    Returns (data, count, dictionary, reversed_dictionary) where ids are
    assigned by frequency rank, id 0 is reserved for 'UNK', and *data* is
    the input sequence encoded as ids.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Frequency rank becomes the id; 'UNK' sits at position 0.
    dictionary = {word: rank for rank, (word, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    # Every id-0 entry is an out-of-vocabulary token (dictionary['UNK'] == 0).
    count[0][1] = data.count(0)
    reversed_dictionary = {rank: word for word, rank in dictionary.items()}
    return data, count, dictionary, reversed_dictionary
