#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections
import random

import numpy as np

# Path to the corpus file (QuanSongCi — "Complete Song Ci" poetry collection).
# NOTE(review): hard-coded absolute user path — should be made configurable
# (CLI argument or environment variable) for portability.
filename='/home/paul/桌面/写诗器-homework.3/code/QuanSongCi.txt'

def read_data(filename):
    """Read the UTF-8 text file at *filename* and return its contents
    as a list of individual characters (the raw character stream)."""
    with open(filename, encoding="utf-8") as corpus:
        text = corpus.read()
    return list(text)


def index_data(sentences, dictionary):
    """Map an array of tokens to integer ids, preserving the input shape.

    Args:
        sentences: numpy array (any shape) of tokens.
        dictionary: token -> id mapping. Unknown tokens fall back to
            dictionary['UNK']; if 'UNK' is also missing, KeyError
            propagates (same contract as before).

    Returns:
        An int32 numpy array with the same shape as ``sentences``.
    """
    shape = sentences.shape
    flat = sentences.reshape([-1])
    index = np.zeros_like(flat, dtype=np.int32)
    for i, word in enumerate(flat):
        # Membership test instead of exception-driven control flow:
        # raising KeyError per missing item in a tight loop is slow.
        # The 'UNK' lookup stays lazy so fully-known input never
        # requires an 'UNK' entry.
        index[i] = dictionary[word] if word in dictionary else dictionary['UNK']
    return index.reshape(shape)


def get_train_data(vocabulary, batch_size, num_steps):
    """Yield (x, y) training batches forever from a character sequence.

    Args:
        vocabulary: list of tokens (characters) in document order.
        batch_size: number of parallel sequences per batch.
        num_steps: sequence length (time steps) per batch.

    Yields:
        (x, y) integer arrays of shape (batch_size, num_steps), where y is
        x shifted left by one step and the last target of each row wraps
        around to the row's first input (standard char-RNN labeling).

    Raises:
        ValueError: if the corpus is too small for even one batch (the
            original code silently spun forever in this case).
    """
    # Report the number of distinct tokens (kept from the original).
    print(len(set(vocabulary)))

    # Keep only the 5000 most frequent tokens. Counter.most_common sorts by
    # frequency descending with deterministic (first-seen) tie-breaking,
    # unlike the original's iteration over a set.
    most_common = collections.Counter(vocabulary).most_common(5000)
    word_to_int_table = {word: i for i, (word, _) in enumerate(most_common)}

    # Map the document to integer ids, dropping out-of-vocabulary tokens.
    arr = [word_to_int_table[w] for w in vocabulary if w in word_to_int_table]

    # Trim so the data reshapes evenly into batch_size rows.
    total_batch_size = batch_size * num_steps
    n_batches = len(arr) // total_batch_size
    if n_batches == 0:
        raise ValueError(
            "not enough data for a single batch: "
            f"{len(arr)} tokens < batch_size * num_steps = {total_batch_size}")
    # Lists cannot be reshaped, so convert to a numpy array first.
    arr = np.array(arr[:total_batch_size * n_batches]).reshape((batch_size, -1))

    while True:
        # Shuffle the batch rows once per epoch.
        np.random.shuffle(arr)
        # Step through the columns num_steps at a time.
        for n in range(0, arr.shape[1], num_steps):
            x = arr[:, n:n + num_steps]
            y = np.zeros_like(x)
            # Targets are the inputs shifted one step; the last column
            # wraps around to the row's first input.
            y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
            yield x, y


def build_dataset(words, n_words):
    """Process raw inputs into a dataset.

    Keeps the ``n_words - 1`` most common words; everything else maps to
    the reserved 'UNK' id 0.

    Returns:
        data: list of ids, one per input word (0 for out-of-vocabulary).
        count: [['UNK', unk_count], (word, freq), ...] most-common first.
        dictionary: word -> id mapping.
        reversed_dictionary: id -> word mapping.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Ids follow the order of `count`, so 'UNK' is 0 and more frequent
    # words get smaller ids.
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    # Backfill the placeholder UNK frequency now that data is mapped.
    count[0][1] = data.count(0)
    reversed_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reversed_dictionary
