#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections
import json
import random

import numpy as np



# Read a text file into a list of characters.
def read_data(filename):
    """Read the whole UTF-8 text file at `filename` and return its
    contents as a list of single characters."""
    with open(filename, encoding="utf-8") as handle:
        return list(handle.read())


def index_data(sentences, dictionary):
    """Map every token in `sentences` (a numpy array of tokens) to its
    integer id from `dictionary`.

    Tokens missing from `dictionary` fall back to dictionary['UNK'].
    Returns an int32 array with the same shape as `sentences`.
    """
    shape = sentences.shape
    flat = sentences.reshape([-1])
    # Hoist the fallback id and use dict.get instead of raising/catching
    # KeyError per unknown token inside the loop.
    unk = dictionary['UNK']
    index = np.zeros_like(flat, dtype=np.int32)
    for i, token in enumerate(flat):
        index[i] = dictionary.get(token, unk)

    return index.reshape(shape)
    
    
# Global cursor into the corpus; only the disabled skip-gram reference
# code kept in the string literal further down this file uses it.
data_index = 0    
# num_steps: how many time steps the RNN unrolls, i.e. the length of each input sequence.
def get_train_data(vocabulary, batch_size, num_steps, dictionary):
    """Yield (x, y) training batches for a character-level RNN.

    The corpus is mapped to ids, split into `batch_size` contiguous rows,
    and served in chunks of `num_steps` columns. `y` is `x` shifted left
    by one position (next-character targets).

    Args:
        vocabulary: sequence of tokens (e.g. characters) forming the corpus.
        batch_size: number of parallel sequences per batch.
        num_steps: RNN unroll length (columns per yielded batch).
        dictionary: token -> int id mapping; unknown tokens map to id 0,
            which is 'UNK' by build_dataset's convention.

    Yields:
        (x, y) pairs of int32 arrays, each shaped [batch_size, num_steps].
    """
    data_partition_size = len(vocabulary) // batch_size
    raw_x = [dictionary.get(ch, 0) for ch in vocabulary]
    # Targets are the inputs shifted left by one, with the final target
    # wrapping around to the first token. (Building raw_y from
    # vocabulary[1:] left it one element short, which broke the last row
    # assignment below whenever len(vocabulary) was a multiple of
    # batch_size.)
    raw_y = raw_x[1:] + raw_x[:1]

    data_x = np.zeros([batch_size, data_partition_size], dtype=np.int32)
    data_y = np.zeros([batch_size, data_partition_size], dtype=np.int32)
    for row in range(batch_size):
        lo = data_partition_size * row
        hi = lo + data_partition_size
        data_x[row] = raw_x[lo:hi]
        data_y[row] = raw_y[lo:hi]

    # Number of full num_steps-wide chunks available per row.
    epoch_size = data_partition_size // num_steps
    for step in range(epoch_size):
        x = data_x[:, step * num_steps:(step + 1) * num_steps]
        y = data_y[:, step * num_steps:(step + 1) * num_steps]
        yield (x, y)
                 
# NOTE(review): disabled reference implementation (skip-gram batch
# generation from the course slides), kept as a bare module-level string
# so it never executes. As written it also references undefined names
# (`skip_windows`, `data`), so it would not run if re-enabled as-is.
'''
#下面是ppt中的做法
    global data_index #记录运行到了那里
    
    skip_window = 1 #相关词的范围，左右范围
    num_skips = 2 #一个词重复使用多少次
    
    batch = np.ndarray(shape=(batch_size), dtype=np.int32) #存放生成的这一批的数据
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) #存放相关词
    span = 2 * skip_window + 1
    buffer = collections.deque(maxlen=span) #声明一个系列
    if data_index + span > len(vocabulary):
        data_index = 0
    buffer.extend(vocabulary[data_index:data_index + span]) #从所有数据中截取一段作为这次的输入，buffer序列增加数据
    data_index = 0
    for i in range(batch_size // num_skips):
        context_words = [w for w in range(span) if w != skip_windows] #把自己从相关词中去掉
        words_to_use = random.sample(context_words, num_skips) #随机选取num_skips个数据，这里是下标
        for j, context_word in enumerate(words_to_use):
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips+j, 0] = buffer[context_word]
        if data_index == len(vocabulary):
            buffer[:] = data[:span]
            data_index = span
        else:
            buffer.append(data[data_index])
            data_index += 1
    
    data_index = (data_index + len(vocabulary) - span) % len(vocabulary)
    print(batch)
    print(labels)
    return batch, labels
'''

def build_dataset(words, n_words):
    """Build a token->id vocabulary over the most common tokens.

    Args:
        words: iterable of tokens (e.g. characters) making up the corpus.
        n_words: vocabulary size, including the reserved 'UNK' entry.

    Returns:
        data: list of int ids, one per input token (unknown tokens -> 0).
        count: list of [token, frequency] pairs; count[0] is ['UNK', n_unk].
        dictionary: token -> id (ids assigned by frequency rank, 'UNK' = 0).
        reversed_dictionary: id -> token.

    Side effects:
        Dumps both dictionaries as JSON to FLAGS.dictionary and
        FLAGS.reverse_dictionary. NOTE(review): FLAGS is not defined in
        this file — presumably the training script's flag object; confirm
        before calling this standalone. The missing `import json` has been
        added to the file's imports.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Assign ids in frequency order; 'UNK' gets id 0.
    dictionary = {}
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = []
    unk_count = 0
    for word in words:
        index = dictionary.get(word, 0)  # 0 == dictionary['UNK']
        if index == 0:
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count  # backfill the real UNK frequency
    reversed_dictionary = {idx: word for word, idx in dictionary.items()}
    with open(FLAGS.dictionary, 'w') as result_file:
        json.dump(dictionary, result_file)
    with open(FLAGS.reverse_dictionary, 'w') as result_file2:
        json.dump(reversed_dictionary, result_file2)
    return data, count, dictionary, reversed_dictionary



