#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections
import random
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np


def read_data(filename):
    """Read a UTF-8 text file and return its contents as a list of characters."""
    with open(filename, encoding="utf-8") as handle:
        return list(handle.read())


def index_data(sentences, dictionary):
    """Map every token in ``sentences`` to its integer id.

    Args:
        sentences: numpy array of tokens, any shape.
        dictionary: token -> int id mapping.  Tokens missing from it are
            mapped to ``dictionary['UNK']``.

    Returns:
        int32 numpy array of ids with the same shape as ``sentences``.
    """
    original_shape = sentences.shape
    flat = sentences.reshape([-1])
    ids = np.zeros_like(flat, dtype=np.int32)
    for position, token in enumerate(flat):
        try:
            ids[position] = dictionary[token]
        except KeyError:
            # Unknown tokens fall back to the catch-all 'UNK' bucket.
            ids[position] = dictionary['UNK']
    return ids.reshape(original_shape)


def get_train_data(vocabulary, batch_size, num_steps):
    """Yield training batches of shape (batch_size, num_steps + 1).

    The flat token sequence ``vocabulary`` is truncated to a multiple of
    ``batch_size`` and reshaped so each of the ``batch_size`` rows is a
    contiguous stream of tokens.  Successive yields walk the rows in
    non-overlapping windows of ``num_steps + 1`` columns (input plus the
    one-step-shifted target).

    Args:
        vocabulary: flat sequence of tokens (e.g. characters or word ids).
        batch_size: number of parallel streams per batch.
        num_steps: unroll length; each yielded batch has num_steps + 1 columns.

    Yields:
        np.ndarray of shape (batch_size, num_steps + 1).
    """
    # Drop the tail so the data divides evenly into batch_size rows.
    usable = batch_size * (len(vocabulary) // batch_size)
    streams = np.array(vocabulary[:usable]).reshape([batch_size, -1])
    row_len = streams.shape[1]

    # Random starting column in [0, 16] — presumably so successive epochs do
    # not always begin on the same token boundary (NOTE(review): confirm the
    # randomized start is intentional).
    cursor = random.randint(0, 16)
    window = num_steps + 1
    while cursor + window <= row_len:
        yield streams[:, cursor:cursor + window]
        cursor += window

def build_dataset(words, n_words):
    """Process raw tokens into a word-id dataset.

    NOTE(review): this function is defined twice in this file; the later,
    identical definition shadows this one at import time.

    Args:
        words: iterable of raw tokens.
        n_words: vocabulary size, including the 'UNK' bucket at id 0.

    Returns:
        data: list of ids, one per input token (out-of-vocab tokens -> 0).
        count: [token, frequency] pairs, 'UNK' first with its final count.
        dictionary: token -> id mapping.
        reversed_dictionary: id -> token mapping.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Ids are assigned in frequency order; 'UNK' sits at position 0.
    dictionary = {token: idx for idx, (token, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    # Every 0 in `data` is an out-of-vocabulary token mapped to 'UNK'.
    count[0][1] = data.count(0)
    reversed_dictionary = {idx: token for token, idx in dictionary.items()}
    return data, count, dictionary, reversed_dictionary


def build_dataset(words, n_words):
    """Process raw inputs into a dataset.

    NOTE(review): this is an exact duplicate of the definition above and
    shadows it at import time — one of the two should be removed.
    This copy only normalizes the nonstandard 2/3-space indentation to
    PEP 8 four spaces; the logic is unchanged.

    Args:
        words: iterable of raw tokens.
        n_words: vocabulary size, including the 'UNK' bucket at id 0.

    Returns:
        data: list of ids, one per input token (out-of-vocab tokens -> 0).
        count: [token, frequency] pairs, 'UNK' first with its final count.
        dictionary: token -> id mapping.
        reversed_dictionary: id -> token mapping.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        index = dictionary.get(word, 0)
        if index == 0:  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary

def generate_batch(batch_size, num_skips, skip_window, data, data_index=0):
    """Generate one batch of skip-gram (target, context) training pairs.

    Args:
        batch_size: number of (target, context) pairs to emit; must be a
            multiple of num_skips.
        num_skips: how many context words to sample per target word.
        skip_window: how many words to consider left and right of the target.
        data: sequence of word ids to draw from (len(data) must be >= span,
            i.e. 2 * skip_window + 1).
        data_index: position in `data` to start reading from.

    Returns:
        batch: int32 array of shape (batch_size,) holding target word ids.
        labels: int32 array of shape (batch_size, 1) holding context ids.

    NOTE(review): the advanced data_index is local and is not returned, so a
    caller cannot resume the stream where this call left off; the
    commented-out caller above expects a third return value — confirm the
    intended interface.
    """
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    if data_index + span > len(data):
        data_index = 0
    buffer.extend(data[data_index:data_index + span])
    data_index += span
    for i in range(batch_size // num_skips):
        context_words = [w for w in range(span) if w != skip_window]
        words_to_use = random.sample(context_words, num_skips)
        for j, context_word in enumerate(words_to_use):
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[context_word]
        if data_index == len(data):
            # BUG FIX: deque does not support slice assignment, so the
            # original `buffer[:] = data[:span]` raised TypeError whenever
            # the cursor wrapped.  Re-extend instead — with maxlen=span the
            # old contents are evicted automatically.
            buffer.extend(data[:span])
            data_index = span
        else:
            buffer.append(data[data_index])
            data_index += 1
    # Backtrack a little to avoid skipping words at the end of a batch.
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels