# -*- coding: utf-8 -*-
'''
Created on 2016年12月19日

@author: ZhuJiahui
'''

import os
import collections
import numpy as np
import random

class TextLoader():

    def __init__(self, text_corpus, batch_size, skip_window=3, min_frequency=5):
        """Build a skip-gram (word2vec-style) training-batch loader.

        Args:
            text_corpus: iterable of tokenized lines, each line a sequence of words.
            batch_size: number of (input, target) pairs per batch.
            skip_window: max distance between a context word and the center word.
            min_frequency: words seen fewer than this many times map to <UNK>.
        """
        self.text_corpus = text_corpus
        self.batch_size = batch_size
        self.min_frequency = min_frequency  # filter out words seen fewer than min_frequency times
        self.skip_window = skip_window

        self.vocabulary, self.words = self.build_vocabulary()
        self.vocabulary_size = len(self.words)

        # Unknown (filtered-out) words map to index 0, the <UNK> token.
        # (The previous default of 1 aliased them with the most frequent word.)
        self.raw_data = [[self.vocabulary.get(w, 0) for w in line] for line in text_corpus]

        self.create_batches()
        self.reset_batch_pointer()

    def build_vocabulary(self):
        """Count word frequencies and build the vocabulary.

        Returns:
            [word -> id dict, id -> word list]. Index 0 is reserved for
            '<UNK>'; remaining ids follow descending frequency, keeping only
            words with count >= min_frequency.
        """
        word_counts = collections.Counter()
        for text_line in self.text_corpus:
            word_counts.update(text_line)

        vocabulary_inv = ['<UNK>'] + [x[0] for x in word_counts.most_common() if x[1] >= self.min_frequency]
        vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
        return [vocabulary, vocabulary_inv]

    def create_batches(self):
        """Generate skip-gram (context, center) pairs and split them into batches.

        For each center word, one context word is sampled on each side at a
        random offset in [1, skip_window].

        Raises:
            ValueError: if the corpus yields fewer pairs than one batch.
        """
        inputs, targets = list(), list()
        for raw_line_text in self.raw_data:
            for start_index in range(self.skip_window, len(raw_line_text) - self.skip_window):
                # Offset in [1, skip_window]: never 0 (which would yield a
                # useless input == target pair), and symmetric on both sides.
                # The original mixed np.random.randint (exclusive high, and a
                # ValueError for skip_window == 1) with random.randint.
                context = random.randint(1, self.skip_window)
                inputs.append(raw_line_text[start_index - context])
                targets.append(raw_line_text[start_index])

                context = random.randint(1, self.skip_window)
                inputs.append(raw_line_text[start_index + context])
                targets.append(raw_line_text[start_index])

        self.batch_num = len(inputs) // self.batch_size
        if self.batch_num == 0:
            # raise, not assert: asserts are stripped under `python -O`
            raise ValueError("Not enough data. Make seq_length and batch_size small.")

        # Truncate the data, dropping the remainder past the last full batch.
        inputs = np.array(inputs[:(self.batch_num * self.batch_size)])
        targets = np.array(targets[:(self.batch_num * self.batch_size)])

        # Split the flat 1-D arrays into batch_num equal batches.
        self.inputs_batches = np.split(inputs, self.batch_num, 0)
        self.targets_batches = np.split(targets, self.batch_num, 0)

    def next_batch(self):
        """Return the next (inputs, targets) batch, cycling past the last one."""
        inputs, targets = self.inputs_batches[self.pointer], self.targets_batches[self.pointer]
        self.pointer = (self.pointer + 1) % self.batch_num
        return inputs, targets

    def reset_batch_pointer(self):
        """Rewind the batch cursor to the first batch."""
        self.pointer = 0


def test():
    """Smoke-test TextLoader on a corpus file and print the first batch.

    Reads the corpus at ``data_dir``, tokenizes each line on whitespace,
    and prints the words of the first (inputs, targets) batch.
    """
    data_dir = '../dataset/text8'
    #data_dir = '../data/tinyshakespeare'
    batch_size = 64
    win_size = 3
    # TextLoader expects an iterable of tokenized lines, not a path: read the
    # file and split each line into words.  (The original passed the path
    # string itself, so the "corpus" was just the characters of the path.)
    with open(data_dir, encoding='utf-8') as corpus_file:
        corpus = [line.split() for line in corpus_file]
    loader = TextLoader(corpus, batch_size, win_size)
    inputs, targets = loader.next_batch()
    print(len(inputs), len(targets))
    for ind in inputs:
        print(loader.words[ind])
    for ind in targets:
        print(loader.words[ind])

# Run the smoke test only when executed as a script, not on import.
if __name__ == '__main__':
    test()