#!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import logging

import numpy as np

# Configure the root logger once at import time:
# INFO level, lines like "INFO, 2024-01-01 12:00:00: message".
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s, %(asctime)s: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')


class CorpusHelper(object):
    """Load character embeddings and labelled query data, and serve
    shuffled mini-batches of fixed-length character-id sequences.

    Data files are expected to contain one ``query##label`` line each,
    where ``label`` is an integer class id in ``[0, number_class)``.
    """

    def __init__(self, number_class):
        """
        :param number_class: number of label classes (width of the
            one-hot target vectors built by load_train_data()).
        """
        self.number_class = number_class
        # Queries are truncated / left-padded to this many characters.
        self.max_length = 16
        self.load_embedding_vector()
        self.get_word_idx_map()

    def next_batch(self, batch_size=1024):
        """Infinite generator of (x, y, epoch, step) training batches.

        Walks a shuffled permutation of the training set; when the
        permutation is exhausted it reshuffles and wraps around,
        incrementing ``epoch``.  ``load_train_data()`` must have been
        called first.
        NOTE(review): assumes total_train >= batch_size; otherwise the
        wrap-around slice assignment fails with a shape mismatch.
        """
        epoch = 0
        step = 0
        self.idx = np.arange(batch_size)
        while True:
            remain = self.total_train - self.train_pos
            if remain > batch_size:
                # Bug fix: .copy() -- a bare numpy slice is a *view* of
                # self.randoms, so the in-place writes in the wrap-around
                # branch below used to corrupt the permutation itself
                # (duplicated indices, silently skipped samples).
                self.idx = self.randoms[self.train_pos: self.train_pos + batch_size].copy()
                self.train_pos += batch_size
            else:
                # Wrap around: tail of the old permutation plus the head
                # of a freshly shuffled one.
                self.idx[:remain] = self.randoms[self.train_pos:]
                np.random.shuffle(self.randoms)
                epoch += 1
                self.idx[remain:] = self.randoms[:batch_size - remain]
                self.train_pos = batch_size - remain
            yield self.train_x[self.idx], self.train_y[self.idx], epoch, step
            step += 1

    def load_train_data(self, train_data_path='data/train_query_label_enhance_210000.csv',
                        test_data_path='data/test_query_label.csv'):
        """Load ``query##label`` lines from both files.

        Populates test_queries/test_y0/test_x/test_y and the train_*
        equivalents, then initialises the shuffle state consumed by
        next_batch().
        """
        (self.test_queries, self.test_y0,
         self.test_x, self.test_y) = self._load_split(test_data_path, 'test')
        (self.train_queries, self.train_y0,
         self.train_x, self.train_y) = self._load_split(train_data_path, 'train')

        # Shuffled permutation of training indices for next_batch().
        self.total_train = len(self.train_x)
        self.randoms = np.arange(self.total_train)
        np.random.shuffle(self.randoms)
        self.train_pos = 0

    def _load_split(self, path, tag):
        """Read one ``query##label`` file.

        :returns: (queries, int32 labels, id matrix, one-hot targets)
        """
        with codecs.open(path, 'rb', 'utf-8') as f:
            pairs = [line.strip().split('##') for line in f.readlines()]
        queries, labels = zip(*pairs)
        y0 = np.array(labels).astype(np.int32)
        x = self.get_row_format(queries)
        # Vectorised one-hot encoding (replaces the per-row Python loop).
        one_hot = np.eye(self.number_class)[y0]
        logging.info('%s sentences %d' % (tag, len(one_hot)))
        return queries, y0, x, one_hot

    def load_embedding_vector(self, file_name='word2vec/group_text3_vector_1e2_w9_n1.dat_sort'):
        """Load up to vocab_size character embeddings from a
        word2vec-style text file (``<char> <v1> ... <v200>`` per line).
        """
        self.vocab_size = 10000
        self.embeddings = []
        self.chars = []
        # Bug fix: use a context manager -- the file handle was never closed.
        with codecs.open(file_name, mode='r', encoding='utf8') as fh:
            for line in fh:
                if len(self.chars) >= self.vocab_size:
                    break
                tokens = line.rstrip().split(' ')
                if len(tokens) != 201:
                    # Malformed split (presumably the character itself is a
                    # space) -- fall back to the raw first character.
                    # NOTE(review): no vector is appended in this branch, so
                    # chars and embeddings drift out of alignment whenever it
                    # fires -- confirm against the actual file format.
                    character = line[0]
                else:
                    character = tokens[0]
                    self.embeddings.append(tokens[1:])
                self.chars.append(character)
        self.embeddings = np.array(self.embeddings, dtype=np.float32)
        logging.info('the vocab_size is %d' % len(self.chars))

    def get_word_idx_map(self):
        """Build the char -> index map; index i matches self.chars[i]
        (duplicates keep the last occurrence, as before)."""
        self.word_map = {key: word_idx
                         for word_idx, key in enumerate(self.chars)}

    def gen_row(self, content, vector):
        """Encode ``content`` into ``vector`` (length max_length) as
        right-aligned character ids; untouched left positions keep the
        caller's fill value (zero rows, i.e. 0 == padding)."""
        content = content.lower()
        # Strip punctuation/whitespace: every character of this literal
        # (including the brackets) is removed -- it is NOT a regex class.
        for char in u'[+\\-=|&>《》，。<!！(){}[]^~*?？":\'/#·“”‘’, ]':
            content = content.replace(char, u'')
        # Keep only characters present in the vocabulary.
        # Bug fix: list() -- on Python 3, filter/map return lazy iterators
        # that can be neither sliced nor len()-ed.
        content = list(filter(lambda x: x in self.word_map, content))
        # Truncate to the first max_length characters.
        content = content[:self.max_length]
        # Map characters to their vocabulary ids.
        content = list(map(lambda x: self.word_map[x], content))
        pos = max(0, self.max_length - len(content))
        for character in content:
            vector[pos] = character
            pos += 1

    def get_row_format(self, queries):
        """Encode a sequence of query strings into a
        (len(queries), max_length) int32 matrix of character ids."""
        X_data = np.zeros((len(queries), self.max_length), np.int32)
        for raw, query in enumerate(queries):
            if raw % 10000 == 0:
                logging.info('raw %d / %d' % (raw, len(X_data)))
            self.gen_row(query, X_data[raw])
        return X_data


if __name__ == '__main__':
    # Smoke test: encode two sample sentences, then pull one batch.
    # Bug fix: CorpusHelper() was called with no argument, but __init__
    # requires number_class -- this raised TypeError before anything ran.
    # 2 assumes binary labels in the data files -- TODO confirm against
    # the real label set.
    CH = CorpusHelper(number_class=2)
    sents = u'''清水县下派干部晚上驻村
    手机购物带来大商机'''.split('\n')
    print(CH.get_row_format(sents))
    CH.load_train_data()
    for infos in CH.next_batch():
        break
