#!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import logging

import numpy as np

import tensorflow as tf
import jieba

# Configure root logging once at import time; all module logging below uses it.
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s, %(asctime)s: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')


class CorpusHelper(object):
    """Prepare text-classification data for a TextCNN-style model.

    Loads pre-trained word embeddings, builds a word -> index map, converts
    tab-separated "label<TAB>sentence" corpora into fixed-length index
    matrices, and serves shuffled mini-batches over the training set.
    """

    def __init__(self, max_length=5, embedding_dim=1024, num_classes=2):
        """
        Args:
            max_length: maximum number of word tokens kept per sentence
                (shorter sentences are left-padded with index 0).
            embedding_dim: dimensionality of each embedding vector; embedding
                file lines must have embedding_dim + 1 space-separated fields.
            num_classes: number of target classes for one-hot labels.
        """
        self.max_length = max_length
        self.embedding_dim = embedding_dim
        self.num_classes = num_classes
        self.load_embedding_vector()
        self.get_word_idx_map()

    def next_batch(self, batch_size=1024):
        """Yield (batch_x, batch_y, epoch, step) tuples forever.

        Draws samples without replacement from a shuffled permutation of the
        training indices; at an epoch boundary the permutation tail is
        consumed, the permutation is reshuffled, and the batch is topped up
        from the head of the new permutation.

        Requires load_train_data() to have been called first.
        """
        epoch = 0
        step = 0
        self.idx = np.arange(batch_size)
        while True:
            remain = self.total_train - self.train_pos
            if remain > batch_size:
                # .copy() is essential: a plain slice of self.randoms is a
                # numpy *view*, and the in-place writes in the else-branch
                # below would otherwise overwrite parts of the permutation,
                # duplicating some training rows and dropping others.
                self.idx = self.randoms[self.train_pos: self.train_pos + batch_size].copy()
                self.train_pos += batch_size
            else:
                # Epoch boundary: use the permutation tail, reshuffle, then
                # fill the remainder from the head of the new permutation.
                self.idx[:remain] = self.randoms[self.train_pos:]
                np.random.shuffle(self.randoms)
                epoch += 1
                self.idx[remain:] = self.randoms[:batch_size - remain]
                self.train_pos = batch_size - remain
            yield self.train_x[self.idx], self.train_y[self.idx], epoch, step
            step += 1

    @staticmethod
    def _read_corpus(path):
        """Read a "label<TAB>sentence" file.

        Returns:
            (labels, sentences): int32 numpy array of labels and a tuple of
            the raw sentence strings, in file order.
        """
        with codecs.open(path, 'r', 'utf-8') as f:
            pairs = [line.strip().split('\t') for line in f]
        labels, sentences = zip(*pairs)
        return np.array(labels).astype(np.int32), sentences

    def _one_hot(self, labels):
        """Return a (len(labels), num_classes) one-hot float matrix."""
        return np.eye(self.num_classes)[labels]

    def load_train_data(self, train_data_path='data/textcnn_train_toutiaohao_2_class_v2.csv',
                        test_data_path='data/textcnn_test_toutiaohao_2_class_v2.csv'):
        """Load the train/test corpora and initialize batching state.

        Populates test_x / test_y / test_y0 / test_sents and their train_*
        counterparts, then prepares the shuffled index permutation consumed
        by next_batch().
        """
        self.test_y0, self.test_sents = self._read_corpus(test_data_path)
        self.test_x = self.get_row_format(self.test_sents)
        self.test_y = self._one_hot(self.test_y0)
        logging.info('test sentences %d' % len(self.test_y))

        self.train_y0, self.train_sents = self._read_corpus(train_data_path)
        self.train_x = self.get_row_format(self.train_sents)
        self.train_y = self._one_hot(self.train_y0)
        logging.info('train sentences %d' % len(self.train_y))

        # Batching state: a shuffled permutation of training-row indices plus
        # a cursor into it, consumed by next_batch().
        self.total_train = len(self.train_x)
        self.randoms = np.arange(self.total_train)
        np.random.shuffle(self.randoms)
        self.train_pos = 0

    def load_embedding_vector(self, file_name='/mnt/cephfs/mahao/word_embedding/top_30000_word_embedding.txt'):
        """Load "word v1 ... v_dim" embedding lines into self.embeddings.

        Lines with an unexpected field count (e.g. a word2vec-style header
        line) are skipped. Sets self.words, self.vocab_size and
        self.embeddings (float32; row i corresponds to self.words[i]).
        """
        self.words = []
        words_embedding = []
        expected_fields = self.embedding_dim + 1  # the word itself + its vector
        with codecs.open(file_name, mode='r', encoding='utf8') as f:
            for line in f:
                items = line.strip().split(' ')
                if len(items) != expected_fields:
                    continue
                self.words.append(items[0])
                words_embedding.append(items[1:])

        self.vocab_size = len(self.words)
        self.embeddings = np.array(words_embedding, dtype=np.float32)
        # Use stdlib logging consistently (tf.logging was removed in TF 2.x).
        logging.info('vocab_size %d' % self.vocab_size)

    def get_word_idx_map(self):
        """Build self.word_map: word -> row index into self.embeddings."""
        self.word_map = {word: idx for idx, word in enumerate(self.words)}

    @staticmethod
    def process_query(query=''):
        """Tokenize a query with jieba (accurate mode); return a token list."""
        tokens = jieba.cut(query.strip(), cut_all=False)

        return list(tokens)

    def gen_row(self, content, vector):
        """Write up to max_length word indices into `vector`, right-aligned.

        Out-of-vocabulary tokens are dropped; the left side of `vector`
        keeps its pre-existing values (zeros for rows created by
        get_row_format), i.e. short sentences are left-padded.
        """
        tokens = CorpusHelper.process_query(content)
        indices = [self.word_map[t] for t in tokens if t in self.word_map]
        indices = indices[:self.max_length]
        pos = self.max_length - len(indices)  # right-align the kept tokens
        for word_idx in indices:
            vector[pos] = word_idx
            pos += 1

    def get_row_format(self, queries):
        """Convert an iterable of sentences into an (n, max_length) int32
        index matrix, logging progress every 100k rows."""
        X_data = np.zeros((len(queries), self.max_length), np.int32)
        for row in range(len(X_data)):
            if row % 100000 == 0:
                logging.info('raw %d / %d' % (row, len(X_data)))
            self.gen_row(queries[row], X_data[row])
        return X_data


if __name__ == '__main__':
    # Build the helper (loads embeddings + vocab map), then read the
    # train/test corpora and prepare batching state.
    helper = CorpusHelper()
    helper.load_train_data()
