import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Conv1D, GlobalMaxPooling1D

import glob
import os
from random import shuffle
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors

# Root of the unpacked aclImdb training set (contains 'pos' and 'neg' subdirs).
imdb_filepath = 'train/aclImdb/train/'
# Pre-trained Google News word2vec binary (300-dim vectors).
google_filepath = 'train/GoogleNews-vectors-negative300.bin'


def pre_process_data():
    """Load the IMDB training reviews from disk.

    Reads every ``*.txt`` file under the ``pos`` and ``neg`` subfolders of
    ``imdb_filepath`` and returns a shuffled list of ``(label, text)``
    tuples, where label 1 = positive and 0 = negative.
    """
    dataset = []
    # (label, subdirectory) pairs for the two sentiment classes.
    for label, subdir in ((1, 'pos'), (0, 'neg')):
        pattern = os.path.join(os.path.join(imdb_filepath, subdir), '*.txt')
        for path in glob.glob(pattern):
            with open(path, 'r', encoding='utf-8') as handle:
                dataset.append((label, handle.read()))

    shuffle(dataset)
    return dataset


def load_googlenews_vec():
    """Load the pre-trained Google News word2vec embeddings.

    Only the 20,000 most frequent words are loaded (``limit``) to keep
    memory usage manageable; the file is in word2vec binary format.
    """
    return KeyedVectors.load_word2vec_format(
        google_filepath, binary=True, limit=20000
    )

def tokenize_and_vectorize(dataset):
    """Turn each review text into a sequence of word vectors.

    :param dataset: iterable of ``(label, text)`` tuples (labels ignored here)
    :return: list with one entry per sample; each entry is the list of
             embedding vectors for the sample's tokens.  Tokens missing
             from the embedding vocabulary are silently skipped, so a
             sample may yield fewer vectors than tokens.
    """
    word_vectors = load_googlenews_vec()
    tokenizer = TreebankWordTokenizer()
    vectorized_data = []
    for sample in dataset:
        tokens = tokenizer.tokenize(sample[1])
        sample_vecs = []
        for token in tokens:
            try:
                sample_vecs.append(word_vectors[token])
            except KeyError:
                # Out-of-vocabulary token: drop it rather than fail.
                pass
        vectorized_data.append(sample_vecs)
    return vectorized_data


def collect_expected(dataset):
    """Extract the target label (first element) from every sample.

    :param dataset: iterable of ``(label, text)`` tuples
    :return: list of labels, in the same order as the input
    """
    return [sample[0] for sample in dataset]


# ============== Pad and truncate token sequences so the CNN receives tensors of the required shape ==============
def pad_trunc(data, maxlen=400):
    """Pad or truncate every token-vector sequence to exactly ``maxlen``.

    Sequences longer than ``maxlen`` are truncated; shorter ones are
    padded with zero vectors whose length matches the embedding
    dimension of the input vectors.

    Fixes over the previous version:
    - input samples are no longer mutated in place (padding used to
      ``append`` directly onto the caller's lists);
    - each padding slot gets its OWN zero vector instead of every row
      sharing one list object (aliasing bug);
    - empty ``data`` returns ``[]`` instead of raising ``IndexError``.

    :param data: list of samples, each a list of equal-length float vectors
    :param maxlen: target sequence length (default 400)
    :return: new list of padded/truncated samples; inputs are untouched
    """
    if not data:
        return []

    # Infer the embedding dimension from the first non-empty sample.
    vec_len = 0
    for sample in data:
        if sample:
            vec_len = len(sample[0])
            break

    new_data = []
    for sample in data:
        # Copy so the caller's sample is never mutated.
        temp = list(sample[:maxlen])
        # Fresh zero vector per slot to avoid shared-object aliasing.
        temp.extend([0.0] * vec_len for _ in range(maxlen - len(temp)))
        new_data.append(temp)
    return new_data


if __name__ == '__main__':
    # Script entry point — pipeline wiring (pre_process_data ->
    # tokenize_and_vectorize / collect_expected -> pad_trunc -> model)
    # is not implemented yet.
    pass
