# -*- coding: utf-8 -*-

# In[ ]:7.3 Convolutional neural nets
#### 7.3.4 Padding
#### Listing 7.1 Keras network with one convolution layer
from keras.models import Sequential
from keras.layers import Conv1D

# Minimal demo: a single 1D convolution layer over sequences of
# 100 tokens, each represented by a 300-dimensional embedding.
model = Sequential()
conv_layer = Conv1D(
    filters=16,
    kernel_size=3,
    padding='same',      # output length == input length
    activation='relu',
    strides=1,
    input_shape=(100, 300),
)
model.add(conv_layer)

# In[ ]: 7.4 Narrow windows indeed
#### 7.4.1 Implementation in Keras: prepping the data
#### Listing 7.2 Import your Keras convolution tools
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Conv1D, GlobalMaxPooling1D

#### Listing 7.3 Preprocessor to load your documents
import glob
import os
from random import shuffle
def pre_process_data(filepath):
    """Load the labeled IMDB reviews found under ``filepath``.

    Reads every ``*.txt`` file in the ``pos/`` and ``neg/`` subdirectories,
    labels them 1 and 0 respectively, and returns a shuffled list of
    ``(label, review_text)`` tuples.
    """
    dataset = []
    # (label, subdirectory) pairs replace the two copy-pasted loops.
    for label, subdir in ((1, 'pos/'), (0, 'neg/')):
        pattern = os.path.join(os.path.join(filepath, subdir), '*.txt')
        for doc_path in glob.glob(pattern):
            with open(doc_path, 'r', encoding='UTF-8') as doc:
                dataset.append((label, doc.read()))

    shuffle(dataset)
    return dataset

#dataset = pre_process_data('<path to your downloaded file>/aclimdb/train')
# NOTE(review): hard-coded machine-specific path below — point this at your
# own aclImdb/train download before running.
dataset = pre_process_data('E:/Mine/大三/机器学习/aclImdb/train')
# Notebook-style inspection of the first (label, text) pair; has no effect
# when run as a plain script.
dataset[0]

#### Listing 7.4 Vectorizer and tokenizer
from nltk.tokenize import TreebankWordTokenizer

###############################################################################
# GoogleNews-vectors-negative300.bin contains 300-dimensional word vectors
# pretrained on Google News. Loading the full model uses a lot of memory, so
# only the most frequent entries are loaded via the `limit` parameter.
###############################################################################
from gensim.models.keyedvectors import KeyedVectors

#word_vectors = KeyedVectors.load_word2vec_format('D:\\0新闻语料库\\Google新闻语料词向量300维\\googlenews-vectors-negative300.bin.gz',binary=True)# load the full model in its original binary format
#word_vectors = KeyedVectors.load_word2vec_format('D:\\0新闻语料库\\Google新闻语料词向量300维\\googlenews-vectors-negative300.bin.gz',binary=True, limit=400000)
# `limit` caps how many of the most-frequent word vectors are read into
# memory (400,000 in the commented line above, 200,000 in the call below).
word_vectors = KeyedVectors.load_word2vec_format('E:\\Mine\\大三\\机器学习\\googlenews-vectors-negative300.bin.gz',binary=True, limit=200000)



def tokenize_and_vectorize(dataset):
    """Tokenize each review and map its tokens to word2vec vectors.

    Args:
        dataset: iterable of ``(label, text)`` pairs; only the text is used.

    Returns:
        A list (one entry per sample) of lists of word vectors looked up in
        the module-level ``word_vectors`` model. Tokens absent from the
        word2vec vocabulary are silently skipped, so a sample's vector list
        may be shorter than its token list.
    """
    tokenizer = TreebankWordTokenizer()
    vectorized_data = []
    # Fix: removed the unused local `expected = []` that the original
    # declared but never filled or returned.
    for sample in dataset:
        tokens = tokenizer.tokenize(sample[1])
        sample_vecs = []
        for token in tokens:
            try:
                sample_vecs.append(word_vectors[token])
            except KeyError:
                pass  # no matching token in the Google w2v vocab

        vectorized_data.append(sample_vecs)

    return vectorized_data

#### Listing 7.5 Target labels
def collect_expected(dataset):
    """Peel off the target values (the first element of each pair)."""
    return [label for label, *_ in dataset]

# Vectorize every review and pull out the matching 0/1 sentiment targets.
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)

#### Listing 7.6 Train/test split
# Hold out the last 20% of the (already shuffled) samples for testing.
split_point = int(len(vectorized_data)*.8)

x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]

#### Listing 7.7 CNN parameters
maxlen = 400          # pad/truncate every review to 400 tokens
batch_size = 32       # samples per gradient update
embedding_dims = 300  # dimensionality of the Google News word vectors
filters = 250         # number of convolution filters
kernel_size = 3       # width (in tokens) of each filter window
hidden_dims = 250     # units in the fully connected layer
epochs = 2            # passes over the training data

#### Listing 7.8 Padding and truncating your token sequence
def pad_trunc(data, maxlen):
    """For a given dataset, pad with zero vectors or truncate to ``maxlen``.

    Args:
        data: list of samples, each a list of equal-length word vectors.
        maxlen: target sequence length in tokens.

    Returns:
        A new list in which every sample has exactly ``maxlen`` vectors;
        long samples are truncated and short ones are padded with zero
        vectors. Input samples are never mutated.

    BUG FIX: in the original, the padding block was indented OUTSIDE the
    ``for sample`` loop (forming a stray ``for``/``else`` after it), so
    ``new_data.append`` ran once and the function returned a single-element
    list no matter how many samples were passed in. Padding also appended to
    ``temp = sample``, mutating the caller's data; a copy is used instead.
    """
    if not data:
        return []

    # Zero vector matching the dimensionality of the word vectors.
    zero_vector = [0.0] * len(data[0][0])

    new_data = []
    for sample in data:
        if len(sample) > maxlen:
            temp = sample[:maxlen]
        elif len(sample) < maxlen:
            # Copy, then append the appropriate number of zero vectors.
            temp = list(sample) + [zero_vector] * (maxlen - len(sample))
        else:
            temp = sample
        new_data.append(temp)
    return new_data

#### Listing 7.9 Gathering your augmented and truncated data
# Fix every review at maxlen tokens, then stack into dense
# (samples, maxlen, embedding_dims) ndarrays for Keras.
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)


#### 7.4.2 Convolutional neural network architecture
#### Listing 7.10 Construct a 1D CNN
print('Build model...')
model = Sequential()
# Listing 7.10: 1D convolution over the (maxlen, embedding_dims) sequence.
model.add(Conv1D(filters,
                 kernel_size,
                 padding='valid',
                 activation='relu',
                 strides=1,
                 input_shape=(maxlen, embedding_dims)))

#### 7.4.3 Pooling
model.add(GlobalMaxPooling1D())

#### 7.4.4 Dropout
#### Listing 7.11 Fully connected layer with dropout
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))

#### 7.4.5 The cherry on the sundae
#### Listing 7.12 Funnel: single sigmoid output for binary sentiment
model.add(Dense(1))
model.add(Activation('sigmoid'))

#### Listing 7.13 Compile the CNN
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# BUG FIX: the original pasted in Listing 7.14 here, adding a SECOND
# Dense(1)/sigmoid pair AFTER compile(), which duplicated the output layer
# and invalidated the compiled model before training. Those two lines are
# removed.

#### Listing 7.15 Training the CNN
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))

#### 7.4.6 Let’s get to learning (training)
#### Listing 7.16 Save your hard work
# Persist the architecture as JSON and the learned weights as HDF5.
model_structure = model.to_json()
with open("cnn_model.json", "w") as json_file:
    json_file.write(model_structure)
model.save_weights("cnn_weights.h5")

# FIX: removed the stray no-op statement `validation_data=(x_test, y_test)`
# that sat here — a leftover fragment of the fit() call with no effect.

#### 7.4.7 Using the model in a pipeline
#### Listing 7.17 Loading a saved model
from keras.models import model_from_json

# Rebuild the architecture from the saved JSON, then restore the weights.
with open("cnn_model.json", "r") as saved_arch:
    model = model_from_json(saved_arch.read())
model.load_weights('cnn_weights.h5')

#### Listing 7.18 Test example
# Test example: mixed-sentiment text. Note there is deliberately no space
# between "break!" and "Ugh," — the original fragments join directly.
sample_1 = (
    "I hate that the dismal weather had me down for so long, when will it break!"
    "Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. "
    "I can't wait for the weekend."
)

#### Listing 7.19 Prediction
# Vectorize the sample exactly like the training data; the placeholder
# label 1 is never read by the model.
vec_list = tokenize_and_vectorize([(1, sample_1)])
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list,
                      (len(test_vec_list), maxlen, embedding_dims))
# Raw sigmoid probability of the positive class.
model.predict(test_vec)

# FIX: `Sequential.predict_classes` was deprecated and removed in TF 2.6.
# Derive the 0/1 class label by thresholding the sigmoid output instead.
(model.predict(test_vec) > 0.5).astype('int32')





