# #-*- coding:utf-8 -*-
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
import matplotlib.pyplot as plt
import keras
# print keras.__version__
from keras import regularizers
from keras import models
from keras import layers
from keras.datasets import imdb
import numpy as np
import os




def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer sequences into a binary float matrix.

    Row i of the result has a 1.0 at every column index appearing in
    sequences[i] and 0.0 everywhere else.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # NumPy fancy indexing sets all listed columns of this row at once.
        encoded[row, word_indices] = 1.
    return encoded


def train_plot(epochs, train_data, train_labels, test_data, test_labels):
    """Train a baseline IMDB classifier and a dropout-regularized twin,
    save the dropout model, and plot both validation-loss curves.

    Args:
        epochs: number of training epochs for both models.
        train_data / train_labels: raw IMDB training sequences and labels.
        test_data / test_labels: raw IMDB test sequences and labels
            (used here as the validation split).
    """
    # --- Prepare training and validation tensors -----------------------
    x_train = vectorize_sequences(train_data)
    x_test = vectorize_sequences(test_data)
    y_train = np.asarray(train_labels).astype('float32')
    y_test = np.asarray(test_labels).astype('float32')

    # --- Reference model: two 16-unit dense layers, no regularization --
    baseline = models.Sequential()
    baseline.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
    baseline.add(layers.Dense(16, activation='relu'))
    baseline.add(layers.Dense(1, activation='sigmoid'))
    baseline.compile(optimizer='rmsprop',
                     loss='binary_crossentropy',
                     metrics=['acc'])
    baseline_hist = baseline.fit(x_train, y_train,
                                 epochs=epochs,
                                 batch_size=512,
                                 validation_data=(x_test, y_test))

    # --- Experimental model: same architecture plus two Dropout(0.5)
    # layers, to see how well dropout reduces overfitting ---------------
    dropout_net = models.Sequential()
    dropout_net.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
    dropout_net.add(layers.Dropout(0.5))
    dropout_net.add(layers.Dense(16, activation='relu'))
    dropout_net.add(layers.Dropout(0.5))
    dropout_net.add(layers.Dense(1, activation='sigmoid'))
    dropout_net.compile(optimizer='rmsprop',
                        loss='binary_crossentropy',
                        metrics=['acc'])
    dropout_hist = dropout_net.fit(x_train, y_train,
                                   epochs=epochs,
                                   batch_size=512,
                                   validation_data=(x_test, y_test))

    # --- Plot validation loss of both runs on a shared epoch axis ------
    epoch_axis = range(1, epochs + 1)
    dropout_val_loss = dropout_hist.history['val_loss']
    baseline_val_loss = baseline_hist.history['val_loss']
    # Persist the regularized model so top() can reuse it on later runs.
    dropout_net.save("dpt_model.h5")
    plt.plot(epoch_axis, baseline_val_loss, 'b+', label='Original model')
    plt.plot(epoch_axis, dropout_val_loss, 'bo', label='Dropout-regularized model')
    plt.xlabel('Epochs')
    plt.ylabel('Validation loss')
    plt.legend()
    plt.show()

    # Takeaway: the dropout model clearly improves on the reference net.
    # The most common ways to fight overfitting are:
    # * gather more training data
    # * reduce network capacity (layers, units per layer, weight count)
    # * add weight regularization
    # * add dropout layers


def top(epochs):
    """Load the cached dropout model if one exists, otherwise train it.

    Args:
        epochs: number of training epochs forwarded to train_plot()
            when no saved model is found.

    Returns:
        The loaded Keras model when 'dpt_model.h5' exists, else None
        (training + saving is delegated to train_plot()).
    """
    if os.path.exists('dpt_model.h5'):
        # A model file is already on disk: load it instead of retraining.
        # Bug fix: the original called a bare `load_model`, which is never
        # imported in this file and raised NameError; the `models` module
        # imported at the top of the file exposes the same function.
        network = models.load_model('dpt_model.h5')
        # print("weights", network.get_weights())
        return network
    else:
        # No cached model: fetch the IMDB data, train both models, and
        # save the dropout model (done inside train_plot).
        (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
        train_plot(epochs, train_data, train_labels, test_data, test_labels)
        
# Script entry point: load the cached model or train for 20 epochs.
if __name__ == '__main__':
    top(epochs=20)