# #-*- coding:utf-8 -*-
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')

import matplotlib.pyplot as plt
import keras
# print keras.__version__
from keras import regularizers
from keras import models
from keras import layers
from keras.datasets import imdb
from keras.utils import to_categorical
from keras.models import load_model
import numpy as np
import os



# Load the IMDB sentiment dataset, keeping only the 10,000 most frequent words.
# Reviews arrive as lists of word indices; labels are 0 (negative) / 1 (positive).
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer index lists into a 2-D float matrix.

    Each row of the result corresponds to one input sequence: the columns
    named by that sequence's indices are set to 1.0 and every other column
    stays 0.0.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # Fancy indexing flips all listed positions of this row in one shot.
        encoded[row, word_indices] = 1.0
    return encoded

def train_plot(epochs):
    """Train an unregularized and an L2-regularized IMDB classifier, plot
    their validation-loss curves, and save both trained models.

    Args:
        epochs: number of training epochs for each model.

    Side effects: opens a matplotlib window (plt.show) and writes
    'original_model.h5' and 'l2_model.h5' to the working directory.
    """
    # ---- Training / validation data ------------------------------------
    # Multi-hot encode the reviews; cast labels to float32 for
    # binary crossentropy. NOTE(review): the test split is reused as the
    # validation set here — fine for this demo, not for real evaluation.
    x_train = vectorize_sequences(train_data)
    x_test = vectorize_sequences(test_data)
    y_train = np.asarray(train_labels).astype('float32')
    y_test = np.asarray(test_labels).astype('float32')

    # ---- Reference model: no weight regularization ---------------------
    original_model = models.Sequential()
    original_model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
    original_model.add(layers.Dense(16, activation='relu'))
    original_model.add(layers.Dense(1, activation='sigmoid'))

    original_model.compile(optimizer='rmsprop',
                           loss='binary_crossentropy',
                           metrics=['acc'])

    original_hist = original_model.fit(x_train, y_train,
                                       epochs=epochs,
                                       batch_size=512,
                                       validation_data=(x_test, y_test))

    # ---- L2-regularized model ------------------------------------------
    # A common way to mitigate overfitting is to constrain the network's
    # complexity by forcing its weights to take only small values, which
    # makes the weight distribution more "regular". This is called weight
    # regularization: a cost associated with large weights is added to the
    # loss. Two flavors exist, L1 and L2. l2(0.001) means every coefficient
    # of the layer's weight matrix adds 0.001 * weight_coefficient ** 2 to
    # the network's total loss. The penalty is applied only at training
    # time, so training loss will read noticeably higher than test loss.
    l2_model = models.Sequential()
    l2_model.add(layers.Dense(16,
                              kernel_regularizer=regularizers.l2(0.001),
                              activation='relu',
                              input_shape=(10000,)))
    l2_model.add(layers.Dense(16,
                              kernel_regularizer=regularizers.l2(0.001),
                              activation='relu'))
    l2_model.add(layers.Dense(1, activation='sigmoid'))

    l2_model.compile(optimizer='rmsprop',
                     loss='binary_crossentropy',
                     metrics=['acc'])

    l2_model_hist = l2_model.fit(x_train, y_train,
                                 epochs=epochs,
                                 batch_size=512,
                                 validation_data=(x_test, y_test))

    # ---- Compare validation losses -------------------------------------
    # Use a separate name for the x-axis values instead of rebinding the
    # `epochs` parameter (the original shadowed it with a range object).
    epoch_range = range(1, epochs + 1)
    original_val_loss = original_hist.history['val_loss']
    l2_model_val_loss = l2_model_hist.history['val_loss']
    plt.plot(epoch_range, original_val_loss, 'b+', label='Original model')
    plt.plot(epoch_range, l2_model_val_loss, 'bo', label='L2-regularized model')
    plt.xlabel('Epochs')
    plt.ylabel('Validation loss')
    plt.legend()
    plt.show()

    # Persist both models (architecture + weights) for later reuse by top().
    original_model.save('original_model.h5')
    l2_model.save('l2_model.h5')




# As you can see, the model with L2 regularization (dots) has become 
# much more resistant to overfitting than the reference model (crosses), 
# even though both models have the same number of parameters.
# 
# As alternatives to L2 regularization, you could use one of the following Keras weight regularizers:


# L1 regularization
# regularizers.l1(0.001)

# # L1 and L2 regularization at the same time
# regularizers.l1_l2(l1=0.001, l2=0.001)


def top(epochs):
    """Load cached models if both .h5 files exist; otherwise train from scratch.

    Args:
        epochs: number of epochs to train each model when no cache is found.
    """
    # If both saved model files are present in the current directory,
    # restore the L2 model (architecture + weights) instead of retraining.
    if os.path.exists('l2_model.h5') and os.path.exists('original_model.h5'):
        network = load_model('l2_model.h5')
        # Python 3 print function (the original used a Python 2 print
        # statement, a SyntaxError under Python 3). "输出权重" = "output weights".
        print("输出权重", network.get_weights())
    else:
        # No cached models: train both models and save them to disk.
        train_plot(epochs)


if __name__ == '__main__':
    # Script entry point: reload cached models or train for 20 epochs each.
    top(epochs=20)
