# http://blog.csdn.net/chancein007/article/details/72862265
# Comparative study: learning with neural networks
# https://www.jianshu.com/p/9efae7a20493

# Explanation of epochs, batch_size and iterations in deep learning:
# http://blog.csdn.net/u013041398/article/details/72841854
import time
from keras import backend as K
import numpy as np

from data_load_tool import LoadHandWriteData

np.random.seed(1337)
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = "0"

from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, MaxPooling2D, Flatten
from keras.layers.convolutional import Conv2D
from keras.optimizers import RMSprop, SGD
import matplotlib.pyplot as plt

# Load the handwriting dataset (same layout as keras.datasets.mnist):
# X_* are (N, 28, 28) grayscale images, y_* are integer class labels 0-9.
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train, y_train), (X_test, y_test) = LoadHandWriteData.load_data()

# Data pre-processing.
# Reshape directly to the channels-last 4-D tensor the CNN expects —
# the original code flattened to (N, 784) and then immediately reshaped
# back to (N, 28, 28, 1), which was redundant work.
# Cast to float32 before scaling: it matches Keras' default dtype and
# halves the memory footprint compared with the implicit float64.
img_rows, img_cols = 28, 28
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1).astype('float32') / 255  # Normalize to [0, 1]
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1).astype('float32') / 255  # Normalize to [0, 1]

# One-hot encode the labels for categorical cross-entropy (10 classes).
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# exit(0)
# This part was adapted from: https://www.cnblogs.com/surfzjy/p/6445437.html

# CNN: two conv+pool stages followed by a fully-connected classifier.
# Fixes versus the original:
#  - Keras 2 argument names (`filters`, `kernel_size`, `padding`) replace the
#    deprecated `nb_filter`/`nb_row`/`nb_col`/`border_mode` (the file already
#    imports the Keras 2 `Conv2D` class).
#  - `dim_ordering='th'` (channels-FIRST) is dropped: the data above is
#    reshaped to channels-LAST (28, 28, 1), so under 'th' the declared
#    input shape was misinterpreted as (channels=28, rows=28, cols=1).
model = Sequential([
    # (28, 28, 1) -> (28, 28, 32): 'same' padding preserves spatial size.
    Conv2D(
        filters=32,
        kernel_size=(5, 5),
        padding='same',
        input_shape=(28, 28, 1),  # height, width, channels (channels-last)
    ),
    Activation('relu'),
    # (28, 28, 32) -> (14, 14, 32)
    MaxPooling2D(
        pool_size=(2, 2),
        strides=(2, 2),
        padding='same',
    ),
    # (14, 14, 32) -> (14, 14, 64)
    Conv2D(64, (5, 5), padding='same'),
    Activation('relu'),
    # (14, 14, 64) -> (7, 7, 64)
    MaxPooling2D(pool_size=(2, 2), padding='same'),
    # (7, 7, 64) -> 3136 -> 1024 -> 10-way softmax
    Flatten(),
    Dense(1024),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])

# Other way to define optimizer

# ========= Test 1 =================
# sgd = SGD(lr=0.5, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])  # 指优化方法sgd

# ========= Test 2 =================
# model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])

# ========= Test 3 =================
# model.compile(
#     optimizer='rmsprop',
#     loss='categorical_crossentropy',
#     metrics=['accuracy']
# )

# ========= Test 4 =================
# RMSprop with a small learning rate; categorical cross-entropy matches the
# one-hot labels, and accuracy is tracked alongside the loss during training.
optimizer = RMSprop(lr=0.0005, rho=0.9, epsilon=1e-06, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

print('Training..............')
"""
（1）iteration：表示1次迭代，每次迭代更新1次网络结构的参数；
（2）batch_size：1次迭代所使用的样本量；
（3）epoch：1个epoch表示过了1遍训练集中的所有样本。
需要补充的是，在深度学习领域中，常用随机梯度下降算法（Stochastic Gradient Descent, SGD）
训练深层结构，它有一个好处就是并不需要遍历全部的样本，当数据量非常大时十分有效。此时，可根据实际
问题来定义epoch，例如定义10000次迭代为1个epoch，若每次迭代的batch_size设为256，那么1个
epoch相当于过了2560000个训练样本。

"""
# Train for two full passes over the training set, 32 samples per gradient step.
model.fit(X_train, y_train, batch_size=32, epochs=2)

print('\nTesting..............')
# evaluate() returns [loss, accuracy] because both were requested at compile time.
loss, accuracy = model.evaluate(X_test, y_test)
print('\nTest lost:', loss)
print('Test accuracy', accuracy)
