# Lab 11 MNIST and Convolutional Neural Network
import tensorflow as tf
import random

# from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import numpy as np
import os

# Uncomment to suppress TensorFlow's informational C++ log messages:
# os.environ["TF_CPP_MIN_LOG_LEVEL"]='2'
tf.set_random_seed(777)  # fix the graph-level seed for reproducibility

# Load the raw image data: each row is 256 pixel values followed by a label.
dat = np.loadtxt('img_16_10k.txt', delimiter=',', dtype=int)

# Features: all columns but the last, scaled from [0, 255] down to [0, 1].
imgArr = dat[:, 0:-1] / 255
total = imgArr.shape[0]  # total number of samples

# Labels: last column, one-hot encoded into 10 classes.
Y_one_hot = np.eye(10)[dat[:, -1]]
print(Y_one_hot)

g_b = 0  # global read cursor into the training arrays


def next_batch(size):
    """Return the next `size` training samples as an (X, Y) pair.

    Slices the module-level `imgArr` / `Y_one_hot` arrays starting at the
    global cursor `g_b`, then advances the cursor.  `train()` resets `g_b`
    to 0 at the start of every epoch, so batches never wrap around.
    """
    global g_b
    start = g_b
    g_b = start + size
    return imgArr[start:g_b], Y_one_hot[start:g_b]
# Directory where TensorBoard summary logs are written.
TB_SUMMARY_DIR = './cnnfiledir1'

def train():
    """Build, train and evaluate a 2-conv-layer CNN on the 16x16 image data.

    Uses the module-level `imgArr` / `Y_one_hot` arrays: the first 80% of
    samples are used for training (via `next_batch`) and the remaining 20%
    for evaluation.  Loss summaries are written to `TB_SUMMARY_DIR` for
    TensorBoard.  Prints per-epoch average cost, final test accuracy, and
    the label/prediction for one randomly chosen test sample.
    """
    global g_b

    # Hyper parameters
    learning_rate = 0.01   # Adam step size
    training_epochs = 15   # full passes over the training split
    batch_size = 100       # samples per gradient step

    # Input/output placeholders
    X = tf.placeholder(tf.float32, [None, 256])
    X_img = tf.reshape(X, [-1, 16, 16, 1])   # 16x16x1 grayscale images
    Y = tf.placeholder(tf.float32, [None, 10])

    # Dropout keep-probability. Defaults to 1.0 (no dropout) so evaluation
    # runs the deterministic network; training feeds 0.9 explicitly.
    # BUGFIX: the original hard-coded keep_prob=0.9 into the graph, which
    # left dropout active during test-set evaluation and prediction.
    keep_prob = tf.placeholder_with_default(1.0, shape=())

    # Conv layer 1: 3x3 kernel, 1 -> 64 channels, stride 1, SAME padding.
    W1 = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.01))
    L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')  # (?, 16, 16, 64)
    L1 = tf.nn.relu(L1)
    L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  # (?, 8, 8, 64)
    L1 = tf.nn.dropout(L1, keep_prob=keep_prob)

    # Conv layer 2: 3x3 kernel, 64 -> 128 channels, stride 1, SAME padding.
    W2 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
    L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')  # (?, 8, 8, 128)
    L2 = tf.nn.relu(L2)
    L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  # (?, 4, 4, 128)
    L2 = tf.nn.dropout(L2, keep_prob=keep_prob)

    # Flatten to a vector for the fully-connected layer.
    L2_flat = tf.reshape(L2, [-1, 4 * 4 * 128])

    # Final FC layer: 4*4*128 inputs -> 10 class logits, Xavier-initialized.
    W = tf.get_variable("W",
                        shape=[4 * 4 * 128, 10],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.Variable(tf.random_normal([10]))
    logits = tf.matmul(L2_flat, W) + b

    # Softmax cross-entropy cost, minimized with Adam.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    tf.summary.scalar("loss", cost)  # record the loss for TensorBoard

    # Accuracy graph (used for evaluation after training).
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    global_step = 0
    summary = tf.summary.merge_all()

    # `with` ensures the session (and its resources) is closed on exit.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter(TB_SUMMARY_DIR, sess.graph)

        print('Learning started. It takes sometime.')
        for epoch in range(training_epochs):
            avg_cost = 0
            total_batch = int(total * 0.8 / batch_size)  # batches per epoch
            g_b = 0  # rewind the batch cursor for this epoch

            for i in range(total_batch):
                batch_xs, batch_ys = next_batch(batch_size)
                # Dropout is only enabled while training.
                feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.9}
                s, c, _ = sess.run([summary, cost, optimizer], feed_dict=feed_dict)
                avg_cost += c / total_batch
                writer.add_summary(s, global_step=global_step)
                global_step = global_step + 1

            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

        print('Learning Finished!')

        # Evaluate on the held-out final 20% of samples.
        # keep_prob is not fed, so it defaults to 1.0 (dropout disabled).
        split = int(total * 0.8)
        print('Accuracy:', sess.run(accuracy, feed_dict={
              X: imgArr[split:], Y: Y_one_hot[split:]}))

        # Pick one random test sample and show its label vs. prediction.
        r = random.randint(split, total - 1)
        print("Label: ", sess.run(tf.argmax(Y_one_hot[r:r + 1], 1)))
        print("Prediction: ", sess.run(
            tf.argmax(logits, 1), feed_dict={X: imgArr[r:r + 1]}))

train()

# Sample output from a previous training run, kept for reference:
'''
Epoch: 0001 cost = 0.340291267
Epoch: 0002 cost = 0.090731326
Epoch: 0003 cost = 0.064477619
Epoch: 0004 cost = 0.050683064
Epoch: 0005 cost = 0.041864835
Epoch: 0006 cost = 0.035760704
Epoch: 0007 cost = 0.030572132
Epoch: 0008 cost = 0.026207981
Epoch: 0009 cost = 0.022622454
Epoch: 0010 cost = 0.019055919
Epoch: 0011 cost = 0.017758641
Epoch: 0012 cost = 0.014156652
Epoch: 0013 cost = 0.012397016
Epoch: 0014 cost = 0.010693789
Epoch: 0015 cost = 0.009469977
Learning Finished!
Accuracy: 0.9885
'''
