# coding=utf8

# ocr training by vista
import os
import numpy

import loaders.ocr_loader as ml
import tensorflow as tf

# Load the character data set via the project loader.
# NOTE(review): the meaning of the True flag is defined in
# loaders.ocr_loader — presumably selects the training split; confirm.
mnist = ml.get_mnist_set(True)

# Input image geometry: 56 rows x 28 columns.
# NOTE(review): taller than standard 28x28 MNIST — confirm the loader
# really emits 56x28 images.
g_rows = 56
g_cols = 28

# Number of output classes (36 — presumably digits 0-9 plus letters A-Z;
# verify against the loader's label encoding).
n_class = 36

# TF1-style interactive session and feed placeholders:
#   x  - flattened images, shape [batch, g_rows * g_cols]
#   y_ - one-hot ground-truth labels, shape [batch, n_class]
sess = tf.InteractiveSession()
x = tf.placeholder("float", shape=[None, g_rows * g_cols])
y_ = tf.placeholder("float", shape=[None, n_class])


def weight_variable(shape, name=None):
    """Create a trainable weight tensor.

    Values are drawn from a truncated normal (stddev 0.1) so the network
    starts with small, slightly-varied weights.

    shape - list of ints giving the variable's shape
    name  - optional TF variable name
    """
    init_val = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_val, name=name)


def bias_variable(shape, name=None):
    """Create a trainable bias tensor filled with a small constant.

    The 0.1 initial value keeps ReLU units initially active.

    shape - list of ints giving the variable's shape
    name  - optional TF variable name
    """
    init_val = tf.constant(0.1, shape=shape)
    return tf.Variable(init_val, name=name)


# input - [batch, in_height, in_width, in_channels]
# filter - [filter_height, filter_width, in_channels, out_channels (number of kernels)]
# input.in_channels = filter.in_channels
# strides - stride of the convolution along each input dimension; a 1-D vector of length 4
# padding - one of ["SAME", "VALID"], selecting the convolution scheme;
#       with 'SAME' the kernel may rest on the image border, so output_size = input_size
# return - feature map

# NHWC - data format
# output[b, i, j, k] =
#    sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]
def conv2d(x, W):
    """2-D convolution with unit stride and SAME padding.

    x - input batch in NHWC layout [batch, height, width, channels]
    W - filter tensor [filter_h, filter_w, in_channels, out_channels]
    Returns a feature map with the same spatial size as the input
    (SAME padding, stride 1 in every dimension).
    """
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')


# value - [batch, height, width, channels]
# ksize - The size of the window for each dimension of the input tensor. [ 1 * height * width * 1 ]
# strides - The stride of the sliding window for each dimension of the input tensor - [ 1 * stride * stride * 1 ]
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 — halves height and width.

    x - input batch in NHWC layout [batch, height, width, channels]
    """
    # Same 2x2 window used as both the pooling kernel and the stride.
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')

# First convolution layer: 5x5 kernels, 1 input channel, 32 output channels.
# (Named variants kept for reference — useful when restoring by name.)
# W_conv1 = weight_variable([5, 5, 1, 32],"W1")
# b_conv1 = bias_variable([32],"b1")
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

# Reshape flat input rows back into NHWC images: [-1, 56, 28, 1].
x_image = tf.reshape(x, [-1,g_rows,g_cols,1])

# SAME convolution preserves spatial size: [-1, 56, 28, 32].
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

# 2x2 pool halves each spatial dimension: [-1, 28, 14, 32].
h_pool1 = max_pool_2x2(h_conv1)

# Second convolution layer: 5x5 kernels, 32 -> 64 channels.
# (Named variants kept for reference — useful when restoring by name.)
# W_conv2 = weight_variable([5, 5, 32, 64],"W2")
# b_conv2 = bias_variable([64],"b2")

W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# SAME convolution preserves spatial size: [-1, 28, 14, 64].
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

# Second 2x2 pool halves each spatial dimension again: [-1, 14, 7, 64].
h_pool2 = max_pool_2x2(h_conv2)

# After two 2x2 pools each spatial dimension is divided by 4, so the
# flattened feature length feeding the fully-connected layer is
# (56/4) * (28/4) * 64 = 14 * 7 * 64 = 6272.
# FIX: use floor division — under Python 3 the original "/" yields a
# float, which TensorFlow rejects as a tensor dimension.
flat_size = (g_rows // 4) * (g_cols // 4) * 64

W_fc1 = weight_variable([flat_size, 1024])
b_fc1 = bias_variable([1024])

# Flatten the pooled feature maps: [-1, 6272].
h_pool2_flat = tf.reshape(h_pool2, [-1, flat_size])
# Fully-connected layer output: [-1, 1024].
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout keep probability — feed 0.5 while training, 1.0 for evaluation.
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Output layer: 1024 features -> n_class logits.
W_fc2 = weight_variable([1024, n_class])
b_fc2 = bias_variable([n_class])

# Softmax over the class logits: [-1, n_class].
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Cross-entropy loss.
# FIX: clip the softmax output away from 0 so tf.log never produces
# -inf/NaN once a probability underflows to exactly 0.
# (tf.nn.softmax_cross_entropy_with_logits on the pre-softmax logits
# would be the numerically preferred formulation, but it would change
# the graph structure around y_conv.)
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))

# Adam optimizer with a small fixed learning rate.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy: fraction of rows whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

sess.run(tf.global_variables_initializer())

# Saver over every global variable — used below for checkpoint restore.
saver = tf.train.Saver(tf.global_variables())

# Training loop kept for reference (Python 2 print syntax):
# for i in range(200):
#     batch = mnist.train.next_batch(10)
#     if i%10 == 0:
#         train_accuracy = accuracy.eval(feed_dict={
#             x:batch[0], y_: batch[1], keep_prob: 1.0})
#         print "step %d, training accuracy %g"%(i, train_accuracy)
#     train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

# Restore previously trained weights if a checkpoint exists on disk;
# the .index file is the cheapest marker that a TF checkpoint is present.
# NOTE(review): hard-coded absolute path — consider making it configurable.
model_dir = '/Users/vista/PycharmProjects/data/model/ocr_0/'
# saver.save(sess, model_dir + 'LeNet5')
if os.path.exists(model_dir + 'LeNet5.index'):
    saver.restore(sess, model_dir + 'LeNet5')

# Single-sample inference example kept for reference
# (note: tf_vista below is not a name defined in this file):
# aaa = mnist.train.images[0]
# aaa = numpy.reshape(aaa,[-1,aaa.shape[0]])
#
# print sess.run(tf_vista.argmax(y_conv,1),feed_dict={x: aaa, keep_prob: 1.0})

def save_sess(sess0, export_dir='/Users/vista/CLionProjects/opencv-ocr/model/'):
    """Dump every trained parameter tensor to raw .dat files.

    Each variable is evaluated in the given session and written with
    numpy's ndarray.tofile — raw bytes, no header — which is presumably
    what the downstream C++/OpenCV loader expects (confirm byte order
    and dtype against that loader).

    sess0      - session holding the trained variable values
    export_dir - destination directory; the default preserves the
                 original hard-coded path, so save_sess(sess) is
                 backward compatible
    """
    params = [
        (W_conv1, 'w1.dat'), (b_conv1, 'b1.dat'),
        (W_conv2, 'w2.dat'), (b_conv2, 'b2.dat'),
        (W_fc1, 'Wf1.dat'), (b_fc1, 'bf1.dat'),
        (W_fc2, 'Wf2.dat'), (b_fc2, 'bf2.dat'),
    ]
    for var, filename in params:
        # ndarray.tofile writes the flattened array in C order.
        sess0.run(var).tofile(os.path.join(export_dir, filename))

# Export the current weights for the external (C++/OpenCV) consumer.
save_sess(sess)

# Report accuracy over the whole set with dropout disabled (keep_prob=1.0).
# NOTE(review): despite the "test accuracy" label this evaluates on
# mnist.train — confirm whether the loader exposes a held-out test split.
# FIX: print() with a single argument behaves identically on Python 2
# and 3, unlike the original Python-2-only print statement.
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: mnist.train.images, y_: mnist.train.labels, keep_prob: 1.0}))