import tensorflow as tf
import numpy as np
import os
import cv2

def _encode_label(digits):
    """Encode a 4-char digit string as a 40-dim multi-hot vector.

    Position p's digit d sets index p * 10 + d, matching the layout the
    network's 40-unit output layer is trained against.
    """
    vec = [0] * 40
    for pos, ch in enumerate(digits):
        vec[pos * 10 + int(ch)] = 1
    return vec


def read_img(img_dir='./trainImage'):
    """Load grayscale captcha images and their labels from *img_dir*.

    Each filename is assumed to start with the 4-digit captcha text
    (e.g. '1234_0.png') — TODO confirm against the dataset.

    Args:
        img_dir: directory holding the training images
            (default keeps the original hard-coded path).

    Returns:
        (images, labels): images are flat float arrays scaled to [0, 1]
        (cv2.imread(..., 0) reads grayscale); labels are 40-element
        multi-hot lists (4 digit positions x 10 classes).
    """
    # os.walk may visit several directories; like the original code we
    # keep the file list of the last one visited.
    names = []
    for _root, _dirs, filenames in os.walk(img_dir):
        names = filenames

    img_msg = [cv2.imread(os.path.join(img_dir, name), 0).ravel() / 255
               for name in names]
    # The original mutated `names` in place while iterating it and used
    # an O(n^2) list.index() lookup; build a fresh label list instead.
    labels = [_encode_label(name[:4]) for name in names]
    return img_msg, labels

# Input image geometry; images are fed flattened, so each sample is a
# vector of img_shape[0] * img_shape[1] = 9600 floats.
# NOTE(review): assumed (height, width) = (160, 60) — confirm the
# orientation against the actual training images.
img_shape = (160,60)
# Flattened grayscale image batch.
x = tf.placeholder(tf.float32,[None,img_shape[0] * img_shape[1]])
# Multi-hot label: 4 digit positions x 10 digit classes = 40 entries.
y = tf.placeholder(tf.float32,[None,40])
# Dropout keep probability, fed at run time (train < 1.0, eval = 1.0).
keep_prob = tf.placeholder(tf.float32)

def img_cnn(x, img_shape):
    """Three conv/pool/dropout stages plus a dense layer -> 40 logits.

    Args:
        x: flattened image batch, shape [None, img_shape[0] * img_shape[1]].
        img_shape: spatial shape the flat input is reshaped to; the dense
            layer's 8*20*64 input size assumes (160, 60) after three 2x2
            poolings (160/8 = 20, ceil(60/8) = 8 with SAME padding).

    Returns:
        Raw logits of shape [None, 4 * 10] — 4 digit positions x 10
        classes.  No softmax/sigmoid is applied here: the training loss
        (sigmoid_cross_entropy_with_logits) expects raw logits, and
        argmax-based accuracy is unaffected by the missing squashing.
    """
    x = tf.reshape(x, [-1, img_shape[0], img_shape[1], 1])

    # --- stage 1: 3x3 conv, 32 filters, ReLU, 2x2 max-pool, dropout ---
    w = tf.Variable(tf.random_normal([3, 3, 1, 32]))
    b = tf.Variable(tf.random_normal([32]))
    one_convolution = tf.nn.relu(
        tf.nn.bias_add(tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME'), b))
    one_max_poll = tf.nn.max_pool(one_convolution, ksize=[1, 2, 2, 1],
                                  strides=[1, 2, 2, 1], padding='SAME')
    one_output = tf.nn.dropout(one_max_poll, keep_prob)

    # --- stage 2: 3x3 conv, 64 filters, ReLU, 2x2 max-pool, dropout ---
    w1 = tf.Variable(tf.random_normal([3, 3, 32, 64]))
    b1 = tf.Variable(tf.random_normal([64]))
    two_convolution = tf.nn.relu(
        tf.nn.bias_add(tf.nn.conv2d(one_output, w1, strides=[1, 1, 1, 1], padding='SAME'), b1))
    two_max_pool = tf.nn.max_pool(two_convolution, ksize=[1, 2, 2, 1],
                                  strides=[1, 2, 2, 1], padding='SAME')
    two_dropout = tf.nn.dropout(two_max_pool, keep_prob)

    # --- stage 3: 3x3 conv, 64 filters, ReLU, 2x2 max-pool, dropout ---
    w2 = tf.Variable(tf.random_normal([3, 3, 64, 64]))
    b2 = tf.Variable(tf.random_normal([64]))
    there_convolution = tf.nn.relu(
        tf.nn.bias_add(tf.nn.conv2d(two_dropout, w2, strides=[1, 1, 1, 1], padding='SAME'), b2))
    there_max_pool = tf.nn.max_pool(there_convolution, ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1], padding='SAME')
    there_dropout = tf.nn.dropout(there_max_pool, keep_prob)

    # --- fully-connected layer: 8*20*64 pooled features -> 1024 units ---
    w_d = tf.Variable(tf.random_normal([8 * 20 * 64, 1024]))
    b_d = tf.Variable(tf.random_normal([1024]))
    dense = tf.reshape(there_dropout, [-1, w_d.get_shape().as_list()[0]])
    dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
    dense = tf.nn.dropout(dense, keep_prob)

    # --- output layer: 1024 -> 4 * 10 = 40 logits ---
    w_out = tf.Variable(tf.random_normal([1024, 4 * 10]))
    b_out = tf.Variable(tf.random_normal([4 * 10]))
    out = tf.add(tf.matmul(dense, w_out), b_out)
    # BUGFIX: the original applied tf.nn.softmax here, but the caller
    # feeds this tensor as `logits` to sigmoid_cross_entropy_with_logits,
    # which applies its own sigmoid — stacking two nonlinearities and
    # crushing the gradient.  Return raw logits instead.
    return out

# Build the network; `output` has shape [None, 40].
output = img_cnn(x,img_shape)
print(output)
# NOTE(review): if img_cnn applies a softmax to its return value, feeding
# it here as `logits` to sigmoid_cross_entropy_with_logits stacks two
# nonlinearities — confirm img_cnn returns raw logits.  Sigmoid CE is the
# right family for the 4-hot (multi-label) 40-dim targets.
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=y))
    # A learning-rate schedule (large at first, decaying) would speed up
    # training; a fixed rate is used here.
    # tf.train.AdamOptimizer() implements the Adam optimization algorithm.
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Load the whole dataset into memory up front.
    batch_x, batch_y = read_img()
    batch_x, batch_y = np.array(batch_x), np.array(batch_y)
    print(batch_x.shape, batch_y.shape)

    # Build the evaluation ops ONCE, outside the loop — the original
    # created them every iteration, adding new graph nodes each step.
    # Per-character accuracy: view the 40-dim vectors as 4 digits x 10
    # classes and compare argmax per digit (a single 40-way argmax, as
    # before, is meaningless for a 4-hot label).
    pred_digits = tf.argmax(tf.reshape(output, [-1, 4, 10]), 2)
    true_digits = tf.argmax(tf.reshape(y, [-1, 4, 10]), 2)
    correct_prediction = tf.equal(pred_digits, true_digits)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    batch_size = 300
    n = len(batch_x)
    s = 0
    for i in range(1000):
        # Clamp the slice end and wrap `s` around the dataset — the
        # original let s grow past len(batch_x), feeding empty batches.
        end = min(s + batch_size, n)
        _, loss_ = sess.run([optimizer, loss],
                            feed_dict={x: batch_x[s:end],
                                       y: batch_y[s:end],
                                       keep_prob: 0.75})
        print('当前loss:', loss_)
        print(i)
        # Advance by 100 samples per step (overlapping windows), as in
        # the original, but wrap instead of running off the end.
        s = (s + 100) % max(n, 1)

        print("第%d轮训练" % (i + 1))
        print("当前正确率： ")
        # Evaluate with dropout DISABLED (keep_prob 1.0); the original
        # evaluated with keep_prob 0.75, understating accuracy.
        print(sess.run(accuracy, feed_dict={x: batch_x[0:1500],
                                            y: batch_y[0:1500],
                                            keep_prob: 1.0}))
        print(s)
