# !/usr/bin/python3.5
# -*- coding: utf-8 -*-
import sys
import os
import time
import numpy as np
import tensorflow as tf
from PIL import Image, ImageFilter


# Hyper-parameters and the TF1 interactive session (soft placement lets ops
# fall back to CPU when no GPU kernel is available).
sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True))
NUM_CLASSES = 26   # letters A-Z, one class per letter
SIZE = 1280        # flattened image length: WIDTH * HEIGHT
WIDTH = 32         # image width in pixels
HEIGHT = 40        # image height in pixels
iterations = 300   # maximum number of training epochs

path = os.path.dirname(__file__)
# Per-class training-image directory template; '%s' is substituted with the
# class label index when the dataset is scanned below.
# BUG FIX: the original path contained no '%s' placeholder, so `img_path % i`
# raised "not all arguments converted during string formatting".
# TODO(review): confirm the on-disk layout really is img_ret/<label>/.
img_path = os.path.join(path, 'img_ret', '%s') + os.sep


x = tf.placeholder(tf.float32, shape=[None, SIZE])
y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])
# BUG FIX: pixels are stored at index w + h*WIDTH (row-major, HEIGHT rows of
# WIDTH pixels), so the 4-D layout is (batch, HEIGHT, WIDTH, 1). The original
# [-1, WIDTH, HEIGHT, 1] scrambled the spatial structure. The flattened size
# after two 2x2 poolings (10*8*32 == 8*10*32) is unchanged, so the rest of
# the graph is unaffected.
x_image = tf.reshape(x, [-1, HEIGHT, WIDTH, 1])


def weight_variable(shape, name):
    """Return a trainable weight tensor initialised from a truncated
    normal distribution with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)

def bias_variable(shape, name):
    """Return a trainable bias tensor initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)

def conv2d(x, W):
    """2-D convolution with stride 1 and zero padding that preserves
    the spatial dimensions."""
    stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=stride, padding='SAME')

def max_pool(x):
    """2x2 max pooling with stride 2 ('SAME' padding): halves each
    spatial dimension."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")

# Dataset preparation: first pass over the per-class training directories
# just counts the images so the arrays can be allocated up front.
input_count = 0
for label in range(NUM_CLASSES):
    class_dir = img_path % label  # one sub-directory per class label
    for _root, _dirs, files in os.walk(class_dir):
        input_count += len(files)
# One flattened binary image per row, one one-hot label row per image.
input_images = np.zeros((input_count, SIZE), dtype=int)
input_labels = np.zeros((input_count, NUM_CLASSES), dtype=int)

 # 第二次遍历图片目录是为了生成图片数据和标签
index = 0
for i in range(0, NUM_CLASSES):
    dir = img_path % i  # 这里可以改成你自己的图片目录，i为分类标签
    for rt, dirs, files in os.walk(dir):
        for filename in files:
            filename = dir + filename
            img = Image.open(filename)
            im = img.convert('L')
            width = img.size[0] #32
            height = img.size[1] #40
            for h in range(0, height):
                for w in range(0, width):
                    # 通过这样的处理，使数字的线条变细，有利于提高识别准确率
                    if im.getpixel((w, h)) > 230:
                        input_images[index][w + h * width] = 0
                    else:
                        input_images[index][w + h * width] = 1
            input_labels[index][i] = 1
            index += 1


# Validation set, first pass: count the images so the arrays can be
# allocated with the right number of rows.
val_count = 0
for label in range(NUM_CLASSES):
    class_dir = 'tf_car_license_dataset/train_images/validation-set/letters/%s/' % label
    for _root, _dirs, files in os.walk(class_dir):
        val_count += len(files)
# Same layout as the training arrays: one binary image row and one
# one-hot label row per validation image.
val_images = np.zeros((val_count, SIZE), dtype=int)
val_labels = np.zeros((val_count, NUM_CLASSES), dtype=int)

# Validation set, second pass: load and binarise each image exactly as the
# training images were, and fill in the one-hot label row.
index = 0
for label in range(NUM_CLASSES):
    class_dir = 'tf_car_license_dataset/train_images/validation-set/letters/%s/' % label
    for _root, _dirs, files in os.walk(class_dir):
        for name in files:
            gray = Image.open(class_dir + name).convert('L')
            img_w, img_h = gray.size
            for row in range(img_h):
                for col in range(img_w):
                    # Same stroke-thinning threshold as the training data:
                    # dark pixels become 1, light pixels 0.
                    pixel = gray.getpixel((col, row))
                    val_images[index][col + row * img_w] = 0 if pixel > 230 else 1
            val_labels[index][label] = 1
            index += 1


# First convolutional layer: 5x5 kernels, 1 input channel -> 16 feature
# maps, followed by 2x2 max pooling (spatial size halves).
w_conv1 = weight_variable([5, 5, 1, 16], 'w_conv1')
b_conv1 = bias_variable([16], 'b_conv1')
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool(h_conv1)

# Second convolutional layer: 16 -> 32 feature maps, pooled again.
w_conv2 = weight_variable([5, 5, 16, 32], 'w_conv2')
b_conv2 = bias_variable([32], 'b_conv2')
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool(h_conv2)

# Fully connected layer with ReLU and dropout. After two 2x2 poolings the
# 32x40 input is reduced to 8x10 with 32 channels (8*10*32 = 2560 features).
w_fc1 = weight_variable([8 * 10 * 32, 256], 'w_fc1')
b_fc1 = bias_variable([256], 'b_fc1')
h_pool_flat = tf.reshape(h_pool2, [-1, 8 * 10 * 32])
h_fc1 = tf.nn.relu(tf.matmul(h_pool_flat, w_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
h_drop = tf.nn.dropout(h_fc1, keep_prob)

# Output layer: softmax over the NUM_CLASSES letter classes.
w_fc2 = weight_variable([256, NUM_CLASSES], 'w_fc2')
b_fc2 = bias_variable([NUM_CLASSES], 'b_fc2')
y_conv = tf.nn.softmax(tf.matmul(h_drop, w_fc2) + b_fc2)

# Cross-entropy loss. BUG FIX: the original computed tf.log(y_conv)
# directly, which produces NaN as soon as any softmax output underflows to
# exactly 0; clipping keeps the loss finite. (The commented-out
# softmax_cross_entropy_with_logits variant was also wrong: it applied
# softmax twice by feeding it the already-softmaxed y_conv.)
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)),
                   reduction_indices=[1]))
train = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
tf.global_variables_initializer().run()
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Training loop. The training set is fed in mini-batches of `batch_size`;
# because the image count is usually not a multiple of the batch size, the
# remaining images are fed as one final smaller batch.
batch_size = 50
batches_count = input_count // batch_size
remainder = input_count % batch_size
# BUG FIX: the original always reported batches_count + 1 batches, even
# when remainder == 0 and there is no final partial batch.
print("训练数据集分成 %s 批, 前面每批 %s 个数据，最后一批 %s 个数据"
      % (batches_count + (1 if remainder > 0 else 0), batch_size, remainder))
# NOTE(review): tf.device() only affects graph *construction*; wrapping
# session.run calls in it is a no-op. Kept for fidelity with the original.
with tf.device('/GPU:0'):
    for it in range(iterations):
        # Full batches first, then the remainder (if any).
        for n in range(batches_count):
            lo, hi = n * batch_size, (n + 1) * batch_size
            train.run(feed_dict={x: input_images[lo:hi],
                                 y_: input_labels[lo:hi],
                                 keep_prob: 0.5})
        if remainder > 0:
            # BUG FIX: the original sliced [start_index:input_count-1],
            # which silently dropped the last training sample every epoch.
            start_index = batches_count * batch_size
            train.run(feed_dict={x: input_images[start_index:input_count],
                                 y_: input_labels[start_index:input_count],
                                 keep_prob: 0.5})
        # Evaluate on the validation set every 5 epochs; stop early once
        # accuracy is essentially perfect and at least 150 epochs have run.
        if it % 5 == 0:
            val_feed = {x: val_images, y_: val_labels, keep_prob: 1.0}
            iterate_accuracy = accuracy.eval(feed_dict=val_feed)
            # BUG FIX: the original printed the `cross_entropy` Tensor
            # object itself; evaluate it to report the actual loss value.
            val_loss = cross_entropy.eval(feed_dict=val_feed)
            print('第 %d 次训练迭代: 准确率 %0.5f%%' % (it, iterate_accuracy * 100),
                  val_loss)
            if iterate_accuracy >= 0.9999 and it >= 150:
                break

# Persist the trained model.
# BUG FIX: the original created the directory 'save-vehicle' but saved the
# checkpoint into 'save-vehicle-LETTERS/', so saving failed whenever that
# directory did not already exist. Create and save in the same directory.
SAVE_DIR = 'save-vehicle-LETTERS'
saver = tf.train.Saver()
if not os.path.exists(SAVE_DIR):
    print('不存在训练数据保存目录，现在创建保存目录')
    os.makedirs(SAVE_DIR)
saver_path = saver.save(sess, os.path.join(SAVE_DIR, "model.ckpt"))
print('完成训练!')