﻿#!/usr/bin/python
# -*- coding: utf-8 -*-  
from sklearn.model_selection import train_test_split
import sys
import socket
import os
import pysequoiadb
from pysequoiadb import client
from pysequoiadb.error import SDBBaseError
from pysequoiadb.error import SDBEndOfCursor
from tensorflow.python import graph_util
import argparse
import time
import subprocess
import random
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, VARCHAR, Integer, FLOAT, String
from sqlalchemy.orm import sessionmaker
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt

from PIL import Image

# Use the non-interactive Agg backend so loss figures can be saved on a headless host.
plt.switch_backend('agg')
# Silence TensorFlow C++ log output below ERROR level.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

def DefineArgs(argv=None):
    """Build and parse this script's command-line flags.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv — identical to the original
            zero-argument behavior, so existing callers are unaffected.

    Returns:
        argparse.Namespace with the parsed flags.
    """
    flags = argparse.ArgumentParser()
    flags.add_argument('-gpu', default=0, type=float, help='gpu number')
    flags.add_argument('-action', default='train', help='action to execute')
    flags.add_argument('-modelid', help='id of current model', dest="modelid")
    flags.add_argument('-iter', type=int, default=500,
                       help='numbers of iteration')
    flags.add_argument('-ddir', default='training-set', help='dir of train data')
    flags.add_argument('-lr', type=float, default=1e-4, help='learn rate')
    flags.add_argument('-rows', type=int, default=40, help='rows of image')
    flags.add_argument('-cols', type=int, default=20, help='cols of image')
    flags.add_argument('-batch', type=int, default=60, help='batch size')
    flags.add_argument('-name', default='ocr', help='name of model')
    flags.add_argument('-ip', type=str, default="10.244.171.143",
                       help='ip of sdb that will to be checked')
    flags.add_argument('-mysql',
                       default="mysql+pymysql://cddba:foxconn2019..@10.244.171.150:3306/aimonitor",
                       help='jdbc of mysql')
    FLAGS = flags.parse_args(argv)
    return FLAGS

# Declarative base shared by the ORM model below.
base = declarative_base()

class trainInfo(base):
    """ORM row in `traininfo_ocr` tracking one training run's status and artifacts."""
    __tablename__ = 'traininfo_ocr'
    # Primary key: the model id supplied via the -modelid flag.
    modelid = Column(VARCHAR(40), primary_key=True)
    modelname = Column(VARCHAR(30), nullable=False)
    # Status code; the call sites in this script use 1=training, 2=done,
    # 3=failed (0 is the column default) — confirm against the DB schema.
    status = Column(Integer, default=0, nullable=False)
    # Last-update timestamp, stored as 'YYYY/MM/DD HH:MM:SS' text.
    update_time = Column(VARCHAR(30), nullable=False)
    # File name of the exported frozen .pb graph.
    pb_path = Column(VARCHAR(100), nullable=False)
    # File name of the saved loss-curve figure.
    loss_path = Column(VARCHAR(100), nullable=False)
    accuracy = Column(FLOAT(),nullable=False)
    # File name of the label-index mapping text file.
    label_txt = Column(VARCHAR(100))

def MysqlConnect(jdbc: str, status=0, pb=" ", fig=" ", acc=0.0, txt=' '):
    """Update the `traininfo_ocr` row identified by the global FLAGS.modelid.

    Args:
        jdbc: SQLAlchemy connection URL for the monitoring MySQL database.
        status: status code to record (this script uses 1=training, 2=done,
            3=failed).
        pb: file name of the exported frozen .pb graph.
        fig: file name of the saved loss figure.
        acc: final validation accuracy.
        txt: file name of the label-index text file.

    Silently returns when no row matches FLAGS.modelid. Reads the
    module-level FLAGS; commits and always closes the session.
    """
    engine = create_engine(jdbc)
    Session = sessionmaker(engine)
    session = Session()
    # BUG FIX: the original leaked the session (no close) when it returned
    # early on a missing row; try/finally guarantees cleanup on every path.
    try:
        print(f'Flags.modelid,{FLAGS.modelid}')
        traininfo = session.query(trainInfo).filter(trainInfo.modelid == FLAGS.modelid).one_or_none()
        if traininfo is None:
            print('modelid not find')
            return
        print(f'update status {status}')
        traininfo.status = status
        traininfo.update_time = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time()))
        traininfo.pb_path = pb
        traininfo.loss_path = fig
        traininfo.accuracy = acc
        traininfo.label_txt = txt
        # add() is a no-op for an already-persistent object; kept for parity
        # with the original code's intent.
        session.add(traininfo)
        session.commit()
    finally:
        session.close()

# BUG FIX: the original did `seed = random.seed(20)`, but random.seed()
# returns None, so train_test_split(random_state=seed) below was effectively
# unseeded and the split was not reproducible. Keep an integer seed instead.
seed = 20
random.seed(seed)
FLAGS = DefineArgs()
WIDTH = FLAGS.cols    # image width in pixels
HEIGHT = FLAGS.rows   # image height in pixels
label_txt = {}        # class index -> directory (label) name; filled during training
ip = FLAGS.ip
print("ip", ip)
SIZE = WIDTH * HEIGHT           # flattened pixel count per image
iterations = FLAGS.iter
gpun = FLAGS.gpu
# NOTE(review): LETTERS_DIGITS is never populated anywhere in this file, yet
# the predict branch indexes into it — that path would raise IndexError; confirm
# whether it is filled by an external wrapper.
LETTERS_DIGITS = []
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpun)
# NOTE(review): the fallback concatenates without '://', producing e.g.
# 'mysql+pymysqluser:...'; presumably 'mysql+pymysql://' + FLAGS.mysql was
# intended — left as-is to preserve behavior, confirm with the caller.
mysql = FLAGS.mysql if 'mysql' in FLAGS.mysql else 'mysql+pymysql' + FLAGS.mysql
basedir = f'{FLAGS.ddir}'
SAVER_DIR = "/aidfs/003/train-saver/digits/"
oridir = '/aidfs/003/'
# One class per subdirectory of the training-data directory.
NUM_CLASSES = len(os.listdir(basedir))
license_num = ""
time_begin = time.time()

print('numclass:', NUM_CLASSES)

# Mark the run as "training" (status=1) in the monitoring DB.
MysqlConnect(mysql, status=1)

# Input nodes: flattened pixel vectors and one-hot labels (the digit/class
# each image represents).
x = tf.placeholder(tf.float32,name="inputs_placeholder", shape=[None, SIZE])
y_ = tf.placeholder(tf.float32,name="labels_placeholder", shape=[None,NUM_CLASSES])

# Reshape flat pixels back to NHWC (single channel) for the conv layers.
x_image = tf.reshape(x, [-1, HEIGHT, WIDTH, 1])

# Convolution building block: conv2d + bias + ReLU + max-pool.
def conv_layer(inputs, W, b, conv_strides, kernel_size, pool_strides, padding):
    """Apply conv2d with the given strides/padding, add bias, ReLU, then max-pool (SAME)."""
    convolved = tf.nn.conv2d(inputs, W, strides=conv_strides, padding=padding)
    activated = tf.nn.relu(convolved + b)
    pooled = tf.nn.max_pool(activated, ksize=kernel_size, strides=pool_strides, padding='SAME')
    return pooled

# Fully-connected layer helper.
def full_connect(inputs, W, b):
    """Return relu(inputs @ W + b)."""
    pre_activation = tf.matmul(inputs, W) + b
    return tf.nn.relu(pre_activation)

def freeze_graph(input_checkpoint, output_graph):
    """Restore a checkpoint and export it as a frozen .pb GraphDef.

    Args:
        input_checkpoint: checkpoint path prefix ('<prefix>.meta' must exist).
        output_graph: destination file path for the serialized frozen graph.
    """
    # Graph nodes that must survive freezing: input placeholder, dropout
    # keep-probability, and the softmax output.
    keep_nodes = "inputs_placeholder,keep_prob,correct_sm"
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
    graph_def = tf.get_default_graph().as_graph_def()

    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        frozen = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=graph_def,
            output_node_names=keep_nodes.split(","))

        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(frozen.SerializeToString())
        print("%d ops in the final graph." % len(frozen.node))

if __name__ =='__main__' and FLAGS.action == 'train':
    # First pass over the data directory: one class label per subdirectory.
    for i,dir in enumerate(os.listdir(basedir)):
        label_txt[i] = dir
    input_count = 0
    for i in list(label_txt.values()):
        dir = basedir+'/%s/' % i           # per-class image directory; i is the label name
        for rt, dirs, files in os.walk(dir):
            for filename in files:
                input_count += 1

    # Allocate arrays sized to the total image count.
    input_images = np.array([[0]*SIZE for i in range(input_count)])
    input_labels = np.array([[0]*NUM_CLASSES for i in range(input_count)])
    print("input_count ",input_count)
    # Second pass: load pixel data and build one-hot labels.
    index = 0
    for i,j in label_txt.items():
        dir = basedir+'/%s/' % j        # per-class image directory; j is the label name

        for rt, dirs, files in os.walk(dir):
            for filename in files:
                filename = dir + filename
                img:Image.Image = Image.open(filename)
                img:Image.Image = img.resize(size=(WIDTH,HEIGHT))
                if img.mode != "1":
                    img = img.convert("1")
                width = img.size[0]
                height = img.size[1]
                for h in range(0, height):
                    for w in range(0, width):
                        # Inverted binarization: bright pixels -> 0, dark
                        # strokes -> 1. Thins the glyph lines, which helps accuracy.
                        if img.getpixel((w, h)) > 230:
                            input_images[index][w+h*width] = 0
                        else:
                            input_images[index][w+h*width] = 1
                input_labels[index][i] = 1
                index += 1
    # NOTE(review): `seed` holds random.seed(20)'s return value, i.e. None, so
    # this split is NOT reproducible — an integer seed was presumably intended.
    input_images,val_images,input_labels,val_labels = train_test_split(input_images,
                                                                input_labels,
                                                                test_size=0.2,
                                                                random_state=seed)
    print(input_images.shape,input_labels.shape)
    # Per-evaluation loss history and last measured accuracy, for reporting.
    losses = []
    acc = 0.0
    with tf.Session() as sess:
        # First convolutional layer: 8x8 kernel, 1 -> 16 channels, 2x2 max-pool.
        W_conv1 = tf.Variable(tf.truncated_normal([8, 8, 1, 16], stddev=0.1), name="W_conv1")
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[16]), name="b_conv1")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')

        # Second convolutional layer: 5x5 kernel, 16 -> 32 channels, 1x1 pool (no-op pooling).
        W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 16, 32], stddev=0.1), name="W_conv2")
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[32]), name="b_conv2")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 1, 1, 1]
        pool_strides = [1, 1, 1, 1]
        L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')
        print(L2_pool.shape[1],type(int(L2_pool.shape[1])))
        # Flattened feature size derived from the static conv output shape.
        flattenshape = int(L2_pool.shape[1])*int(L2_pool.shape[2])*int(L2_pool.shape[-1])
        

        # Fully-connected layer: flattened conv features -> 512 units.
        W_fc1 = tf.Variable(tf.truncated_normal([flattenshape, 512], stddev=0.1), name="W_fc1")
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[512]), name="b_fc1")
        h_pool2_flat = tf.reshape(L2_pool, [-1, flattenshape])
        h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)


        # Dropout on the FC activations; keep_prob is fed per-run (0.5 train, 1.0 eval).
        keep_prob = tf.placeholder(tf.float32,name='keep_prob')

        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)


        # Readout layer: 512 -> NUM_CLASSES.
        W_fc2 = tf.Variable(tf.truncated_normal([512, NUM_CLASSES], stddev=0.1), name="W_fc2")
        b_fc2 = tf.Variable(tf.constant(0.1, shape=[NUM_CLASSES]), name="b_fc2")

        # Loss, optimizer, training op and accuracy metric.
        y_conv=tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # logits
        y_pred = tf.nn.softmax(y_conv,name='correct_sm')
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        train_step = tf.train.AdamOptimizer(FLAGS.lr).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1),name='correct_pred')
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        sess.run(tf.global_variables_initializer())

        time_elapsed = time.time() - time_begin
        print("读取图片文件耗费时间：%d秒" % time_elapsed)
        time_begin = time.time()

        print ("一共读取了 %s 个训练图像， %s 个标签" % (input_count, input_count))

        # To support an arbitrary image count, split into full batches plus a
        # remainder batch — e.g. batch 60 with 150 images gives 2x60 then 1x30.
        batch_size = FLAGS.batch
        # NOTE(review): no-op self-assignment; `iterations` was already set from FLAGS.iter above.
        iterations = iterations
        batches_count = int(input_count / batch_size)
        remainder = input_count % batch_size
        print ("训练数据集分成 %s 批, 前面每批 %s 个数据，最后一批 %s 个数据" % (batches_count+1, batch_size, remainder))
        # NOTE(review): getGpuFromSdb is not defined anywhere in this file —
        # this line raises NameError unless an external wrapper injects it; confirm.
        if not getGpuFromSdb(ip, str(int(gpun))):
            MysqlConnect(mysql, status=3)
            sys.exit(1)
        # Run the training iterations.
        for it in range(iterations):
            # Inputs are fed as np.array slices of the preloaded data.
            for n in range(batches_count):
                train_step.run(feed_dict={x: input_images[n*batch_size:(n+1)*batch_size], y_: input_labels[n*batch_size:(n+1)*batch_size], keep_prob: 0.5})
            if remainder > 0:
                start_index = batches_count * batch_size
                # NOTE(review): the stop index input_count-1 excludes the final
                # sample (Python slices are end-exclusive); input_count was presumably intended.
                train_step.run(feed_dict={x: input_images[start_index:input_count-1], y_: input_labels[start_index:input_count-1], keep_prob: 0.5})

            # Every 5 iterations evaluate on the validation split; intent per
            # the original comment was to break once accuracy reaches ~100%.
            iterate_accuracy = 0
            if it%5 == 0:
                iterate_accuracy = accuracy.eval(feed_dict={x: val_images, y_: val_labels, keep_prob: 1.0})
                iterate_loss = cross_entropy.eval(feed_dict={x: val_images, y_: val_labels, keep_prob: 1.0})
                print('第 %d 次训练迭代: 准确率 %0.5f%%,  损失值%0.5f%%' % (it, iterate_accuracy*100,iterate_loss*100))
                losses.append(iterate_loss*100)
                acc=iterate_accuracy
                # NOTE(review): `it >= iterations` is never true inside
                # range(iterations), so this early exit never fires — the
                # accuracy test alone (or `or`) was presumably intended.
                if iterate_accuracy >= 0.9999 and it >= iterations:
                    acc = iterate_accuracy
                    break
            

        print ('完成训练!')
        # Timestamped artifact names: frozen graph, label map, loss figure.
        DateName = FLAGS.name + "_" + time.strftime('%Y%m%d_%H%M', time.localtime(time.time()))
        PB = oridir+"pb_bak/"+DateName+".pb"
        txtDir = oridir+"pb_bak/"+DateName +".txt"
        pfigname = '/tmp/tmpModel/' + "loss_bak/" + DateName + ".jpg"
        ofigname = '/aidfs/003/loss_bak/'
        print(DateName,acc)
        time_elapsed = time.time() - time_begin
        print ("训练耗费时间：%d秒" % time_elapsed)
        time_begin = time.time()

        # NOTE(review): this rebinds the global `x` (the input placeholder) to a
        # plain list; harmless here because no further feed_dict uses `x` in this
        # branch, but fragile if code is reordered.
        x = [i + 1 for i in range(len(losses))]
        plt.figure()
        plt.plot(x, losses, label="train_loss", color="red", linewidth=2)
        plt.legend()
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.title("train-loss fig")
        plt.savefig(pfigname)
        # Move the figure to shared storage; shell=True with internal paths only.
        r4 = subprocess.Popen(f'mv {pfigname} {ofigname}', shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)

        # Save the training checkpoint.
        if not os.path.exists(SAVER_DIR):
            print ('不存在训练数据保存目录，现在创建保存目录')
            os.makedirs(SAVER_DIR)

        saver = tf.train.Saver()
        saver_path = saver.save(sess, "%smodel.ckpt"%(SAVER_DIR))

    # Freeze the checkpoint into a deployable .pb graph.
    freeze_graph(SAVER_DIR+"model.ckpt",PB)
    print('the operation that ckpt convert to pb is success!')
    # for op in tf.get_default_graph().get_operations()[-50:-20]:
    #     print(op.name)
    # Persist the class-index -> label-name mapping next to the .pb.
    with open(txtDir, 'w') as f:
        for key, value in label_txt.items():
            f.write(f"{str(key)}\t{value}\n")
    # Report success (status=2) with all artifact names to the monitoring DB.
    MysqlConnect(mysql,2,pb=DateName+".pb",fig=DateName+".jpg",acc=float(acc),txt=DateName+".txt")

if __name__ == '__main__' and FLAGS.action =='predict':
    # Rebuild the graph from the saved meta file and restore the latest checkpoint.
    saver = tf.train.import_meta_graph("%smodel.ckpt.meta"%(SAVER_DIR))
    with tf.Session() as sess:
        model_file=tf.train.latest_checkpoint(SAVER_DIR)
        saver.restore(sess, model_file)

        # First convolutional layer (weights restored by name from the checkpoint).
        W_conv1 = sess.graph.get_tensor_by_name("W_conv1:0")
        b_conv1 = sess.graph.get_tensor_by_name("b_conv1:0")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')

        # Second convolutional layer (1x1 pooling, effectively no pooling).
        W_conv2 = sess.graph.get_tensor_by_name("W_conv2:0")
        b_conv2 = sess.graph.get_tensor_by_name("b_conv2:0")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 1, 1, 1]
        pool_strides = [1, 1, 1, 1]
        L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')


        # Fully-connected layer.
        W_fc1 = sess.graph.get_tensor_by_name("W_fc1:0")
        b_fc1 = sess.graph.get_tensor_by_name("b_fc1:0")
        # NOTE(review): flatten size is hard-coded to 6400, which matches the
        # default 40x20 input (20*10*32 after the 2x2 pool) — this breaks for
        # any other -rows/-cols, unlike the training branch which computes it.
        h_pool2_flat = tf.reshape(L2_pool, [-1, 6400])
        h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)


        # Dropout — a fresh unnamed placeholder here, fed 1.0 (no dropout) below.
        keep_prob = tf.placeholder(tf.float32)

        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)


        # Readout layer.
        W_fc2 = sess.graph.get_tensor_by_name("W_fc2:0")
        b_fc2 = sess.graph.get_tensor_by_name("b_fc2:0")

        # Softmax over the logits gives class probabilities.
        conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

        for n in [0,2,7]:
            path = "test_images/%s.jpg" % (n)
            img = Image.open(path)
            if img.mode != "1":
                img = img.convert("1")
            width = img.size[0]
            height = img.size[1]

            # Binarize with the same inverted polarity as training
            # (dark strokes -> 1), but a different threshold (190 vs 230).
            img_data = [[0]*SIZE for i in range(1)]
            for h in range(0, height):
                for w in range(0, width):
                    if img.getpixel((w, h)) < 190:
                        img_data[0][w+h*width] = 1
                    else:
                        img_data[0][w+h*width] = 0

            result = sess.run(conv, feed_dict = {x: np.array(img_data), keep_prob: 1.0})

            # Scan for the top-3 probabilities.
            # NOTE(review): this single pass never demotes the previous max1
            # into max2 when a larger value appears, so the 2nd/3rd-best can be
            # wrong; only max1 is reliable. Confirm intent before relying on them.
            max1 = 0
            max2 = 0
            max3 = 0
            max1_index = 0
            max2_index = 0
            max3_index = 0
            for j in range(NUM_CLASSES):
                if result[0][j] > max1:
                    max1 = result[0][j]
                    max1_index = j
                    continue
                if (result[0][j]>max2) and (result[0][j]<=max1):
                    max2 = result[0][j]
                    max2_index = j
                    continue
                if (result[0][j]>max3) and (result[0][j]<=max2):
                    max3 = result[0][j]
                    max3_index = j
                    continue

            # NOTE(review): LETTERS_DIGITS is initialized empty and never filled
            # in this file — this indexing raises IndexError unless an external
            # wrapper populates it; confirm.
            license_num = license_num + LETTERS_DIGITS[max1_index]
            print ("概率：  [%s %0.2f%%]    [%s %0.2f%%]    [%s %0.2f%%]" % (LETTERS_DIGITS[max1_index],max1*100, LETTERS_DIGITS[max2_index],max2*100, LETTERS_DIGITS[max3_index],max3*100))

        print ("车牌编号是: 【%s】" % license_num)