import os
import numpy as np
import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split


class PlateCNN:
    """Small CNN that classifies 136x36 BGR crops as plate / no-plate."""

    def __init__(self):
        """Set hyper-parameters and create the TF1 input placeholders.

        Placeholders are fed at session run time:
          x_place:    float32 images, shape (None, 36, 136, 3)
          y_place:    float32 one-hot labels, shape (None, 2)
          keep_place: scalar dropout keep probability
        """
        self.img_w, self.img_h = 136, 36  # fixed network input size (width, height)
        self.y_size = 2                   # two classes: has plate / no plate
        self.batch_size = 100             # samples drawn per training step
        self.learn_rate = 0.001           # Adam learning rate

        self.x_place = tf.placeholder(dtype=tf.float32, shape=[None, self.img_h, self.img_w, 3], name='x_place')
        self.y_place = tf.placeholder(dtype=tf.float32, shape=[None, self.y_size], name='y_place')
        self.keep_place = tf.placeholder(dtype=tf.float32, name='keep_place')

    def _conv_pool_drop(self, inputs, in_ch, out_ch):
        """One stage: 3x3 conv (stride 1, SAME) + ReLU, 2x2 max-pool, dropout."""
        w = tf.Variable(tf.random_normal(shape=[3, 3, in_ch, out_ch], stddev=0.01), dtype=tf.float32)
        b = tf.Variable(tf.random_normal(shape=[out_ch]), dtype=tf.float32)
        conv = tf.nn.relu(tf.nn.bias_add(
            tf.nn.conv2d(inputs, filter=w, strides=[1, 1, 1, 1], padding='SAME'), b))
        pooled = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        return tf.nn.dropout(pooled, self.keep_place)

    def cnn_construct(self):
        """Build the graph: four conv/pool/dropout stages, two hidden dense
        layers (each with dropout) and a 2-way linear output.

        :return: logits tensor of shape (None, 2), graph name 'out_put'
        """
        x_input = tf.reshape(self.x_place, shape=[-1, self.img_h, self.img_w, 3])

        conv0 = self._conv_pool_drop(x_input, 3, 16)   # (?, 18, 68, 16)
        conv1 = self._conv_pool_drop(conv0, 16, 32)    # (?, 9, 34, 32)
        conv2 = self._conv_pool_drop(conv1, 32, 64)    # (?, 5, 17, 64)
        conv3 = self._conv_pool_drop(conv2, 64, 128)   # (?, 3, 9, 128)

        conv_out = tf.reshape(conv3, shape=[-1, 3 * 9 * 128])

        fw1 = tf.Variable(tf.random_normal(shape=[3 * 9 * 128, 1024], stddev=0.01), dtype=tf.float32)
        fb1 = tf.Variable(tf.random_normal(shape=[1024]), dtype=tf.float32)
        fully1 = tf.nn.relu(tf.add(tf.matmul(conv_out, fw1), fb1))  # (?, 1024)
        fully1 = tf.nn.dropout(fully1, self.keep_place)

        fw2 = tf.Variable(tf.random_normal(shape=[1024, 1024], stddev=0.01), dtype=tf.float32)
        fb2 = tf.Variable(tf.random_normal(shape=[1024]), dtype=tf.float32)
        fully2 = tf.nn.relu(tf.add(tf.matmul(fully1, fw2), fb2))  # (?, 1024)
        fully2 = tf.nn.dropout(fully2, self.keep_place)

        # Final linear layer: raw logits; softmax is applied by the loss
        # during training and explicitly at inference time.
        fw3 = tf.Variable(tf.random_normal(shape=[1024, self.y_size], stddev=0.01), dtype=tf.float32)
        fb3 = tf.Variable(tf.random_normal(shape=[self.y_size]), dtype=tf.float32)
        fully3 = tf.add(tf.matmul(fully2, fw3), fb3, name='out_put')  # (?, 2)

        return fully3

    def train(self, train_data_dir, model_save_path):
        """Train the network and save a checkpoint once it is accurate enough.

        Loops over random mini-batches forever; every 10 steps it measures
        accuracy on a random held-out batch and stops (saving the model)
        once accuracy exceeds 0.99 after at least 1500 steps.

        :param train_data_dir: directory of labelled images, e.g.
                               ./data/train/cnn_plate_train
        :param model_save_path: checkpoint path passed to tf.train.Saver
        """
        print('ready load train dataset')
        x, y = self.init_data(train_data_dir)
        print('success load ' + str(len(y)) + ' datas')
        # Hold out 20% of the data for the periodic accuracy check.
        train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=0)

        out_put = self.cnn_construct()
        print(out_put)
        predicts = tf.nn.softmax(out_put)           # (?, 2) class probabilities
        predicts = tf.argmax(predicts, axis=1)      # predicted class index
        actual_y = tf.argmax(self.y_place, axis=1)  # true class index
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predicts, actual_y), dtype=tf.float32))
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=out_put, labels=self.y_place))
        opt = tf.train.AdamOptimizer(self.learn_rate)
        train_step = opt.minimize(cost)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            step = 0
            saver = tf.train.Saver()  # checkpoint writer
            while True:
                # Random mini-batch without replacement.
                # NOTE(review): assumes len(train_x) >= batch_size;
                # np.random.choice raises ValueError otherwise.
                train_index = np.random.choice(len(train_x), self.batch_size, replace=False)
                train_randx = train_x[train_index]
                train_randy = train_y[train_index]
                _, loss = sess.run([train_step, cost], feed_dict={self.x_place: train_randx,
                                                                  self.y_place: train_randy, self.keep_place: 0.75})
                step += 1
                print(step, loss)

                if step % 10 == 0:
                    # Evaluate on a random held-out batch with dropout disabled.
                    test_index = np.random.choice(len(test_x), self.batch_size, replace=False)
                    test_randx = test_x[test_index]
                    test_randy = test_y[test_index]
                    acc = sess.run(accuracy, feed_dict={self.x_place: test_randx,
                                                        self.y_place: test_randy, self.keep_place: 1.0})
                    print('accuracy:' + str(acc))
                    if acc > 0.99 and step > 1500:
                        saver.save(sess, model_save_path)
                        break

    def test(self, x_images, test_model_path):
        """Classify images using a restored checkpoint.

        :param x_images: array of BGR images, shape (N, 36, 136, 3)
        :param test_model_path: checkpoint to restore, e.g.
                                ./data/model/plate_recognize/model.ckpt
        :return: (pred_res, prob_res) - predicted class index per image
                 (1 = plate present, 0 = no plate) and the corresponding
                 softmax probability of that class
        """
        out_put = self.cnn_construct()
        # Fix: use softmax (as in training) instead of tf.nn.relu6 - relu6
        # clamps logits to [0, 6], so its maxima are not probabilities and
        # clamping can distort the argmax; softmax is monotonic, so the
        # predicted class is the true argmax of the logits.
        predicts = tf.nn.softmax(out_put)
        probabilitys = tf.reduce_max(predicts, axis=1)  # confidence of the winning class
        predicts = tf.argmax(predicts, axis=1)          # winning class index
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, test_model_path)
            pred_res, prob_res = sess.run([predicts, probabilitys],
                                          feed_dict={self.x_place: x_images, self.keep_place: 1.0})
        return pred_res, prob_res

    def list_all_files(self, root):
        """Recursively collect the paths of all regular files under `root`.

        :param root: directory to walk
        :return: list of file paths found anywhere below `root`
        """
        files = []
        for entry in os.listdir(root):
            element = os.path.join(root, entry)
            if os.path.isdir(element):
                files.extend(self.list_all_files(element))
            elif os.path.isfile(element):
                files.append(element)
        return files

    def init_data(self, init_dir):
        """Load labelled training images and build one-hot labels.

        The parent directory name of each file is its label:
        'has' -> [0, 1] (plate present), anything else -> [1, 0].

        :param init_dir: root directory of the labelled images
        :raises ValueError: if `init_dir` does not exist
        :return: (x, y) - images resized to (img_w, img_h) as one numpy
                 array, and the matching one-hot labels, shape (N, 2)
        """
        x = []
        y = []
        if not os.path.exists(init_dir):
            raise ValueError('没有找到文件夹')
        files = self.list_all_files(init_dir)
        labels = [os.path.split(os.path.dirname(file))[-1] for file in files]

        for num, file in enumerate(files):
            src_img = cv2.imread(file)
            # Fix: imread returns None for unreadable files; guard before
            # touching .ndim (the original raised AttributeError here).
            if src_img is None or src_img.ndim != 3:
                continue
            resize_img = cv2.resize(src_img, (self.img_w, self.img_h))
            x.append(resize_img)
            y.append([0, 1] if labels[num] == 'has' else [1, 0])

        x = np.array(x)
        y = np.array(y).reshape(-1, 2)
        return x, y

    def init_test_data(self, test_data_dir):
        """Load unlabelled test images.

        :param test_data_dir: test image directory, e.g.
                              ./data/train/cnn_plate_test
        :raises ValueError: if `test_data_dir` does not exist
        :return: numpy array of color images resized to (img_w, img_h)
        """
        test_x = []
        if not os.path.exists(test_data_dir):
            raise ValueError('没有找到文件夹')
        files = self.list_all_files(test_data_dir)
        for file in files:
            # Fix: imread's second argument is an IMREAD_* flag; passing
            # cv2.COLOR_BGR2GRAY (a cvtColor code, value 6) accidentally
            # selected IMREAD_ANYDEPTH|IMREAD_ANYCOLOR. The network needs
            # 3-channel input, so read with the default color flag.
            src_img = cv2.imread(file)
            if src_img is None or src_img.ndim != 3:
                continue
            resize_img = cv2.resize(src_img, (self.img_w, self.img_h))
            test_x.append(resize_img)
        return np.array(test_x)


if __name__ == '__main__':
    # Locations of the training images, test images and the checkpoint.
    data_dir = os.path.join('./data/train/cnn_plate_train')
    test_dir = os.path.join('./data/train/cnn_plate_test')
    train_model_path = os.path.join('./data/model/plate_recognize/model.ckpt')
    model_path = os.path.join('./data/model/plate_recognize/model.ckpt')

    # 1 -> train a new model; 0 -> score the test set with the saved model.
    train_flag = 1
    net = PlateCNN()

    if train_flag == 1:
        # Training mode.
        net.train(data_dir, train_model_path)
    else:
        # Evaluation mode: load test images, restore the model, report results.
        test_X = net.init_test_data(test_dir)
        preds, probs = net.test(test_X, model_path)
        for pred, prob in zip(preds, probs):
            label = 'plate' if pred.astype(int) == 1 else 'no'
            print(label, prob)