# -*- coding: utf-8 -*-

from __future__ import print_function

from plate_utils import PLATE_CHARS_TOTAL
from plate_utils import licence_plate_chars_count
from plate_utils import plate_width_default
from plate_utils import plate_height_default

from gen_plate import GenPlate

import numpy as np
import tensorflow as tf
import os
import cv2
import sys

# print ('cur dir: ' + os.getcwd())

# Checkpoint and TensorBoard summary locations
model_save_path = '../data/model/e2e-tf/plate'
log_save_path ='../data/logs/e2e-tf/plate'

# Ensure the checkpoint / summary directories exist before training starts
if not os.path.exists(model_save_path):
    os.makedirs(model_save_path)
if not os.path.exists(log_save_path):
    os.makedirs(log_save_path)

# Image size (taken from plate_utils defaults)
IMAGE_HEIGHT = plate_height_default
IMAGE_WIDTH = plate_width_default

# Character set used for text <-> vector encoding
char_set = PLATE_CHARS_TOTAL
CHAR_SET_LEN = len(char_set)

# print(char_set,CHAR_SET_LEN)

def text2vec(text):
    """Encode a plate string as a flat multi-hot vector.

    The vector has licence_plate_chars_count * CHAR_SET_LEN entries: one
    CHAR_SET_LEN-wide slot per character position, with a single 1 at the
    index of that character within char_set.
    """
    vec = np.zeros(licence_plate_chars_count * CHAR_SET_LEN)
    hot = [pos * CHAR_SET_LEN + char_set.index(ch) for pos, ch in enumerate(text)]
    vec[hot] = 1
    return vec


# Decode a multi-hot vector back into the plate string
def vec2text(vec):
    """Inverse of text2vec: map each set bit back to its character."""
    positions = vec.nonzero()[0]
    return "".join(char_set[p % CHAR_SET_LEN] for p in positions)

# The vector (size licence_plate_chars_count * CHAR_SET_LEN) is 0/1 encoded:
# each CHAR_SET_LEN-wide slot one-hot encodes one character, so both the
# character order and the character identities are preserved.
# vec = text2vec(u"粤RGC861")
# print(vec)
# text = vec2text(vec)
# print(text)
# vec = text2vec(u"京3VES3S")
# print(vec)
# text = vec2text(vec)
# print(text)
#
# exit(0)

# Generate one training batch
def get_next_batch(g, batch_size=128):
    """Draw batch_size synthetic plates from generator g.

    Returns (batch_x, batch_y): flattened grayscale images scaled into
    [0, 1] and the matching multi-hot label vectors.
    """
    batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
    batch_y = np.zeros([batch_size, licence_plate_chars_count * CHAR_SET_LEN])

    for row in range(batch_size):
        text, image = g.gen_one(size=(IMAGE_WIDTH, IMAGE_HEIGHT), channels=1)
        # scale pixels to [0,1]; (image.flatten()-128)/128 would zero-center instead
        batch_x[row, :] = image.flatten() / 255.0
        batch_y[row, :] = text2vec(text)

    return batch_x, batch_y

# tensorflow-net: global graph placeholders shared by build/train functions
X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WIDTH])  # flattened grayscale image
Y = tf.placeholder(tf.float32, [None, licence_plate_chars_count * CHAR_SET_LEN])  # multi-hot labels
keep_prob = tf.placeholder(tf.float32)  # dropout


# Define the CNN
def build_plate_cnn(w_alpha=0.01, b_alpha=0.1):
    """Build the end-to-end plate-recognition CNN on the global placeholders.

    Four identical conv/pool/dropout stages (the original comment said
    "3 conv layer" but there are four), followed by a 1024-unit dense layer
    and a linear output of size licence_plate_chars_count * CHAR_SET_LEN.

    Args:
        w_alpha: scale applied to the random-normal weight initializers.
        b_alpha: scale applied to the random-normal bias initializers.

    Returns:
        The output logits tensor (no softmax — the training loss applies
        sigmoid cross-entropy itself).
    """
    def _conv_stage(inputs, in_ch, out_ch):
        # One 3x3 SAME conv + bias + ReLU, 2x2 max-pool, then dropout(keep_prob).
        w = tf.Variable(w_alpha * tf.random_normal([3, 3, in_ch, out_ch]))
        b = tf.Variable(b_alpha * tf.random_normal([out_ch]))
        conv = tf.nn.relu(tf.nn.bias_add(
            tf.nn.conv2d(inputs, w, strides=[1, 1, 1, 1], padding='SAME'), b))
        conv = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        return tf.nn.dropout(conv, keep_prob)

    x = tf.reshape(X, shape=[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])

    # 4 conv stages: 1 -> 32 -> 64 -> 64 -> 64 channels
    net = _conv_stage(x, 1, 32)
    net = _conv_stage(net, 32, 64)
    net = _conv_stage(net, 64, 64)
    net = _conv_stage(net, 64, 64)

    # Fully connected layer on the flattened feature map
    flat_dim = net.shape[1].value * net.shape[2].value * net.shape[3].value
    w_d = tf.Variable(w_alpha * tf.random_normal([flat_dim, 1024]))
    b_d = tf.Variable(b_alpha * tf.random_normal([1024]))
    dense = tf.reshape(net, [-1, flat_dim])
    dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
    dense = tf.nn.dropout(dense, keep_prob)

    # Linear output layer (logits)
    w_out = tf.Variable(w_alpha * tf.random_normal([1024, licence_plate_chars_count * CHAR_SET_LEN]))
    b_out = tf.Variable(b_alpha * tf.random_normal([licence_plate_chars_count * CHAR_SET_LEN]))
    out = tf.add(tf.matmul(dense, w_out), b_out)
    # out = tf.nn.softmax(out)
    return out

def load_model(saver, sess):
    """Restore the newest checkpoint from model_save_path, if any.

    Args:
        saver: a tf.train.Saver built over the current graph.
        sess: the active tf.Session to restore into.

    Returns:
        The next training step to run (checkpoint step + 1), or 0 when no
        checkpoint exists or restoring fails.
    """
    step = 0
    try:
        ckpt = tf.train.get_checkpoint_state(model_save_path)
        if ckpt and ckpt.model_checkpoint_path:
            # checkpoint paths look like ".../ckpt.model-<step>"
            step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('found model @ %d' % step)
            step += 1
        else:
            print('no model found')
    except Exception as e:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt)
        # and now reports the cause. Reset step so a failed restore does not
        # resume counting from a checkpoint whose weights were never loaded.
        print('exception when load: %s' % e)
        step = 0
    return step

# Training
def train_crack_plate_cnn():
    """Train the plate CNN until test-batch accuracy exceeds 0.99.

    Builds the graph on the global placeholders, restores the newest
    checkpoint (if any), then loops forever over generated batches.
    Accuracy is evaluated every 100 steps on a fresh batch of 100 plates;
    a checkpoint is written every 500 steps and once on success.
    """
    g = GenPlate()

    output = build_plate_cnn()
    # loss
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, Y))
    # sigmoid cross-entropy over the multi-hot label vector (one slot per character position)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))

    tf.summary.scalar("loss",loss)

    # NOTE(review): open question from the original author — how do softmax and
    # sigmoid differ for the final classification layer?
    # optimizer: to speed up training, learning_rate should start large and decay over time
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

    # Per-character accuracy: argmax within each CHAR_SET_LEN slot, compared
    # against the label's argmax for the same slot.
    predict = tf.reshape(output, [-1, licence_plate_chars_count, CHAR_SET_LEN])
    max_idx_p = tf.argmax(predict, 2)
    max_idx_l = tf.argmax(tf.reshape(Y, [-1, licence_plate_chars_count, CHAR_SET_LEN]), 2)
    correct_pred = tf.equal(max_idx_p, max_idx_l)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    tf.summary.scalar("accuracy",accuracy)

    merged = tf.summary.merge_all()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # resume from the newest checkpoint, or start at step 0
        step = load_model(saver,sess)

        file_writer = tf.summary.FileWriter(log_save_path, sess.graph)

        while True:
            batch_x, batch_y = get_next_batch(g,32)
            _, loss_, summary = sess.run([optimizer, loss, merged], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
            print(step, loss_)

            file_writer.add_summary(summary, step)

            # evaluate accuracy every 100 steps (dropout disabled: keep_prob=1)
            if step % 100 == 0:
                batch_x_test, batch_y_test = get_next_batch(g,100)
                acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
                print(step, acc)
                sys.stdout.flush()
                # if accuracy exceeds 0.99, save the model and finish training
                if acc > 0.99:
                    saver.save(sess, os.path.join(model_save_path,"ckpt.model"), global_step=step)
                    break

            # periodic checkpoint
            if step % 500 == 0:
                saver.save(sess, os.path.join(model_save_path, "ckpt.model"), global_step=step)

            step += 1

# Script entry point: train until the accuracy target is met.
if __name__ == '__main__':
    train_crack_plate_cnn()