# -*- coding: utf-8 -*-

from __future__ import print_function

from cigeratte_utils import PLATE_CHARS_DIGIT,PLATE_CHARS_LETTER,PLATE_CHARS_TOTAL
from cigeratte_utils import cigeratte_chars_count
from cigeratte_utils import plate_width_default
from cigeratte_utils import plate_height_default

from gen_cigeratte import GenSample

import numpy as np
import tensorflow as tf
import os
import cv2
import sys

# print ('cur dir: ' + os.getcwd())

# Output directories for model checkpoints and TensorBoard logs.
model_save_path = '../data/model/e2e-tf/cig_2p_selu'
log_save_path ='../data/logs/e2e-tf/cig_2p_selu'

if not os.path.exists(model_save_path):
    os.makedirs(model_save_path)
if not os.path.exists(log_save_path):
    os.makedirs(log_save_path)

# Input image size (taken from the plate defaults in cigeratte_utils).
IMAGE_HEIGHT = plate_height_default
IMAGE_WIDTH = plate_width_default

# Character-set sizes used by the text <-> one-hot vector encoding below.
LETTER_SET_LEN = len(PLATE_CHARS_LETTER)
DIGIT_SET_LEN = len(PLATE_CHARS_DIGIT)
TOTAL_SET_LEN = len(PLATE_CHARS_TOTAL)

# Flat label-vector length: one one-hot group per character slot.
# Layout (see text2vec): 16 digit slots, 4 letter slots, then the remaining
# (cigeratte_chars_count - 20) digit slots -- assumes a 32-character code.
output_vector_size = (cigeratte_chars_count - 4) * DIGIT_SET_LEN + 4 * LETTER_SET_LEN

def text2vec(text):
    """Encode a code string as a flat 0/1 label vector.

    Vector layout: slots 0-15 are digit groups (DIGIT_SET_LEN wide each),
    slots 16-19 are letter groups (LETTER_SET_LEN wide each), and every
    slot from 20 on is a digit group again.  Raises ValueError if a
    character is missing from its slot's character set.
    """
    vec = np.zeros(output_vector_size)
    letter_base = 16 * DIGIT_SET_LEN
    tail_base = letter_base + 4 * LETTER_SET_LEN
    for pos, ch in enumerate(text):
        if pos < 16:
            offset = pos * DIGIT_SET_LEN + PLATE_CHARS_DIGIT.index(ch)
        elif pos < 20:
            offset = letter_base + (pos - 16) * LETTER_SET_LEN + PLATE_CHARS_LETTER.index(ch)
        else:
            offset = tail_base + (pos - 20) * DIGIT_SET_LEN + PLATE_CHARS_DIGIT.index(ch)
        vec[offset] = 1
    return vec


def vet_pos2text(char_pos):
    """Map per-slot class indices (e.g. argmax results) back to a string.

    Each entry of char_pos is a 0-based index within its own slot's
    character set: slots 16-19 are letters, all other slots are digits.
    NOTE(review): digit slots are looked up in PLATE_CHARS_TOTAL rather
    than PLATE_CHARS_DIGIT -- presumably PLATE_CHARS_TOTAL starts with the
    digit characters; confirm against cigeratte_utils.
    """
    chars = []
    for slot, idx in enumerate(char_pos):
        if 16 <= slot < 20:
            chars.append(PLATE_CHARS_LETTER[idx % LETTER_SET_LEN])
        else:
            chars.append(PLATE_CHARS_TOTAL[idx % DIGIT_SET_LEN])
    return "".join(chars)


# Convert a one-hot label vector back to its text.
def vec2text(vec):
    """Decode a 0/1 label vector back into its code string (inverse of text2vec).

    vec.nonzero() yields one global index per character slot, in slot
    order; each index is reduced to a position inside its slot's own
    character set before lookup.
    """
    positions = vec.nonzero()[0]
    letter_base = 16 * DIGIT_SET_LEN
    tail_base = letter_base + 4 * LETTER_SET_LEN
    out = []
    for slot, pos in enumerate(positions):
        if slot < 16:
            out.append(PLATE_CHARS_TOTAL[pos % DIGIT_SET_LEN])
        elif slot < 20:
            out.append(PLATE_CHARS_LETTER[(pos - letter_base) % LETTER_SET_LEN])
        else:
            out.append(PLATE_CHARS_TOTAL[(pos - tail_base) % DIGIT_SET_LEN])
    return "".join(out)

# The label vector (size cigeratte_chars_count * CHAR_SET_LEN) is 0/1 encoded;
# every group of 10 (digit) or 26 (letter) entries encodes one character, so
# both the character's position and its identity are preserved.
# vec = text2vec("1234567890123456XYZA567890123456")
# print(vec,vec.shape)
# text = vec2text(vec)
# print(text)  # F5Sd
# vec = text2vec("2341568890123456XYZA567893847283")
# print(vec,vec.shape)
# text = vec2text(vec)
# print(text)  # SFd5
#
# exit(0)

# Generate one training batch.
def get_next_batch(g, batch_size=128):
    """Generate one training batch from sample generator *g*.

    Returns (batch_x, batch_y): flattened single-channel images scaled
    into [0, 1], and their one-hot label vectors from text2vec.
    """
    xs = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
    ys = np.zeros([batch_size, output_vector_size])

    for row in range(batch_size):
        text, image = g.gen_one(size=(IMAGE_WIDTH, IMAGE_HEIGHT), channels=1)
        # Scale pixels into [0, 1]; (image.flatten()-128)/128 would give zero mean.
        xs[row, :] = image.flatten() / 255.0
        ys[row, :] = text2vec(text)

    return xs, ys

# tensorflow-net: graph inputs shared by cnn_model() and the training loop.
X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WIDTH])  # flattened grayscale image
Y = tf.placeholder(tf.float32, [None, output_vector_size])  # one-hot label vector (see text2vec)
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability


# Define the CNN.
def cnn_model(w_alpha=0.01, b_alpha=0.1):
    """Build the CNN graph: two conv/pool/dropout stages, one dense layer,
    and a linear output layer.

    Reads the module-level placeholders X and keep_prob; returns the raw
    logits tensor of shape [batch, output_vector_size] (no activation --
    the loss applies the sigmoid).
    """
    img = tf.reshape(X, shape=[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])

    def _conv_selu(inp, in_ch, out_ch):
        # 3x3 SAME convolution followed by a SELU nonlinearity.
        w = tf.Variable(w_alpha * tf.random_normal([3, 3, in_ch, out_ch]))
        b = tf.Variable(b_alpha * tf.random_normal([out_ch]))
        conv = tf.nn.conv2d(inp, w, strides=[1, 1, 1, 1], padding='SAME')
        return tf.nn.selu(tf.nn.bias_add(conv, b))

    # Stage 1: conv -> 2x2 max-pool -> dropout.
    net = _conv_selu(img, 1, 32)
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    net = tf.nn.dropout(net, keep_prob)

    # Stage 2: conv -> 2x2 max-pool -> dropout.
    net = _conv_selu(net, 32, 64)
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    net = tf.nn.dropout(net, keep_prob)

    # Fully connected layer over the flattened feature map.
    flat_dim = net.shape[1].value * net.shape[2].value * net.shape[3].value
    w_fc = tf.Variable(w_alpha * tf.random_normal([flat_dim, 512]))
    b_fc = tf.Variable(b_alpha * tf.random_normal([512]))
    net = tf.reshape(net, [-1, w_fc.get_shape().as_list()[0]])
    net = tf.nn.selu(tf.add(tf.matmul(net, w_fc), b_fc))
    net = tf.nn.dropout(net, keep_prob)

    # Linear output layer (logits only; no softmax/sigmoid here).
    w_out = tf.Variable(w_alpha * tf.random_normal([512, output_vector_size]))
    b_out = tf.Variable(b_alpha * tf.random_normal([output_vector_size]))
    return tf.add(tf.matmul(net, w_out), b_out)

def load_model(saver, sess):
    """Restore the latest checkpoint from model_save_path, if any.

    Returns the next global step to resume training from: the restored
    checkpoint's step + 1 when one was found, otherwise 0.
    """
    step = 0
    try:
        ckpt = tf.train.get_checkpoint_state(model_save_path)
        if ckpt and ckpt.model_checkpoint_path:
            # Checkpoint paths end in "...-<global_step>".
            step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('found model @ %d' % step)
            step += 1
        else:
            print('no model found')
    except Exception as e:
        # A bare `except:` here would also swallow KeyboardInterrupt and
        # SystemExit; catch Exception, report why the load failed, and
        # restart from step 0 so a half-parsed step is never returned
        # without the weights actually restored.
        print('exception when load: %s' % e)
        step = 0
    return step

# Training.
def train_cigeratte_cnn():
    """Build the training graph and train until a 100-sample test batch
    reaches per-character accuracy > 0.99, checkpointing every 500 steps."""
    g = GenSample()

    output = cnn_model()
    # loss
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, Y))
    # Sigmoid (multi-label) cross-entropy over the flat 0/1 label vector.
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))

    tf.summary.scalar("loss",loss)

    # How does softmax differ from sigmoid for the final classification layer?
    # To speed up training, the learning rate should start large and then decay.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

    # Split the logits into per-slot one-hot groups: 16 leading digit slots,
    # 4 letter slots, 12 trailing digit slots (mirrors the text2vec layout).
    predict1 = tf.reshape(output[:,:16*DIGIT_SET_LEN], [-1, 16, DIGIT_SET_LEN])
    predict2 = tf.reshape(output[:,16*DIGIT_SET_LEN:16*DIGIT_SET_LEN+4*LETTER_SET_LEN], [-1, 4, LETTER_SET_LEN])
    predict3 = tf.reshape(output[:,16*DIGIT_SET_LEN+4*LETTER_SET_LEN:], [-1, 12, DIGIT_SET_LEN])
    # predict_digit = tf.concat([predict1,predict3],1)

    max_idx_p1 = tf.argmax(predict1, 2)
    max_idx_p2 = tf.argmax(predict2, 2)
    max_idx_p3 = tf.argmax(predict3, 2)

    # Same slot-wise split for the ground-truth labels.
    Y_1 = tf.reshape(Y[:,:16*DIGIT_SET_LEN], [-1, 16, DIGIT_SET_LEN])
    Y_2 = tf.reshape(Y[:,16*DIGIT_SET_LEN:16*DIGIT_SET_LEN+4*LETTER_SET_LEN], [-1, 4, LETTER_SET_LEN])
    Y_3 = tf.reshape(Y[:,16*DIGIT_SET_LEN+4*LETTER_SET_LEN:], [-1, 12, DIGIT_SET_LEN])
    # Y_d = tf.concat([Y_1, Y_3], 1)

    max_idx_l1 = tf.argmax(Y_1, 2)
    max_idx_l2 = tf.argmax(Y_2, 2)
    max_idx_l3 = tf.argmax(Y_3, 2)

    # Per-character accuracy: fraction of slots whose argmax matches the label.
    correct_pred = tf.equal(tf.concat([max_idx_p1,max_idx_p2,max_idx_p3],1), tf.concat([max_idx_l1,max_idx_l2,max_idx_l3],1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    tf.summary.scalar("accuracy",accuracy)

    merged = tf.summary.merge_all()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Resume from the latest checkpoint when one exists (else step 0).
        step = load_model(saver,sess)

        file_writer = tf.summary.FileWriter(log_save_path, sess.graph)

        while True:
            batch_x, batch_y = get_next_batch(g,32)
            _,loss_, summary = sess.run([optimizer, loss, merged], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.6})
            # _ = sess.run([optimizer], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
            print("step:%d, loss:%g" % (step, loss_))

            file_writer.add_summary(summary, step)

            # Evaluate accuracy on a fresh 100-sample batch every 100 steps.
            if step % 100 == 0:
                batch_x_test, batch_y_test = get_next_batch(g,100)
                acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
                print("step:%d, acc:%g" % (step, acc))
                sys.stdout.flush()
                # Save the model and stop once accuracy exceeds 0.99.
                # (The original comment said "50%", but the code checks 0.99.)
                if acc > 0.99:
                    saver.save(sess, os.path.join(model_save_path,"ckpt.model"), global_step=step)
                    break

            # Periodic checkpoint so progress survives an interruption.
            if step % 500 == 0:
                saver.save(sess, os.path.join(model_save_path, "ckpt.model"), global_step=step)

            step += 1

# Script entry point: build the graph and run the training loop.
if __name__ == '__main__':
    train_cigeratte_cnn()