#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Author:Lijiacai
Email:1050518702@qq.com
===========================================
CopyRight@JackLee.com
===========================================
"""
import base64
import math
import os
import sys
import json
import cv2
import time
import numpy as np
import requests
import tensorflow as tf
import keras
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from keras.models import load_model
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, MaxPooling2D
from keras.layers import Conv2D, Input, MaxPool2D, Reshape, Activation, Flatten, Dense
from keras.layers import Conv2D, MaxPool2D
from keras.optimizers import SGD
from keras import backend as K
from scipy.ndimage import filters

# Keras 1.x / early-2.x API: use TensorFlow ("channels last") dimension ordering.
# NOTE(review): this call was removed in modern Keras — K.set_image_data_format('channels_last')
# is the newer equivalent; confirm against the keras version pinned for this project.
K.set_image_dim_ordering('tf')
# global graph


# Paths of the model weights / cascade / font consumed by load_models().
model_path = {
    "plate_type": "./model/plate_type.h5",
    "fontC": "./Font/platech.ttf",
    "watch_cascade": "./model/cascade.xml",
    "model12": "./model/model12.h5",
    "ocr_model": "./model/ocr_plate_all_w_rnn_2.h5"
}
# Module-level singletons; all populated by load_models() before any prediction.
graph = None
fontC = None
watch_cascade = None
plate_type_model = None
model12 = None
ocr_model = None

# Remote fallback OCR service endpoint used by Recognizer.__predict_ocr__.
chineseocr = "http://119.3.222.159:15002/ocr"

# Full character set recognised by the OCR model; index order must match the
# trained network's output classes (the extra class beyond len(chars) is the CTC blank).
chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
         "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
         "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
         "Y", "Z", "港", "学", "使", "警", "澳", "挂", "军", "北", "南", "广", "沈", "兰", "成", "济", "海", "民", "航", "空"
         ]

# Subset of chars that may legally start a plate (province abbreviations and
# special prefixes); used to filter remote-OCR results.
chinese_chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤",
                 "桂",
                 "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "港", "学", "使", "警", "澳", "挂", "军", "北", "南", "广",
                 "沈", "兰", "成", "济", "海", "民", "航", "空"
                 ]

# Plate colour/type labels; index must match plate_type_model's output classes.
plateType = ["蓝牌", "单层黄牌", "新能源车牌", "白色", "黑色-港澳"]


class ImageDealer():
    """Image-processing helpers that locate, deskew and crop licence-plate
    regions before they are handed to the neural networks.

    Relies on the module-level globals ``watch_cascade``, ``fontC``, ``graph``
    and ``model12`` being populated by ``load_models()`` first.
    """

    def find_edge(self, image):
        """
        Find the rough left/right column boundaries of a binary plate image.

        :param image: 2-D binary image (pixel values 0/255)
        :return: (start, end) column indices, padded by 3/4 px respectively
        """
        sum_i = image.sum(axis=0)
        # np.float was removed in NumPy 1.24+; builtin float is the documented replacement
        sum_i = sum_i.astype(float)
        sum_i /= image.shape[0] * 255  # normalise: fraction of white pixels per column
        start = 0
        end = image.shape[1] - 1

        for i, one in enumerate(sum_i):
            if one > 0.4:
                start = i
                if start - 3 < 0:
                    start = 0
                else:
                    start -= 3
                break
        for i, one in enumerate(sum_i[::-1]):

            if one > 0.4:
                end = end - i
                if end + 4 > image.shape[1] - 1:
                    end = image.shape[1] - 1
                else:
                    end += 4
                break
        return start, end

    def verticalEdgeDetection(self, image):
        """
        Vertical-edge detection: Sobel-x, Otsu-derived threshold, then a wide
        morphological close to join plate characters into one blob.

        :param image: grayscale image
        :return: binary edge mask
        """
        image_sobel = cv2.Sobel(image.copy(), cv2.CV_8U, 1, 0)
        # First threshold only to obtain the Otsu level, then re-threshold at 70% of it.
        flag, thres = cv2.threshold(image_sobel, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
        flag, thres = cv2.threshold(image_sobel, int(flag * 0.7), 255, cv2.THRESH_BINARY)
        kernel = np.ones(shape=(3, 15))
        thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)
        return thres

    def horizontalSegmentation(self, image):
        """
        Determine rough left/right boundaries and crop/resize to 136x36.

        :param image: grayscale plate image
        :return: 136x36 cropped image
        """
        thres = self.verticalEdgeDetection(image)
        head, tail = self.find_edge(thres)
        tail = tail + 5
        if tail > 135:
            tail = 135
        image = image[0:35, head:tail]
        image = cv2.resize(image, (136, 36))
        return image

    def drawRectBox(self, image, rect, addText):
        """
        Draw a bounding box with a filled label bar and render *addText* on it.

        :param image: BGR image (modified in place by the cv2 calls)
        :param rect: (x, y, w, h) box
        :param addText: label text (rendered with the module font ``fontC``)
        :return: annotated image as a numpy array
        """
        cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])),
                      (0, 0, 255), 2,
                      cv2.LINE_AA)
        cv2.rectangle(image, (int(rect[0] - 1), int(rect[1]) - 16), (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1,
                      cv2.LINE_AA)

        img = Image.fromarray(image)
        draw = ImageDraw.Draw(img)
        draw.text((int(rect[0] + 1), int(rect[1] - 16)), addText, (255, 255, 255), font=fontC)
        imagex = np.array(img)

        return imagex

    def computeSafeRegion(self, shape, bounding_rect):
        """
        Clamp a bounding rect to the image bounds.

        :param shape: image shape (h, w, ...)
        :param bounding_rect: (x, y, w, h)
        :return: clamped [x, y, w, h]
        """
        top = bounding_rect[1]  # y
        bottom = bounding_rect[1] + bounding_rect[3]  # y + h
        left = bounding_rect[0]  # x
        right = bounding_rect[0] + bounding_rect[2]  # x + w

        min_top = 0
        max_bottom = shape[0]
        min_left = 0
        max_right = shape[1]

        if top < min_top:
            top = min_top
        if left < min_left:
            left = min_left

        if bottom > max_bottom:
            bottom = max_bottom
        if right > max_right:
            right = max_right

        return [left, top, right - left, bottom - top]

    def cropped_from_image(self, image, rect):
        """Crop *rect* (x, y, w, h) out of *image*, clamped to the image bounds."""
        x, y, w, h = self.computeSafeRegion(image.shape, rect)
        return image[y:y + h, x:x + w]

    def detectPlateRough(self, image_gray, resize_h=720, en_scale=1.08, top_bottom_padding_rate=0.05):
        """
        Coarse plate detection with the Haar cascade.

        :param image_gray: input image (actually BGR colour; converted to gray internally)
        :param resize_h: working height the image is scaled to
        :param en_scale: cascade scale factor
        :param top_bottom_padding_rate: fraction of height stripped from top and bottom
        :return: list of [padded_crop, [x, y, w, h], tight_crop]
        :raises Exception: if top_bottom_padding_rate > 0.2
        """
        if top_bottom_padding_rate > 0.2:
            raise Exception("error:top_bottom_padding_rate == {}".format(top_bottom_padding_rate))
        height = image_gray.shape[0]
        padding = int(height * top_bottom_padding_rate)
        scale = image_gray.shape[1] / float(image_gray.shape[0])

        image = cv2.resize(image_gray, (int(scale * resize_h), resize_h))

        # Keep the full resized width.  The original sliced 0:image_gray.shape[1]
        # (the PRE-resize width), silently discarding the right part of the frame
        # whenever resize_h differed from the original height.
        image_color_cropped = image[padding:resize_h - padding, :]

        image_gray = cv2.cvtColor(image_color_cropped, cv2.COLOR_RGB2GRAY)

        watches = watch_cascade.detectMultiScale(image_gray, en_scale, 2, minSize=(36, 9), maxSize=(36 * 40, 9 * 40))

        cropped_images = []
        for (x, y, w, h) in watches:
            cropped_origin = self.cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h)))
            # Enlarge the detection box so the whole plate is guaranteed inside.
            x -= w * 0.3
            w += w * 0.6
            y -= h * 0.8
            h += h * 1.1

            cropped = self.cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h)))

            cropped_images.append([cropped, [x, y + padding, w, h], cropped_origin])
        return cropped_images

    def fitLine_ransac(self, pts, zero_add=0):
        """
        Fit a robust (Huber) line through *pts* and return its y at x=0 and x=136,
        offset by the 30 px border added in findContoursAndDrawBoundingBox.

        :param pts: Nx2 array of points
        :param zero_add: extra vertical offset
        :return: (lefty, righty), or (0, 0) when fewer than two points
        """
        if len(pts) >= 2:
            [vx, vy, x, y] = cv2.fitLine(pts, cv2.DIST_HUBER, 0, 0.01, 0.01)
            lefty = int((-x * vy / vx) + y)
            righty = int(((136 - x) * vy / vx) + y)
            return lefty + 30 + zero_add, righty + 30 + zero_add
        return 0, 0

    def angle(self, x, y):
        """Return atan2(y, x) in (approximate) degrees, truncated to int."""
        return int(math.atan2(float(y), float(x)) * 180.0 / 3.1415)

    def h_rot(self, src, angle, scale=1.0):
        """
        Rotate *src* by *angle* degrees around its centre, enlarging the canvas
        so no content is clipped.
        """
        w = src.shape[1]
        h = src.shape[0]
        rangle = np.deg2rad(angle)
        # new canvas size that fully contains the rotated image
        nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w)) * scale
        nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w)) * scale
        rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, scale)
        # shift the rotation so the original image centre maps to the new canvas centre
        rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5, 0]))
        rot_mat[0, 2] += rot_move[0]
        rot_mat[1, 2] += rot_move[1]
        return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)

    def v_rot(self, img, angel, shape, max_angel):
        """
        Apply a horizontal shear (perspective warp) of *angel* degrees to undo
        vertical skew.

        :return: (warped image, 3x3 perspective matrix)
        """
        size_o = [shape[1], shape[0]]
        size = (shape[1] + int(shape[0] * np.cos((float(max_angel) / 180) * 3.14)), shape[0])
        interval = abs(int(np.sin((float(angel) / 180) * 3.14) * shape[0]))
        pts1 = np.float32([[0, 0], [0, size_o[1]], [size_o[0], 0], [size_o[0], size_o[1]]])
        if (angel > 0):
            pts2 = np.float32([[interval, 0], [0, size[1]], [size[0], 0], [size[0] - interval, size_o[1]]])
        else:
            pts2 = np.float32([[0, 0], [interval, size[1]], [size[0] - interval, 0], [size[0], size_o[1]]])

        M = cv2.getPerspectiveTransform(pts1, pts2)
        dst = cv2.warpPerspective(img, M, size)
        return dst, M

    def skew_detection(self, image_gray):
        """
        Estimate horizontal and vertical skew angles from the dominant local
        gradient orientations (corner eigenvectors sampled on a grid).

        :param image_gray: grayscale image
        :return: (skew_h, skew_v) angles in degrees
        """
        h, w = image_gray.shape[:2]
        eigen = cv2.cornerEigenValsAndVecs(image_gray, 12, 5)
        angle_sur = np.zeros(180, np.uint)
        eigen = eigen.reshape(h, w, 3, 2)
        flow = eigen[:, :, 2]
        d = 12
        # sample orientation every d pixels and build an angle histogram
        points = np.dstack(np.mgrid[d / 2:w:d, d / 2:h:d]).reshape(-1, 2)
        for x, y in points:
            vx, vy = np.int32(flow[int(y), int(x)] * d)

            ang = self.angle(vx, vy)
            angle_sur[(ang + 180) % 180] += 1

        # np.float removed in NumPy 1.24+
        angle_sur = angle_sur.astype(float)
        angle_sur = (angle_sur - angle_sur.min()) / (angle_sur.max() - angle_sur.min())
        angle_sur = filters.gaussian_filter1d(angle_sur, 5)
        skew_v_val = angle_sur[20:180 - 20].max()
        skew_v = angle_sur[30:180 - 30].argmax() + 30
        skew_h_A = angle_sur[0:30].max()
        skew_h_B = angle_sur[150:180].max()
        skew_h = 0
        if (skew_h_A > skew_v_val * 0.3 or skew_h_B > skew_v_val * 0.3):
            if skew_h_A >= skew_h_B:
                skew_h = angle_sur[0:20].argmax()
            else:
                skew_h = - angle_sur[160:180].argmax()
        return skew_h, skew_v

    def fastDeskew(self, image):
        """Deskew *image* using the detected vertical skew angle."""
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        skew_h, skew_v = self.skew_detection(image_gray)
        deskew, M = self.v_rot(image, int((90 - skew_v) * 1.5), image.shape, 60)
        return deskew, M

    def findContoursAndDrawBoundingBox(self, image_rgb):
        """
        Fine plate localisation: collect character-sized contours over a sweep
        of adaptive thresholds, fit upper/lower border lines, then perspective
        warp the plate to the canonical 136x36 and deskew it.

        :param image_rgb: rough BGR plate crop
        :return: rectified 136x36 plate image
        """

        line_upper = []
        line_lower = []

        line_experiment = []
        gray_image = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)

        for k in np.linspace(-50, 0, 15):

            binary_niblack = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 17,
                                                   k)
            # cv2.findContours returns 3 values on OpenCV 3.x but 2 on 4.x;
            # [-2] selects the contour list portably on both.
            contours = cv2.findContours(binary_niblack.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
            for contour in contours:
                bdbox = cv2.boundingRect(contour)
                # keep boxes that look like characters (tall-ish, plausible area)
                if (bdbox[3] / float(bdbox[2]) > 0.7 and bdbox[3] * bdbox[2] > 100 and bdbox[3] * bdbox[2] < 1200) or (
                        bdbox[3] / float(bdbox[2]) > 3 and bdbox[3] * bdbox[2] < 100):
                    line_upper.append([bdbox[0], bdbox[1]])
                    line_lower.append([bdbox[0] + bdbox[2], bdbox[1] + bdbox[3]])

                    line_experiment.append([bdbox[0], bdbox[1]])
                    line_experiment.append([bdbox[0] + bdbox[2], bdbox[1] + bdbox[3]])

        # 30 px vertical border so the fitted lines stay inside the canvas
        rgb = cv2.copyMakeBorder(image_rgb, 30, 30, 0, 0, cv2.BORDER_REPLICATE)
        leftyA, rightyA = self.fitLine_ransac(np.array(line_lower), 3)
        leftyB, rightyB = self.fitLine_ransac(np.array(line_upper), -3)

        rows, cols = rgb.shape[:2]

        pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA], [cols - 1, rightyB], [0, leftyB]])
        pts_map2 = np.float32([[136, 36], [0, 36], [136, 0], [0, 0]])
        mat = cv2.getPerspectiveTransform(pts_map1, pts_map2)
        image = cv2.warpPerspective(rgb, mat, (136, 36), flags=cv2.INTER_CUBIC)
        image, M = self.fastDeskew(image)

        return image

    def finemappingVertical(self, image):
        """
        Refine the left/right plate boundaries with the ``model12`` regressor
        and crop/resize to the canonical 136x36.

        :param image: rectified plate image
        :return: 136x36 refined crop
        """
        resized = cv2.resize(image, (66, 16))
        resized = resized.astype(float) / 255
        with graph.as_default():
            res = model12.predict(np.array([resized]))[0]
        res = res * image.shape[1]
        # np.int removed in NumPy 1.24+
        res = res.astype(int)
        H, T = res
        H -= 3

        if H < 0:
            H = 0
        T += 2

        if T >= image.shape[1] - 1:
            T = image.shape[1] - 1

        image = image[0:35, H:T + 2]

        image = cv2.resize(image, (136, 36))
        return image


def load_models(model_path):
    """
    Build/load every model and resource used by the pipeline and publish them
    through module-level globals (graph, fontC, watch_cascade,
    plate_type_model, model12, ocr_model). Must be called before any prediction.

    :param model_path: dict of resource paths, see the module-level ``model_path``
    """
    global fontC
    global watch_cascade
    global plate_type_model
    global model12
    global ocr_model
    global graph
    # TF1-style graph handle; predictions later run inside `with graph.as_default()`
    graph = tf.get_default_graph()
    fontC = ImageFont.truetype(model_path.get("fontC"), 14, 0)
    watch_cascade = cv2.CascadeClassifier(model_path.get("watch_cascade"))
    plate_type_model = load_model(model_path.get("plate_type"))

    # model12 = load_model(model_path.get("model12"))
    # ocr_model = load_model(model_path.get("ocr_model"))

    def Model12():
        # Small CNN that regresses the 2 horizontal plate boundaries.
        # NOTE: layer names/order must stay unchanged — weights are restored
        # by name/order via load_weights() below.
        input = Input(shape=[16, 66, 3])  # change this shape to [None,None,3] to enable arbitrary shape input
        x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
        x = Activation("relu", name='relu1')(x)
        x = MaxPool2D(pool_size=2)(x)
        x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
        x = Activation("relu", name='relu2')(x)
        x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
        x = Activation("relu", name='relu3')(x)
        x = Flatten()(x)
        output = Dense(2, name="dense")(x)
        output = Activation("relu", name='relu4')(output)
        model = Model([input], [output])
        return model

    def OcrModel():
        # Fully-convolutional OCR network over variable-width plate images;
        # output has len(chars) + 1 classes per timestep (last is the CTC blank).
        input_tensor = Input((None, 40, 3))
        x = input_tensor
        base_conv = 32
        # three conv blocks with 2x2 pooling: 32 -> 64 -> 128 filters
        for i in range(3):
            x = Conv2D(base_conv * (2 ** (i)), (3, 3), padding="same")(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Conv2D(256, (5, 5))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(1024, (1, 1))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(len(chars) + 1, (1, 1))(x)
        x = Activation('sigmoid')(x)
        model = Model(inputs=input_tensor, outputs=x)
        return model

    model12 = Model12()
    model12.load_weights(model_path.get("model12"))
    ocr_model = OcrModel()
    ocr_model.load_weights(model_path.get("ocr_model"))


class ModelPredicter():
    """Thin wrappers around the loaded Keras models (read from module globals
    populated by ``load_models()``)."""

    def plate_type_predict(self, image):
        """
        Classify the plate colour/type.

        :param image: BGR plate crop (resized to 34x9 internally)
        :return: index into the module-level ``plateType`` list
        """
        image = cv2.resize(image, (34, 9))
        # np.float was removed in NumPy 1.24+; builtin float is equivalent
        image = image.astype(float) / 255
        with graph.as_default():
            res = np.array(plate_type_model.predict(np.array([image]))[0])
        return res.argmax()

    def fastdecode(self, y_pred):
        """
        Greedy CTC-style decoding of the OCR network output: keep each argmax
        class that is not the blank and not a repeat of the previous timestep.

        :param y_pred: array reshapeable to (timesteps, len(chars) + 1);
                       the last class is the CTC blank
        :return: (decoded text, mean per-character confidence); ("", 0.0)
                 when nothing decodes
        """
        results = ""
        confidence = 0.0
        table_pred = y_pred.reshape(-1, len(chars) + 1)
        res = table_pred.argmax(axis=1)
        for i, one in enumerate(res):
            if one < len(chars) and (i == 0 or (one != res[i - 1])):
                results += chars[one]
                confidence += table_pred[i][one]
        # guard: an empty decode previously raised ZeroDivisionError
        if results:
            confidence /= len(results)
        return results, confidence

    def plate_predict(self, src):
        """
        Recognise the text of a single plate image.

        :param src: BGR plate crop
        :return: (text, confidence) from :meth:`fastdecode`
        """
        x_temp = cv2.resize(src, (160, 40))
        x_temp = x_temp.transpose(1, 0, 2)  # network expects width-major input
        with graph.as_default():
            y_pred = ocr_model.predict(np.array([x_temp]))
        # drop the first two timesteps (border artefacts from the conv stack)
        y_pred = y_pred[:, 2:, :]
        return self.fastdecode(y_pred)


class Recognizer():
    """High-level entry points: recognise licence plates from a file path,
    a base64 string, or a URL — either locally (``predict``) or via the
    remote OCR service (``ocr_predict``)."""

    def predict(self, image_path=None, image_base64=None, image_url=None):
        """
        Run the local detection + OCR pipeline on one image source.
        Exactly one of the three arguments should be given.

        :return: list of {"plateType", "licensePlate", "confidence"} dicts
        :raises Exception: when no image source is supplied
        """
        if image_path:
            return self.__predict__(self.cv2_image(image_path))
        elif image_base64:
            return self.__predict__(self.base64_2_cv2(image_base64))
        elif image_url:
            return self.__predict__(self.url_image(image_url))
        else:
            raise Exception("请给一个图片")

    def ocr_predict(self, image_path=None, image_base64=None, image_url=None):
        """
        Recognise plates via the remote chineseocr service.
        Exactly one of the three arguments should be given.

        :return: list of candidate plate strings (deduplicated)
        :raises Exception: when no image source is supplied
        """
        if image_path:
            return self.__predict_ocr__(self.image_2_base64(self.cv2_image(image_path)))
        elif image_base64:
            return self.__predict_ocr__(self.image_2_base64(self.base64_2_cv2(image_base64)))
        elif image_url:
            return self.__predict_ocr__(self.image_2_base64(self.url_image(image_url)))
        else:
            raise Exception("请给一个图片")

    def __predict_ocr__(self, base64_str):
        """
        POST the image to the remote OCR service and keep texts that look like
        plates: only plate characters, starting with a province prefix,
        5-10 chars long. Best-effort: network/parse failures yield [].
        """
        data = {
            'imgString': base64_str,
            "billModel": "通用OCR", "textAngle": True, "textLine": False
        }
        texts = []
        try:
            res = requests.post(url=chineseocr, json=data).json().get("res")
            for i in res:
                text = i.get("text", "")
                # strip everything that is not a legal plate character
                s_new = "".join(s for s in text if s in chars)
                if s_new:
                    if s_new[0] in chinese_chars and 5 <= len(s_new) <= 10:
                        texts.append(s_new)
        except Exception:
            # deliberate best-effort: remote OCR is optional (was a bare except)
            pass
        return list(set(texts))

    def url_image(self, url):
        """Download an image from *url* and decode it to a BGR array."""
        r = requests.get(url=url, headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"})
        image = cv2.imdecode(np.frombuffer(r.content, np.uint8), cv2.IMREAD_COLOR)
        return image

    def image_2_base64(self, image):
        """Encode a BGR array as a base64 JPEG string."""
        # .tobytes() replaces the deprecated ndarray.tostring()
        jpg_bytes = cv2.imencode('.jpg', image)[1].tobytes()
        base64_str = base64.b64encode(jpg_bytes).decode("utf8")
        return base64_str

    def cv2_image(self, path):
        """Read an image from *path* as a BGR array.

        Fix: the original ignored *path* and always read the hard-coded
        './aaaa.jpg'."""
        image = cv2.imread(path)
        return image

    def base64_2_cv2(self, image_base64):
        """Decode a base64 image (with or without a 'data:...;base64,' data-URI
        prefix) to a BGR array."""
        # [-1] tolerates plain base64 input; the original indexed [1] and
        # raised IndexError when the data-URI prefix was absent
        payload = image_base64.split("base64,")[-1]
        imgData = base64.b64decode(payload)
        # np.frombuffer replaces the deprecated np.fromstring
        nparr = np.frombuffer(imgData, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        return image

    def __predict__(self, image):
        """
        Full local pipeline: morphological close, rough cascade detection,
        per-candidate type classification, fine localisation and OCR.

        :param image: BGR image
        :return: list of {"plateType", "licensePlate", "confidence"} dicts
        """
        image_dealer = ImageDealer()
        predicter = ModelPredicter()
        se = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3), (-1, -1))
        image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, se)
        images = image_dealer.detectPlateRough(image, int(image.shape[0] * 1.4), top_bottom_padding_rate=0.1)
        result = []
        for j, plate in enumerate(images):
            plate, rect, origin_plate = plate
            plate = cv2.resize(plate, (200, 36))
            ptype = predicter.plate_type_predict(plate)
            # types 1..4 are dark-on-light plates: invert so OCR sees light-on-dark
            if ptype > 0 and ptype < 5:
                plate = cv2.bitwise_not(plate)
            image_rgb = image_dealer.findContoursAndDrawBoundingBox(plate)
            image_rgb = image_dealer.finemappingVertical(image_rgb)
            type_ = plateType[ptype]
            res, confidence = predicter.plate_predict(image_rgb)
            result.append({"plateType": type_, "licensePlate": res, "confidence": confidence})
        return result


def test():
    """Smoke test: load every model and run the whole pipeline on ./aaaa.jpg."""
    load_models(model_path=model_path)
    dealer = ImageDealer()
    runner = ModelPredicter()
    img = cv2.imread("./aaaa.jpg")
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3), (-1, -1))
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    candidates = dealer.detectPlateRough(img, img.shape[0], top_bottom_padding_rate=0.1)
    outputs = []
    for crop, box, raw_crop in candidates:
        crop = cv2.resize(crop, (136, 72))
        kind = runner.plate_type_predict(crop)
        if 0 < kind < 5:
            crop = cv2.bitwise_not(crop)
        refined = dealer.findContoursAndDrawBoundingBox(crop)
        refined = dealer.finemappingVertical(refined)
        label = plateType[kind]
        text, score = runner.plate_predict(refined)
        outputs.append({"plateType": label, "licensePlate": text, "confidence": score})
    print(outputs)


def test1():
    """Recognise ./aaaa.jpg through the high-level Recognizer API and print it."""
    recognizer = Recognizer()
    outcome = recognizer.predict(image_path="./aaaa.jpg")
    print(outcome)


if __name__ == '__main__':
    # Script entry point: run the high-level Recognizer smoke test.
    test1()
