#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Author:Lijiacai
Email:1050518702@qq.com
===========================================
CopyRight@JackLee.com
===========================================
"""

import cv2
import json
import sys
import time
import numpy as np
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from . import e2e
from . import detect
from . import finemapping  as  fm
from . import segmentation
from . import typeDistinguish as td
from . import finemapping_vertical as fv

# Chinese-capable TrueType font used by Recognazier.drawRectBox to render
# plate labels onto images.
# NOTE(review): the path is relative to the process working directory, not to
# this module's location — loading fails unless the program is started from
# the project root. TODO confirm intended launch directory.
fontC = ImageFont.truetype("./Font/platech.ttf", 14, 0)


class Recognazier():
    """License-plate recognition pipeline.

    Wraps rough detection, fine mapping, plate-type classification and
    end-to-end character recognition from the sibling project modules
    (``detect``, ``fm``, ``fv``, ``td``, ``e2e``).
    """

    def find_edge(self, image):
        """Find the left/right column boundaries of a binarized plate image.

        :param image: 2-D uint8 array, assumed binary (pixel values 0/255)
        :return: ``(start, end)`` column indices; the detected span is padded
                 by 3 px on the left and 4 px on the right, clamped to the
                 image borders. Defaults to the full width when no column is
                 dense enough.
        """
        # Fraction of white pixels per column, normalized into [0, 1].
        # NOTE: np.float was removed in NumPy 1.24; use the builtin float.
        col_density = image.sum(axis=0).astype(float)
        col_density /= image.shape[0] * 255
        start = 0
        end = image.shape[1] - 1

        # Scan from the left for the first sufficiently dense column.
        for i, one in enumerate(col_density):
            if one > 0.4:
                start = max(i - 3, 0)  # pad 3 px left, clamped to border
                break
        # Scan from the right for the last sufficiently dense column.
        for i, one in enumerate(col_density[::-1]):
            if one > 0.4:
                # pad 4 px right, clamped to border
                end = min(end - i + 4, image.shape[1] - 1)
                break
        return start, end

    def verticalEdgeDetection(self, image):
        """Vertical edge detection.

        Applies a Sobel filter in x, thresholds at 0.7x the Otsu level (more
        permissive than plain Otsu), then closes with a wide kernel so the
        character strokes merge into one horizontal band.

        :param image: single-channel uint8 image
        :return: binary uint8 image of the closed vertical-edge map
        """
        image_sobel = cv2.Sobel(image.copy(), cv2.CV_8U, 1, 0)
        # First call only recovers the Otsu threshold level; the second call
        # re-thresholds at 70% of it.
        otsu_level, thres = cv2.threshold(image_sobel, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
        _, thres = cv2.threshold(image_sobel, int(otsu_level * 0.7), 255, cv2.THRESH_BINARY)
        # Wide (3x15) kernel bridges the gaps between vertical strokes.
        kernel = np.ones(shape=(3, 15))
        thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)
        return thres

    def horizontalSegmentation(self, image):
        """Crop the plate to its rough left/right boundaries.

        :param image: plate image (expected roughly 136x36; only rows 0:35
                      are kept)
        :return: cropped plate resized back to the canonical 136x36 size
        """
        thres = self.verticalEdgeDetection(image)
        head, tail = self.find_edge(thres)
        # Extra right padding, clamped to the canonical plate width.
        tail = min(tail + 5, 135)
        image = image[0:35, head:tail]
        image = cv2.resize(image, (136, 36))
        return image

    def drawRectBox(self, image, rect, addText):
        """Draw a bounding box with a text label onto the image.

        :param image: BGR numpy image to annotate
        :param rect: ``(x, y, w, h)`` bounding box
        :param addText: label text (may contain Chinese characters; rendered
                        via PIL because cv2.putText cannot)
        :return: annotated image as a numpy array (input is also mutated by
                 the cv2.rectangle calls)
        """
        x, y, w, h = int(rect[0]), int(rect[1]), int(rect[2]), int(rect[3])
        # Red box around the plate.
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2, cv2.LINE_AA)
        # Filled red banner above the box as the label background.
        cv2.rectangle(image, (x - 1, y - 16), (x + 115, y), (0, 0, 255), -1, cv2.LINE_AA)

        # Round-trip through PIL to render the (possibly Chinese) label.
        img = Image.fromarray(image)
        draw = ImageDraw.Draw(img)
        draw.text((x + 1, y - 16), addText, (255, 255, 255), font=fontC)
        imagex = np.array(img)

        return imagex

    def direct_recognize(self, image):
        """Detect and recognize every license plate in a scene image.

        :param image: full scene image (numpy array, cv2 channel order
                      assumed — TODO confirm against caller)
        :return: list of dicts with keys ``"plateType"``, ``"licensePlate"``
                 and ``"confidence"``, one per detected plate
        """
        images = detect.detectPlateRough(image, image.shape[0], top_bottom_padding_rate=0.1)
        result = []
        for plate, rect, origin_plate in images:
            plate = cv2.resize(plate, (136, 36 * 2))
            ptype = td.SimplePredict(plate)
            # Plate types 1-4 are color-inverted before fine mapping;
            # presumably these are the dark-on-light plate styles — verify
            # against td.plateType.
            if 0 < ptype < 5:
                plate = cv2.bitwise_not(plate)
            image_rgb = fm.findContoursAndDrawBoundingBox(plate)
            image_rgb = fv.finemappingVertical(image_rgb)
            res, confidence = e2e.recognizeOne(image_rgb)
            result.append({"plateType": td.plateType[ptype], "licensePlate": res, "confidence": confidence})
        return result
