import cv2
import numpy as np
from tensorflow import keras


# ------------ This code can be moved into a standalone module and imported --------------------------------
def locate_and_correct(img_src, img_mask):
    """Locate plate candidate regions in ``img_mask`` and perspective-correct
    each one into a 240x80 crop taken from ``img_src``.

    Args:
        img_src:  source BGR image the plate crops are extracted from.
        img_mask: 3-channel segmentation mask, bright where a plate is.

    Returns:
        Tuple ``(img_src_copy, Lic_img)`` where ``img_src_copy`` is a copy of
        the source with the detected quadrilaterals drawn in green and
        ``Lic_img`` is a list of corrected 240x80 plate images.  When no
        contour is found at all, returns ``([], [])``.
    """
    # OpenCV 4.x findContours returns (contours, hierarchy); OpenCV 3.x
    # returns (ret, contours, hierarchy).  Catch only the unpacking
    # ValueError so real errors are not silently swallowed (was a bare
    # `except:`).
    try:
        contours, hierarchy = cv2.findContours(
            img_mask[:, :, 0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except ValueError:
        ret, contours, hierarchy = cv2.findContours(
            img_mask[:, :, 0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not len(contours):
        # No plate detected.
        return [], []
    else:
        Lic_img = []
        img_src_copy = img_src.copy()
        for ii, cont in enumerate(contours):
            x, y, w, h = cv2.boundingRect(cont)
            img_cut_mask = img_mask[y:y + h, x:x + w]

            # Besides the plate region, contours may contain tiny noise blobs
            # only 1-2 pixels wide/high.  A genuine plate candidate has a high
            # mean mask value and is not extremely small, hence this filter.
            if np.mean(img_cut_mask) >= 75 and w > 15 and h > 15:
                # Rotated minimum-area rectangle: (center, (w, h), angle).
                rect = cv2.minAreaRect(cont)
                # Its four corner coordinates.
                box = cv2.boxPoints(rect).astype(np.int32)
                cont = cont.reshape(-1, 2).tolist()
                # The two point sets fed to getPerspectiveTransform must
                # correspond one-to-one, so sort the rectangle corners into
                # [top-left, bottom-left, top-right, bottom-right].
                box = sorted(box, key=lambda xy: xy[0])  # split left vs right by x
                box_left, box_right = box[:2], box[2:]
                box_left = sorted(box_left, key=lambda x: x[1])   # top vs bottom by y
                box_right = sorted(box_right, key=lambda x: x[1])
                box = np.array(box_left + box_right)  # [TL, BL, TR, BR]

                # Corners of the min-area rectangle; next we search for the
                # corners of the (possibly non-rectangular) plate quadrilateral.
                x0, y0 = box[0][0], box[0][1]
                x1, y1 = box[1][0], box[1][1]
                x2, y2 = box[2][0], box[2][1]
                x3, y3 = box[3][0], box[3][1]

                def point_to_line_distance(X, Y):
                    # Distance from (X, Y) to the top edge (TL-TR) and the
                    # bottom edge (BL-BR) of the rectangle; the vertical-line
                    # branches avoid an infinite slope.
                    if x2 - x0:
                        k_up = (y2 - y0) / (x2 - x0)
                        d_up = abs(k_up * X - Y + y2 - k_up * x2) / (k_up ** 2 + 1) ** 0.5
                    else:
                        d_up = abs(X - x2)
                    if x1 - x3:
                        k_down = (y1 - y3) / (x1 - x3)
                        d_down = abs(k_down * X - Y + y1 - k_down * x1) / (k_down ** 2 + 1) ** 0.5
                    else:
                        d_down = abs(X - x1)
                    return d_up, d_down

                d0, d1, d2, d3 = np.inf, np.inf, np.inf, np.inf
                l0, l1, l2, l3 = (x0, y0), (x1, y1), (x2, y2), (x3, y3)
                # For every contour point, combine (weighted) its distance to
                # each rectangle corner with its distance to the top/bottom
                # edges, and keep the minimizer as that corner of the
                # quadrilateral.
                for each in cont:
                    x, y = each[0], each[1]
                    dis0 = (x - x0) ** 2 + (y - y0) ** 2
                    dis1 = (x - x1) ** 2 + (y - y1) ** 2
                    dis2 = (x - x2) ** 2 + (y - y2) ** 2
                    dis3 = (x - x3) ** 2 + (y - y3) ** 2
                    d_up, d_down = point_to_line_distance(x, y)
                    weight = 0.975
                    if weight * d_up + (1 - weight) * dis0 < d0:  # update on improvement
                        d0 = weight * d_up + (1 - weight) * dis0
                        l0 = (x, y)
                    if weight * d_down + (1 - weight) * dis1 < d1:
                        d1 = weight * d_down + (1 - weight) * dis1
                        l1 = (x, y)
                    if weight * d_up + (1 - weight) * dis2 < d2:
                        d2 = weight * d_up + (1 - weight) * dis2
                        l2 = (x, y)
                    if weight * d_down + (1 - weight) * dis3 < d3:
                        d3 = weight * d_down + (1 - weight) * dis3
                        l3 = (x, y)

                # Source corners [TL, BL, TR, BR] mapped onto the target
                # 240x80 rectangle; order must match between p0 and p1.
                p0 = np.float32([l0, l1, l2, l3])
                p1 = np.float32([(0, 0), (0, 80), (240, 0), (240, 80)])
                transform_mat = cv2.getPerspectiveTransform(p0, p1)
                lic = cv2.warpPerspective(img_src, transform_mat, (240, 80))  # rectified plate
                Lic_img.append(lic)
                cv2.drawContours(img_src_copy, [np.array([l0, l1, l3, l2])], -1, (0, 255, 0), 2)
                # Save the crop locally; overwrites on each detection.
                # NOTE(review): assumes static/plateimg/ already exists -- confirm.
                cv2.imwrite('static/plateimg/plate.jpg', lic)
    return img_src_copy, Lic_img


# -----------------------------------主要部分------------------------------------------------------
# 车牌定位
def unet_predict(unet, img_src_path):
    """Run the U-Net segmentation model on the image at ``img_src_path``.

    Args:
        unet:         Keras model producing a plate mask for a 512x512 input.
        img_src_path: path to the image file (may contain non-ASCII chars).

    Returns:
        ``(img_src, img_mask)``: the 512x512x3 input image and its mask
        scaled to 0-255 uint8 with channel 0 replicated to all channels,
        or ``(None, None)`` on any failure (unreadable file, unexpected
        image layout, model error, ...).
    """
    try:
        # np.fromfile + imdecode handles non-ASCII paths (e.g. on Windows),
        # which a plain cv2.imread call cannot.
        img_src = cv2.imdecode(np.fromfile(img_src_path, dtype=np.uint8), -1)
        if img_src.shape != (512, 512, 3):
            # Resize to the model's input size and drop any alpha channel.
            img_src = cv2.resize(img_src, dsize=(512, 512), interpolation=cv2.INTER_AREA)[:, :, :3]
        img_src = img_src.reshape(1, 512, 512, 3)  # add batch dimension

        img_mask = unet.predict(img_src)
        img_src = img_src.reshape(512, 512, 3)
        img_mask = img_mask.reshape(512, 512, 3)
        # Normalize the prediction to 0-255 and replicate channel 0 across
        # all three channels so downstream code can treat it like a BGR image.
        img_mask = img_mask / np.max(img_mask) * 255
        img_mask[:, :, 2] = img_mask[:, :, 1] = img_mask[:, :, 0]
        img_mask = img_mask.astype(np.uint8)

        return img_src, img_mask
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate.  Any real error yields the (None, None) sentinel
        # that callers check for.
        return None, None


def cnn_predict(cnn, Lic_img):
    """Recognize the characters of each candidate plate image in ``Lic_img``.

    Args:
        cnn:     Keras model whose output reshapes to (7, 65) -- 7 character
                 positions over 65 classes (31 province glyphs + digits +
                 letters, excluding I and O).
        Lic_img: list of plate images (will be resized to 240x80).

    Returns:
        The last plate string that passes the confidence filter, or
        '无法识别' when the list is empty or nothing qualifies.
    """
    characters = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫",
                  "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2",
                  "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M",
                  "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]

    placestr = '无法识别'
    for img_np in Lic_img:
        # JPEG round-trip (lossy) to mimic the training-time preprocessing.
        # NOTE(review): presumably intentional -- confirm against training code.
        _, img_encode = cv2.imencode('.jpg', img_np)
        img_decode = cv2.imdecode(img_encode, 1)
        img = cv2.resize(img_decode, (240, 80))
        lic_pred = cnn.predict(img.reshape(1, 80, 240, 3))
        lic_pred = np.array(lic_pred).reshape(7, 65)
        # Accept only if at least 4 of the 7 positions score >= 0.8.
        if len(lic_pred[lic_pred >= 0.8]) >= 4:
            # Removed the no-op `char = char[0:2] + char[2:]` from the
            # original; argmax per position indexes straight into characters.
            placestr = ''.join(characters[arg] for arg in np.argmax(lic_pred, axis=1))

    return placestr


'''
Given an image path img_src_path, place_str_relust first decodes the image
with cv2 into a NumPy array and reads its height h and width w.  If the image
is already plate-sized -- total pixels at most 240*80 and a width/height
ratio between 2 and 5 -- it is resized to 240x80, its first three (RGB)
channels are kept as lic, and [lic] is passed straight to cnn_predict for
character recognition.  Otherwise, unet_predict segments the image into a
binarized mask img_mask, locate_and_correct extracts and perspective-corrects
the plate region(s) into Lic_img, and cnn_predict is run on those crops.
In both cases the recognized plate string is returned.
'''
def place_str_relust(img_src_path):
    """Recognize the license-plate string of the image at ``img_src_path``.

    Images that already look like a bare plate crop (area <= 240*80 pixels
    and width/height ratio between 2 and 5) are resized and fed to the CNN
    directly; anything larger goes through U-Net localization plus
    perspective correction first.

    Returns the recognized plate string, or '无法识别' when recognition
    fails.  (Function name spelling kept for existing callers.)
    """
    img_src = cv2.imdecode(np.fromfile(img_src_path, dtype=np.uint8), -1)
    h, w = img_src.shape[0], img_src.shape[1]
    if h * w <= 240 * 80 and 2 <= w / h <= 5:
        # Already plate-like: resize to the CNN's 240x80 input and drop any
        # alpha channel.
        lic = cv2.resize(img_src, dsize=(240, 80), interpolation=cv2.INTER_AREA)[:, :, :3]
        lic_imgs = [lic]
    else:
        # Full scene: segment with U-Net, then locate and rectify the plate.
        img_src, img_mask = unet_predict(unet, img_src_path)
        _, lic_imgs = locate_and_correct(img_src, img_mask)
    # Single exit point replaces the duplicated predict/return in each branch;
    # the unused annotated-image copies were dropped.
    return cnn_predict(cnn, lic_imgs)


unet = keras.models.load_model('ai/models/unet.h5')  # load the plate-localization (U-Net) model
cnn = keras.models.load_model('ai/models/cnn.h5')  # load the plate-recognition (CNN) model

# img_src_path = r'C:\Users\29569\Desktop\img\jj.jpg'  # path to a plate image

# Call to obtain the recognition result:
# print(place_str_relust(img_src_path))
