import onnxruntime
import numpy as np
import cv2
import copy
import os
import argparse
from PIL import Image, ImageDraw, ImageFont
import time

'''
License-plate detection and license-plate recognition.
'''


class Car_Plate_Detect(object):
    '''
    License-plate detection.
    Adapted from the Chinese_license_plate_detection_recognition project.
    '''

    def __init__(self, model_path, image_size=(640, 640), conf_thresh=0.3, iou_thresh=0.5, device="cpu",
                 encrypt_way="aes"):
        self.model_path = model_path  # path to the ONNX detection model
        self.image_size = image_size  # (h, w) the input image is letterboxed to before inference
        self.conf_thresh = conf_thresh  # confidence threshold for keeping a detection
        self.iou_thresh = iou_thresh  # IoU threshold used by non-maximum suppression
        self.device = device  # inference device: "cpu", or a GPU id (0, 1, ...)
        self.encrypt_way = encrypt_way  # model encryption: "raw" = plain file, "aes" = AES encrypted (not implemented)
        self.session = None  # onnxruntime session, created by init_onnx_model()

    def init_onnx_model(self):
        """Create the onnxruntime InferenceSession; self.session stays None when unsupported."""
        if self.encrypt_way == "raw":
            if self.device == "cpu":
                self.session = onnxruntime.InferenceSession(self.model_path, providers=['CPUExecutionProvider'])
            elif self.device in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]:
                self.session = onnxruntime.InferenceSession(self.model_path, providers=['CUDAExecutionProvider'])
                # Fix: honor the requested GPU instead of the previously hard-coded device 0.
                self.session.set_providers(['CUDAExecutionProvider'],
                                           provider_options=[{'device_id': self.device}])
            else:
                self.session = None
        # AES-encrypted model loading is not implemented yet.
        else:
            self.session = None

    def my_letter_box(self, img, size=(640, 640)):
        """Resize `img` keeping its aspect ratio and pad with gray (114) to exactly `size`.

        Returns (padded_img, scale, left_pad, top_pad), which restore_box()
        uses to map detections back to the original image.
        """
        h, w, c = img.shape

        r = min(size[0] / h, size[1] / w)  # uniform scale that fits inside `size`
        new_h, new_w = int(h * r), int(w * r)
        top = int((size[0] - new_h) / 2)
        left = int((size[1] - new_w) / 2)

        bottom = size[0] - new_h - top
        right = size[1] - new_w - left
        img_resize = cv2.resize(img, (new_w, new_h))
        img = cv2.copyMakeBorder(img_resize, top, bottom, left, right, borderType=cv2.BORDER_CONSTANT,
                                 value=(114, 114, 114))
        return img, r, left, top

    def my_nms(self, boxes, iou_thresh):
        """Greedy non-maximum suppression on boxes shaped [x1, y1, x2, y2, score, ...]."""
        index = np.argsort(boxes[:, 4])[::-1]  # indices sorted by score, highest first
        keep = []
        while index.size > 0:
            i = index[0]
            keep.append(i)
            # Intersection of the current best box with every remaining box.
            x1 = np.maximum(boxes[i, 0], boxes[index[1:], 0])
            y1 = np.maximum(boxes[i, 1], boxes[index[1:], 1])
            x2 = np.minimum(boxes[i, 2], boxes[index[1:], 2])
            y2 = np.minimum(boxes[i, 3], boxes[index[1:], 3])

            w = np.maximum(0, x2 - x1)
            h = np.maximum(0, y2 - y1)

            inter_area = w * h
            # Sum of both areas; the true union is (sum - intersection), used below.
            area_sum = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1]) + (
                        boxes[index[1:], 2] - boxes[index[1:], 0]) * (boxes[index[1:], 3] - boxes[index[1:], 1])
            iou = inter_area / (area_sum - inter_area)
            idx = np.where(iou <= iou_thresh)[0]
            index = index[idx + 1]  # +1 because iou was computed against index[1:]
        return keep

    def xywh2xyxy(self, boxes):
        """Convert [cx, cy, w, h, ...] boxes to corner form [x1, y1, x2, y2, ...]."""
        xywh = copy.deepcopy(boxes)
        xywh[:, 0] = boxes[:, 0] - boxes[:, 2] / 2
        xywh[:, 1] = boxes[:, 1] - boxes[:, 3] / 2
        xywh[:, 2] = boxes[:, 0] + boxes[:, 2] / 2
        xywh[:, 3] = boxes[:, 1] + boxes[:, 3] / 2
        return xywh

    def restore_box(self, boxes, r, left, top):
        """Map letterboxed coordinates (bbox + 4 landmark points) back to the original image."""
        boxes[:, [0, 2, 5, 7, 9, 11]] -= left  # x coordinates: undo horizontal padding
        boxes[:, [1, 3, 6, 8, 10, 12]] -= top  # y coordinates: undo vertical padding

        boxes[:, [0, 2, 5, 7, 9, 11]] /= r  # undo the letterbox scale
        boxes[:, [1, 3, 6, 8, 10, 12]] /= r
        return boxes

    def detect_pre_precessing(self, image):
        """Letterbox, BGR->RGB, scale to [0, 1] and add a batch axis.

        Raises ValueError for zero-sized input (cv2.resize would fail later anyway).
        """
        if image.shape[0] == 0 or image.shape[1] == 0:
            raise ValueError("Input image dimensions cannot be zero")

        image, r, left, top = self.my_letter_box(image, self.image_size)
        image = image[:, :, ::-1].transpose(2, 0, 1).copy().astype(np.float32)  # BGR->RGB, HWC->CHW
        image = image / 255
        image = image.reshape(1, *image.shape)  # add the batch dimension
        return image, r, left, top

    def post_precessing(self, dets, r, left, top):
        """Filter raw model output by confidence, run NMS and map boxes to the original image."""
        choice = dets[:, :, 4] > self.conf_thresh
        dets = dets[choice]
        dets[:, 13:15] *= dets[:, 4:5]  # class scores weighted by objectness
        box = dets[:, :4]
        boxes = self.xywh2xyxy(box)
        score = np.max(dets[:, 13:15], axis=-1, keepdims=True)
        index = np.argmax(dets[:, 13:15], axis=-1).reshape(-1, 1)  # plate-type class index
        output = np.concatenate((boxes, score, dets[:, 5:13], index), axis=1)
        reserve_ = self.my_nms(output, self.iou_thresh)
        output = output[reserve_]
        output = self.restore_box(output, r, left, top)
        return output

    def onnx_predict(self, image):
        '''
        Detect license-plate regions in `image`.

        Returns detect_results, one dict per detected plate:
            {
                "rect": [x1, y1, x2, y2],  # plate bbox in `image` coordinates
                "landmark": 4x2 ndarray,   # the four plate corner points
                "score": 0.87,             # bbox confidence
                "double_level_plate": 1,   # 1: double-level plate, 0: single-level plate
            }
        '''
        image, r, left, top = self.detect_pre_precessing(image)
        onnx_infer = self.session.run([self.session.get_outputs()[0].name],
                                      {self.session.get_inputs()[0].name: image})[0]
        predict_results = self.post_precessing(onnx_infer, r, left, top)
        detect_results = []
        for predict_result in predict_results:
            result_dict = {
                'rect': predict_result[:4].tolist(),
                'landmark': predict_result[5:13].reshape(4, 2),
                'score': predict_result[4],
                'double_level_plate': int(predict_result[-1]),  # 1: double-level, 0: single-level
            }
            detect_results.append(result_dict)
        return detect_results

class Car_Plate_Recog(object):
    '''
    License-plate recognition (plate number + plate background color).
    Adapted from the Chinese_license_plate_detection_recognition project.
    '''

    def __init__(self, model_path, device="cpu", encrypt_way="aes"):
        self.model_path = model_path  # path to the ONNX recognition model
        self.device = device  # inference device: "cpu", or a GPU id (0, 1, ...)
        self.encrypt_way = encrypt_way  # model encryption: "raw" = plain file, "aes" = AES encrypted (not implemented)
        self.session = None  # onnxruntime session, created by init_onnx_model()
        # Decoding vocabularies for the model outputs; their contents and order
        # must match the trained model, so they are kept verbatim.
        self.plate_color_list = ['黑色', '蓝色', '绿色', '白色', '黄色']
        self.plateName = r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品"
        self.mean_value, self.std_value = (0.588, 0.193)  # normalization mean / std of the recognition model

    def init_onnx_model(self):
        """Create the onnxruntime InferenceSession; self.session stays None when unsupported."""
        if self.encrypt_way == "raw":
            if self.device == "cpu":
                self.session = onnxruntime.InferenceSession(self.model_path, providers=['CPUExecutionProvider'])
            elif self.device in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]:
                self.session = onnxruntime.InferenceSession(self.model_path, providers=['CUDAExecutionProvider'])
                # Fix: honor the requested GPU instead of the previously hard-coded device 0.
                self.session.set_providers(['CUDAExecutionProvider'],
                                           provider_options=[{'device_id': self.device}])
            else:
                self.session = None
        # AES-encrypted model loading is not implemented yet.
        else:
            self.session = None

    def order_points(self, pts):
        """Order 4 corner points as (top-left, top-right, bottom-right, bottom-left)."""
        rect = np.zeros((4, 2), dtype="float32")
        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]  # top-left: smallest x + y
        rect[2] = pts[np.argmax(s)]  # bottom-right: largest x + y
        diff = np.diff(pts, axis=1)
        rect[1] = pts[np.argmin(diff)]  # top-right: smallest y - x
        rect[3] = pts[np.argmax(diff)]  # bottom-left: largest y - x
        return rect

    def four_point_transform(self, image, pts):
        """Perspective-warp the quadrilateral `pts` of `image` to an upright rectangle for OCR."""
        rect = self.order_points(pts)
        (tl, tr, br, bl) = rect
        widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        maxWidth = max(int(widthA), int(widthB))
        heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        maxHeight = max(int(heightA), int(heightB))
        dst = np.array([
            [0, 0],
            [maxWidth - 1, 0],
            [maxWidth - 1, maxHeight - 1],
            [0, maxHeight - 1]], dtype="float32")
        M = cv2.getPerspectiveTransform(rect, dst)
        warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
        return warped

    def get_split_merge(self, img):
        """Split a double-level plate into upper/lower rows and stack them side by side."""
        h, w, c = img.shape
        img_upper = img[0:int(5 / 12 * h), :]
        img_lower = img[int(1 / 3 * h):, :]
        img_upper = cv2.resize(img_upper, (img_lower.shape[1], img_lower.shape[0]))
        new_img = np.hstack((img_upper, img_lower))
        # Fix: removed a leftover debugging cv2.waitKey(0) that blocked execution here.
        return new_img

    def decodePlate(self, preds):
        """CTC-style decoding: drop blanks (index 0) and repeats, then map indices to characters."""
        pre = 0
        newPreds = []
        for i in range(len(preds)):
            if preds[i] != 0 and preds[i] != pre:
                newPreds.append(preds[i])
            pre = preds[i]
        plate = ""
        for i in newPreds:
            plate += self.plateName[int(i)]
        return plate

    def rec_pre_precessing(self, img, size=(48, 168)):
        """Resize to the model input (h, w) = `size`, normalize and add a batch axis."""
        img = cv2.resize(img, (size[1], size[0]))  # fix: honor `size` instead of hard-coded (168, 48)
        img = img.astype(np.float32)
        img = (img / 255 - self.mean_value) / self.std_value  # scale to [0, 1], subtract mean, divide by std
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        img = img.reshape(1, *img.shape)  # add the batch dimension
        return img

    def get_plate_result(self, img):
        """Run the recognition model on a rectified plate crop; return (plate_no, plate_color)."""
        img = self.rec_pre_precessing(img)
        y_onnx_plate, y_onnx_color = self.session.run(
            [self.session.get_outputs()[0].name, self.session.get_outputs()[1].name],
            {self.session.get_inputs()[0].name: img})
        index = np.argmax(y_onnx_plate, axis=-1)
        index_color = np.argmax(y_onnx_color)
        plate_color = self.plate_color_list[index_color]
        plate_no = self.decodePlate(index[0])
        return plate_no, plate_color

    def onnx_predict(self, image, detect_results):
        '''
        Recognize the plate number and plate color for every detected region.

        image: full image matrix; may contain several plates.
        detect_results: output of Car_Plate_Detect.onnx_predict — a list of dicts
            with "rect", "landmark", "score" and "double_level_plate" keys.

        Returns recog_results, one dict per plate region:
            {
                "rect": [x1, y1, x2, y2],   # plate bbox in `image` coordinates
                "landmarks": [[x, y] * 4],  # plate corner points
                "score": 0.87,              # detection confidence
                "double_level_plate": 1,    # 1: double-level plate, 0: single-level
                "plate_no": "川AB1889",      # recognized plate number
                "roi_height": 47,           # height of the rectified plate crop
                "plate_color": "蓝色",       # plate background color
            }
        '''
        recog_results = []
        for detect_result in detect_results:
            recog_result = {}
            rect = detect_result["rect"]
            land_marks = detect_result["landmark"]
            double_level_plate = detect_result["double_level_plate"]
            roi_img = self.four_point_transform(image, land_marks)

            if double_level_plate == 1:  # double-level plate: merge the two rows first
                roi_img = self.get_split_merge(roi_img)
                # Fix: the crop is passed on as uint8; the previous float32/255
                # conversion here made rec_pre_precessing normalize twice.

            plate_no, plate_color = self.get_plate_result(roi_img)
            recog_result['rect'] = rect
            recog_result['landmarks'] = land_marks.tolist()
            recog_result['score'] = detect_result["score"]
            recog_result['double_level_plate'] = double_level_plate
            recog_result['plate_no'] = plate_no
            recog_result['roi_height'] = roi_img.shape[0]
            recog_result['plate_color'] = plate_color
            recog_results.append(recog_result)
        return recog_results


def allFilePath(rootPath, allFIleList):
    """Recursively append the full path of every file under rootPath to allFIleList."""
    for entry in os.listdir(rootPath):
        full_path = os.path.join(rootPath, entry)
        if os.path.isfile(full_path):
            allFIleList.append(full_path)
        else:
            # Directory: descend depth-first, in os.listdir order.
            allFilePath(full_path, allFIleList)


def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    """Render `text` (supports Chinese via a TrueType font) onto `img`; return a BGR ndarray."""
    if isinstance(img, np.ndarray):
        # OpenCV BGR array -> PIL image, since PIL handles TrueType fonts.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    font = ImageFont.truetype(
        "font/simsun.ttc", textSize, encoding="utf-8")
    ImageDraw.Draw(img).text((left, top), text, textColor, font=font)
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

def detect_pre_precessing(self, image):
    """Detection pre-processing (module-level variant of Car_Plate_Detect's method).

    Letterboxes the image, converts BGR->RGB and HWC->CHW, scales to [0, 1]
    and adds a batch axis. Returns (blob, r, left, top), where r/left/top let
    callers map detections back to the original image.
    """
    image, r, left, top = self.my_letter_box(image, self.image_size)
    # my_letter_box already pads the image to exactly self.image_size, so the
    # extra cv2.resize calls that used to follow here were redundant; the
    # no-op "unpack" line and debug prints are gone as well.
    image = image[:, :, ::-1].transpose(2, 0, 1).copy().astype(np.float32)  # BGR->RGB, HWC->CHW
    image = image / 255
    image = image.reshape(1, *image.shape)  # add the batch dimension
    return image, r, left, top

    


def draw_result(orgimg, dict_list, colors=None):
    """Draw plate boxes, corner landmarks and plate numbers on orgimg; return the image.

    dict_list: recognition results (see Car_Plate_Recog.onnx_predict).
    colors: optional per-plate BGR color list; defaults to green per plate.
    """
    result_str = ""
    for i, result in enumerate(dict_list):
        rect_area = result['rect']

        x, y, w, h = rect_area[0], rect_area[1], rect_area[2] - rect_area[0], rect_area[3] - rect_area[1]
        padding_w = 0.05 * w
        padding_h = 0.11 * h
        # Clamp the padded box to the image: x into [0, width], y into [0, height].
        # (Fix: y1 was previously clamped with min() against the image *width*,
        # and x2 used max(0, ...) instead of an upper bound.)
        rect_area[0] = max(0, int(x - padding_w))
        rect_area[1] = max(0, int(y - padding_h))
        rect_area[2] = min(orgimg.shape[1], int(rect_area[2] + padding_w))
        rect_area[3] = min(orgimg.shape[0], int(rect_area[3] + padding_h))

        height_area = result['roi_height']
        landmarks = result['landmarks']
        plate_number = result['plate_no']
        result_str += plate_number + " "

        # Use a color from the provided colors list, or default to green.
        color = colors[i] if colors is not None and len(colors) > i else (0, 255, 0)

        for j in range(4):  # corner landmarks (renamed j: avoid shadowing the plate index i)
            cv2.circle(orgimg, (int(landmarks[j][0]), int(landmarks[j][1])), 5, color, -1)
        cv2.rectangle(orgimg, (rect_area[0], rect_area[1]), (rect_area[2], rect_area[3]), (255, 255, 0), 2)
        if len(plate_number) >= 1:
            orgimg = cv2ImgAddText(orgimg, plate_number, rect_area[0] - height_area, rect_area[1] - height_area - 10,
                                   color, height_area)
    print(result_str)
    return orgimg










if __name__ == "__main__":
    begin = time.time()
    # Initialize the plate-detection model.
    car_plate_detect_inst = Car_Plate_Detect(model_path="onnx/plate_detect.onnx",
                                             image_size=(640, 640), conf_thresh=0.3, iou_thresh=0.5,
                                             device="cpu", encrypt_way="raw")
    car_plate_detect_inst.init_onnx_model()

    # Initialize the plate-recognition model.
    car_plate_recog_inst = Car_Plate_Recog(model_path="onnx/plate_rec_color.onnx",
                                           device="cpu", encrypt_way="raw")
    car_plate_recog_inst.init_onnx_model()

    image = cv2.imread("img.png")
    if image is None:
        # cv2.imread returns None (no exception) when the file is missing/unreadable.
        raise FileNotFoundError("img.png could not be read")

    # Detect plates, then recognize the number and color of each region.
    detect_results = car_plate_detect_inst.onnx_predict(image)
    recog_results = car_plate_recog_inst.onnx_predict(image, detect_results)
    print(recog_results)
    print("elapsed: {:.3f}s".format(time.time() - begin))  # `begin` was previously set but never used










