import os
import time

import torch
import torchvision
import cv2
import numpy as np
from ultralytics import YOLO
from paddleocr import PaddleOCR

# Filter a YOLO pose result down to the boxes/keypoints above a confidence threshold.
def result_prosecc(result, thresh=0.75):
    """Extract confident detections from a YOLO pose result.

    :param result: list of ultralytics Results; only result[0] is used because
                   the camera feeds a single frame at a time.
    :param thresh: minimum box confidence for a detection to be kept.
    :return: tuple (boxes, keypoints) as integer numpy arrays on the CPU —
             boxes in xyxy pixel format, keypoints in xy pixel format
             (integers so they can be passed straight to cv2 drawing calls).
    """
    # Box confidences; build the keep-mask ONCE instead of recomputing
    # torch.where(conf > thresh)[0] for boxes and keypoints separately.
    conf = result[0].boxes.conf
    keep = conf > thresh

    # xyxy is convenient for cv2.rectangle (other formats such as xywh or the
    # normalized variants exist). Cast to int for drawing, filter while the
    # data is still on the original device, then move to CPU / numpy.
    boxes = result[0].boxes.xyxy.int()[keep].cpu().numpy()
    # Same treatment for the keypoints (pixel xy; xyn would be normalized).
    keypoints = result[0].keypoints.xy.int()[keep].cpu().numpy()

    return (boxes, keypoints)

# Perspective-correct a licence plate given its four corner keypoints.
def perspec(img, points, size=(450, 150)):
    """Warp the quadrilateral defined by *points* into an axis-aligned plate image.

    :param img: source image, cv2-style BGR ndarray (copied, not modified).
    :param points: four corner points ordered [bottom_right, bottom_left,
                   top_left, top_right], each an (x, y) pair.
    :param size: (width, height) of the output image. Defaults to (450, 150),
                 i.e. the previous hard-coded 3:1 plate aspect ratio.
    :return: the perspective-corrected plate image of shape (height, width).
    """
    w, h = size
    # cv2.getPerspectiveTransform requires float32 point arrays.
    src_points = np.array(points[:4], dtype=np.float32)
    # Destination corners listed in the SAME order as the source points:
    # bottom_right, bottom_left, top_left, top_right.
    dst_points = np.array([[w, h], [0, h], [0, 0], [w, 0]], dtype=np.float32)
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    # Warp a copy so the caller's frame is left untouched.
    perspec_result = cv2.warpPerspective(img.copy(), M, (w, h))
    # perspec_result = cv2.GaussianBlur(perspec_result, (5,5), 0)
    return perspec_result

# Shared OCR model: angle classification on, GPU on, quiet logging.
ocrModel = PaddleOCR(use_angle_cls = True ,use_gpu= True, debug=False, show_log=False)
def plate_ocr(plate, textConf=0.6):
    """Run OCR on a corrected plate image and return the recognised plate text.

    :param plate: BGR image of a perspective-corrected licence plate.
    :param textConf: minimum confidence for a recognised text line to be kept.
    :return: concatenated text; when longer than 8 characters it is trimmed to
             the first 2 plus last 6 (drops OCR noise from the middle).
    """
    result = ocrModel.ocr(plate)
    result_str = ''

    # One entry per input image; each entry is a list of [box, (text, conf)].
    for textRect in result:
        if textRect is None:
            # PaddleOCR yields None when no text is found in the image.
            print('空')
            # continue (not break) so any remaining entries are still processed
            continue
        for line in textRect:
            # Keep only confidently recognised lines.
            if line[1][1] > textConf:
                result_str = result_str + line[1][0]

    # The old unconditional result_str[:2] + result_str[-6:] duplicated
    # characters whenever the string was shorter than 8; only trim when
    # there is actually something to drop.
    if len(result_str) <= 8:
        return result_str
    return result_str[:2] + result_str[-6:]



if __name__ == '__main__':
    # Load the trained YOLO pose model (nano variant).
    model = YOLO(r'./runs/pose/train7/weights/best.pt')
    # Converts a BGR ndarray frame into a (C, H, W) float tensor in [0, 1].
    transformer = torchvision.transforms.ToTensor()

    # Camera device 0 (default webcam).
    camera = cv2.VideoCapture(0)
    print('camera opened: ', camera.isOpened())

    # Display window.
    cv2.namedWindow('plate detect', cv2.WINDOW_NORMAL)

    # Frame loop: Esc (keycode 27) quits.
    keyCode = -1
    try:
        while keyCode != 27:
            ret, frame = camera.read()
            if not ret:
                # Read failed (device unplugged / stream ended) — frame would
                # be None, so bail out before touching it.
                break
            # Add a batch dimension: the model expects (N, C, H, W).
            frame_batch = transformer(frame).unsqueeze(0)
            print(frame_batch.shape)

            # Timed forward pass (milliseconds).
            t = time.time()
            result = model(frame_batch)
            print('forward time: ', 1000*(time.time()-t))

            # Boxes and keypoints above the confidence threshold.
            boxes, keypoints = result_prosecc(result, 0.75)

            # Perspective-corrected plates collected for this frame.
            plates = []

            # Draw detection boxes and build the corrected plate crops.
            for i, box in enumerate(boxes):
                plates.append(perspec(frame, keypoints[i]))
                cv2.rectangle(frame, box[:2], box[2:], (0,0,255), 3)
            # Keypoints are shaped (num_targets, num_points, 2), hence the
            # nested loops.
            for target in keypoints:
                for point in target:
                    cv2.circle(frame, point, 3, (0,255,0), 3)

            # Show each corrected plate together with its OCR text.
            for i, plate in enumerate(plates):
                plate_text = plate_ocr(plate)
                print('text: ', plate_text)
                cv2.imshow(f'plates{i}', plate)
                cv2.waitKey(1)

            cv2.imshow('plate detect', frame)
            keyCode = cv2.waitKey(0)
    finally:
        # Always release the camera and close the windows on exit.
        camera.release()
        cv2.destroyAllWindows()