import cv2 
import numpy as np 
import tritonclient.http as httpclient
import time 

class YoloFace:
    """YOLOv8 face detector backed by a Triton Inference Server ensemble.

    Sends the raw, letterboxed UINT8 HWC image to the ensemble model
    (normalization/layout conversion is expected to happen server-side,
    per the declared 'UINT8' [1, H, W, 3] input) and decodes the model
    output rows of the form
    [cx, cy, w, h, conf, kp1x, kp1y, kp1s, ..., kp5x, kp5y, kp5s]
    into pixel-space boxes and 5-point facial landmarks.
    """

    def __init__(self, triton_port=8000, model_name='ensemble_yolov8_face', input_size=640, conf_thres=0.5, iou_thres=0.5):
        """Create the Triton client and pre-build the request tensors.

        Args:
            triton_port: HTTP port of a Triton server on localhost.
            model_name: name of the deployed ensemble model.
            input_size: square network input resolution (pixels).
            conf_thres: default confidence threshold (NOTE: detect() overwrites it).
            iou_thres: IoU threshold used by NMS.
        """
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        self.input_width, self.input_height = input_size, input_size

        triton_host = f'localhost:{triton_port}'
        self.client = httpclient.InferenceServerClient(triton_host)
        self.model_name = model_name

        self.output_names = ['output0']
        # The ensemble takes the raw resized image: batch of 1, HWC, uint8.
        self.inputs = [httpclient.InferInput('images', [1, input_size, input_size, 3], 'UINT8')]
        self.outputs = [httpclient.InferRequestedOutput(name) for name in self.output_names]

    def resize_img(self, img, keep_ratio=True):
        """Resize `img` to the network input size.

        With keep_ratio=True the image is letterboxed: scaled to fit while
        preserving aspect ratio, then padded with black to a square.
        Records the padding offsets (self.top, self.left) and the scaled
        content size (self.newh, self.neww) so postprocess() can map
        coordinates back to the original image.
        """
        # Defaults for the no-padding case (square input or keep_ratio=False).
        # Bug fix: the original assigned (newh, neww) = (input_width, input_height),
        # a swap that was only harmless because the input is square.
        self.top, self.left = 0, 0
        self.newh, self.neww = self.input_height, self.input_width

        if keep_ratio and img.shape[0] != img.shape[1]:
            hw_scale = img.shape[0] / img.shape[1]
            if hw_scale > 1:
                # Taller than wide: full height, pad left/right.
                self.newh, self.neww = self.input_height, int(self.input_width / hw_scale)
                img = cv2.resize(img, (self.neww, self.newh), interpolation=cv2.INTER_AREA)
                self.left = int((self.input_width - self.neww) * 0.5)
                img = cv2.copyMakeBorder(img, 0, 0, self.left, self.input_width - self.neww - self.left,
                                         cv2.BORDER_CONSTANT, value=(0, 0, 0))
            else:
                # Wider than tall: full width, pad top/bottom.
                self.newh, self.neww = int(self.input_height * hw_scale), self.input_width
                img = cv2.resize(img, (self.neww, self.newh), interpolation=cv2.INTER_AREA)
                self.top = int((self.input_height - self.newh) * 0.5)
                img = cv2.copyMakeBorder(img, self.top, self.input_height - self.newh - self.top, 0, 0,
                                         cv2.BORDER_CONSTANT, value=(0, 0, 0))
        else:
            img = cv2.resize(img, (self.input_width, self.input_height), interpolation=cv2.INTER_AREA)

        return img

    def preprocess(self, bgr_img):
        """Convert BGR -> RGB and letterbox to the network input size.

        Returns a uint8 HWC image; the ensemble model performs any further
        normalization server-side (its input is declared UINT8).
        """
        img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
        # NOTE: a dead float32/CHW normalization path that previously sat
        # (unreachably) after this return has been removed — it belonged to
        # a client-side-preprocessing model variant, not this UINT8 ensemble.
        return self.resize_img(img)

    def model_forward(self, img):
        """Run one inference request; returns the raw output arrays."""
        # Add the batch dimension expected by the declared [1, H, W, 3] input.
        img = np.expand_dims(img, axis=0)
        self.inputs[0].set_data_from_numpy(img)

        response = self.client.infer(self.model_name,
                                     self.inputs,
                                     request_id=str(1),
                                     outputs=self.outputs)
        outputs = [response.as_numpy(name) for name in self.output_names]
        return outputs

    def postprocess(self, outputs):
        """Decode raw model output into boxes and landmarks in original-image pixels.

        Returns:
            bboxes: (N, 5) float array [x1, y1, x2, y2, conf].
            kpss:   (N, 5, 2) float array of landmark (x, y) pairs.
        """
        output = np.transpose(np.squeeze(outputs[0]))

        # Confidence filter.
        output = output[output[:, 4] > self.conf_thres]
        if output.shape[0] == 0:
            # Bug fix: the original fed an empty array to NMSBoxes (which
            # raises) or indexed with an empty tuple (which returns
            # EVERYTHING). Return well-shaped empty results instead.
            return np.zeros((0, 5), dtype=output.dtype), np.zeros((0, 5, 2), dtype=output.dtype)

        # Center (cx, cy) -> top-left (x, y); width/height stay in cols 2-3.
        output[:, 0] = output[:, 0] - output[:, 2] / 2
        output[:, 1] = output[:, 1] - output[:, 3] / 2

        # Non-maximum suppression on [x, y, w, h] boxes.
        idx = cv2.dnn.NMSBoxes(output[:, 0:4], output[:, 4], self.conf_thres, self.iou_thres)
        idx = np.asarray(idx).reshape(-1)  # flatten: older OpenCV returns (N, 1)
        if idx.size == 0:
            return np.zeros((0, 5), dtype=output.dtype), np.zeros((0, 5, 2), dtype=output.dtype)
        output = output[idx]

        # Undo the letterbox: remove padding, then scale back to source pixels.
        x_factor = self.img_width / self.neww
        y_factor = self.img_height / self.newh

        output[:, 0] = (output[:, 0] - self.left) * x_factor
        output[:, 1] = (output[:, 1] - self.top) * y_factor
        # w/h scaled and converted to the bottom-right corner (x2, y2).
        output[:, 2] = output[:, 2] * x_factor + output[:, 0]
        output[:, 3] = output[:, 3] * y_factor + output[:, 1]

        # Same mapping for the 5 landmarks, laid out as (x, y, score) triples
        # starting at column 5; the per-point scores are left untouched.
        for i in range(5):
            output[:, i * 3 + 5] = (output[:, i * 3 + 5] - self.left) * x_factor
            output[:, i * 3 + 6] = (output[:, i * 3 + 6] - self.top) * y_factor

        # Columns [5,6,8,9,...] pick the (x, y) of each landmark, skipping scores.
        return output[:, 0:5], output[:, [5, 6, 8, 9, 11, 12, 14, 15, 17, 18]].reshape(-1, 5, 2)

    def detect(self, bgr_img, thres=0.5):
        """Detect faces in a BGR image.

        Args:
            bgr_img: HxWx3 BGR image (OpenCV convention).
            thres: confidence threshold for this call.
                   NOTE: this overwrites self.conf_thres (kept for backward
                   compatibility with existing callers).

        Returns:
            (bboxes, kpss) as produced by postprocess().
        """
        self.conf_thres = thres
        self.img_height, self.img_width = bgr_img.shape[:2]

        img_data = self.preprocess(bgr_img)
        outputs = self.model_forward(img_data)
        bboxes, kpss = self.postprocess(outputs)

        return bboxes, kpss

    def draw_detections(self, bgr_img, bboxes, kpss):
        """Draw detected bounding boxes and landmarks onto the image (in place).

        Args:
            bgr_img (numpy.ndarray): original BGR image; modified in place.
            bboxes (numpy.ndarray): (N, 5) boxes [x1, y1, x2, y2, conf].
            kpss (numpy.ndarray): (N, 5, 2) landmark coordinates per box.

        Returns:
            numpy.ndarray: the annotated image (same array as `bgr_img`).
        """
        box_color = (0, 255, 0)  # green boxes
        kp_color = (0, 0, 255)   # red landmarks
        thickness = 2            # line thickness
        kp_radius = 2            # landmark radius

        for box, kps in zip(bboxes, kpss):
            x1, y1, x2, y2, conf = box
            cv2.rectangle(bgr_img, (int(x1), int(y1)), (int(x2), int(y2)), box_color, thickness)

            for kp in kps:
                if kp[0] != -1 and kp[1] != -1:  # skip invalid landmarks
                    cv2.circle(bgr_img, (int(kp[0]), int(kp[1])), kp_radius, kp_color, thickness)

        return bgr_img
    

if __name__ == '__main__':
    # One-shot demo: detect faces in a still image, annotate it, save the result.
    detector = YoloFace(triton_port=8111, model_name='ensemble_yolov8_face')

    frame = cv2.imread('body/2/1050_2025_05_18_01_37_33_173789.jpg')

    t_start = time.time()
    bboxes, kpss = detector.detect(frame)
    t_detect = time.time()
    frame = detector.draw_detections(frame, bboxes, kpss)
    t_draw = time.time()
    cv2.imwrite('./output.jpg', frame)

    print(f'detec time: {t_detect - t_start:.3f}s, draw time: {t_draw - t_detect:.3f}s')

    # RTSP streaming variant, kept for reference:
    # cap = cv2.VideoCapture('rtsp://admin:zjlab2022@10.0.106.112:554/Streaming/Channels/101')
    # while 1:
    #     ret, frame = cap.read()
    #     if frame is None:
    #         print('no frame')
    #         break

    #     frame = cv2.resize(frame, (1920, 1080))

    #     t1 = time.time()
    #     bboxes, kpss = detector.detect(frame)
    #     t2 = time.time()

    #     dstimg = detector.draw_detections(frame, bboxes, kpss)
    #     t3 = time.time()

    #     print(f'detect time: {(t2 - t1) * 1000:.2f}ms, draw time: {(t3 - t2) * 1000:.2f}ms -----------------------')
    #     cv2.imshow('output', dstimg)
    #     cv2.waitKey(1)
