import os
import sys
sys.path.append(os.getcwd())

import math

import cv2
import numpy as np
from rknnlite.api import RKNNLite
from hide_warnings import hide_warnings
import warnings
from stereo.yolov6.yolov6 import model_inference

# Body keypoint name -> output-channel index of the pose model.
# Indices 0-16 follow the COCO 17-keypoint ordering; 17-31 are
# extra points (head, neck, torso, feet, hands) specific to this model.
BODY_PARTS = {
    "Nose": 0,
    "LEye": 1,
    "REye": 2,
    "LEar": 3,
    "REar": 4,
    "LShoulder": 5,
    "RShoulder": 6,
    "LElbow": 7,
    "RElbow": 8,
    "LWrist": 9,
    "RWrist": 10,
    "LHip": 11,
    "RHip": 12,
    "LKnee": 13,
    "RKnee": 14,
    "LAnkle": 15,
    "RAnkle": 16,
    "head": 17,
    "Neck": 18,
    "Hip": 19,
    "LBigToe": 20,
    "RBigToe": 21,
    "LSmallToe": 22,
    "RSmallToe": 23,
    "LHeel": 24,
    "RHeel": 25,
    "LOuterThigh": 26,
    "ROuterThigh": 27,
    "Chest": 28,
    "Belly": 29,
    "LHand": 30,
    "RHand": 31,
}

class PPnet_Pose:
    """Top-down pose estimator backed by an RKNN model on a Rockchip NPU.

    Pipeline: warp the person bounding box to the model input resolution,
    run NPU inference, then decode the output heatmaps back into keypoint
    coordinates in the original image space.
    """

    def __init__(self, rknn_model):
        """Load the RKNN model and initialise the NPU runtime.

        Args:
            rknn_model (str): Path to the .rknn model file.

        Raises:
            RuntimeError: If the model cannot be loaded or the runtime
                cannot be initialised.
        """
        self.rknn_lite = RKNNLite()
        print(rknn_model)
        if self.rknn_lite.load_rknn(rknn_model) != 0:
            raise RuntimeError('Load rknn model failed!')
        if self.rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0) != 0:
            raise RuntimeError('Init runtime environment failed!')

        # Model input resolution as [width, height].
        self.image_size = [256, 256]

    def __call__(self, image, bboxes=None):
        """Estimate keypoints for one person box.

        Args:
            image (np.ndarray): BGR image of shape (H, W, 3).
            bboxes: Single box (x1, y1, x2, y2); when None the whole
                frame is used.

        Returns:
            tuple: ``(preds, maxvals)`` — keypoints of shape (1, K, 2) in
            image coordinates and their heatmap peak values (1, K, 1).
        """
        if bboxes is None:
            # Fall back to the full frame (generalises the previously
            # hard-coded 480x640 default).
            height, width = image.shape[:2]
            bboxes = np.array([0, 0, width, height])
        bboxes = np.array(bboxes, dtype=np.int32)

        image, center, scale = self.preprocess(image, bboxes)
        results = self.i_inference(image)
        preds, maxvals = self.postprocess(results, center, scale)
        return preds, maxvals

    def process_output2dict(self, preds):
        """Convert a (K, 2) keypoint array to a {part_name: point} dict.

        Pairs rows of ``preds`` with BODY_PARTS names in declaration
        order instead of rebuilding the key list on every iteration.
        """
        return dict(zip(BODY_PARTS, preds))

    def preprocess(self, image, box):
        """Crop ``box`` out of ``image`` and warp it to the model input size.

        Returns:
            tuple: (batched RGB crop, box center, box scale).
        """
        center, scale, area = self._box2cs(box)
        image = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)
        trans = self.get_affine_transform(center, scale, 0, self.image_size)
        image = cv2.warpAffine(
            image,
            trans,
            (self.image_size[0], self.image_size[1]),  # (width, height) order
            flags=cv2.INTER_LINEAR
        )
        # NOTE(review): no normalisation / channel transpose here — presumably
        # baked into the converted RKNN model; confirm against the conversion
        # config. Only a batch dimension is added.
        image = np.expand_dims(image, axis=0)
        return image, center, scale

    @hide_warnings
    def i_inference(self, image):
        """Run the RKNN model on a single preprocessed batch."""
        return self.rknn_lite.inference(inputs=[image])

    def postprocess(self, results, center, scale):
        """Decode the first output heatmap tensor into image-space keypoints."""
        scale = np.array([scale])
        center = np.array([center])
        preds, maxvals, _ = self.get_final_preds(results[0], center, scale)
        return preds, maxvals

    def _box2cs(self, box):
        """Convert an (x1, y1, x2, y2) box to (center, scale, area)."""
        x, y, x1, y1 = box[:4]
        w, h = x1 - x, y1 - y
        return self._xywh2cs(x, y, w, h)

    def _xywh2cs(self, x, y, w, h):
        """Convert an (x, y, w, h) box to center/scale form.

        The box is padded to a fixed 3:4 aspect ratio and enlarged by 25%
        so the crop keeps the whole person in view.

        Returns:
            tuple: (center (2,), scale (2,), area of the padded box).
        """
        center = np.zeros(2, dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5
        aspect_ratio = 0.75
        pixel_std = 1

        # Grow the short side until w / h == aspect_ratio.
        if w > aspect_ratio * h:
            h = w * 1.0 / aspect_ratio
        elif w < aspect_ratio * h:
            w = h * aspect_ratio
        scale = np.array([w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32)

        if center[0] != -1:
            scale = scale * 1.25
        area = w * h
        area = np.array(area)
        return center, scale, area

    def get_affine_transform(self,
            center, scale, rot, output_size,
            shift=np.array([0, 0], dtype=np.float32), inv=0
    ):
        """Build the 2x3 affine matrix mapping the source box to the model input.

        Args:
            center: Box center (x, y).
            scale: Box size (w, h).
            rot: Rotation in degrees.
            output_size: Destination (width, height).
            shift: Relative crop shift, as a fraction of ``scale``.
            inv: When truthy, return the inverse mapping (dst -> src).
        """
        if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
            scale = np.array([scale, scale])

        scale_tmp = scale
        src_w = scale_tmp[0]
        dst_w = output_size[0]
        dst_h = output_size[1]

        rot_rad = np.pi * rot / 180
        src_dir = self.get_dir([0, src_w * -0.5], rot_rad)
        dst_dir = np.array([0, dst_w * -0.5], np.float32)

        # Three point pairs (center, rotated edge midpoint, derived third
        # point) fully determine the affine transform.
        src = np.zeros((3, 2), dtype=np.float32)
        dst = np.zeros((3, 2), dtype=np.float32)
        src[0, :] = center + scale_tmp * shift
        src[1, :] = center + src_dir + scale_tmp * shift
        dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
        dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir

        src[2:, :] = self.get_3rd_point(src[0, :], src[1, :])
        dst[2:, :] = self.get_3rd_point(dst[0, :], dst[1, :])

        if inv:
            trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
        else:
            trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

        return trans

    def get_3rd_point(self, a, b):
        """Return the point completing a right angle at ``b`` with ``a``."""
        direct = a - b
        return b + np.array([-direct[1], direct[0]], dtype=np.float32)

    def get_dir(self, src_point, rot_rad):
        """Rotate ``src_point`` by ``rot_rad`` radians around the origin."""
        sn, cs = np.sin(rot_rad), np.cos(rot_rad)

        src_result = [0, 0]
        src_result[0] = src_point[0] * cs - src_point[1] * sn
        src_result[1] = src_point[0] * sn + src_point[1] * cs

        return src_result

    def get_final_preds(self, batch_heatmaps, center, scale):
        """Decode heatmaps into image-space keypoints.

        Args:
            batch_heatmaps (np.ndarray): (batch, joints, height, width).
            center (np.ndarray): Box centers, one per batch item.
            scale (np.ndarray): Box scales, one per batch item.

        Returns:
            tuple: (preds in image space, maxvals, raw heatmap-space coords).
        """
        coords, maxvals = self.get_max_preds(batch_heatmaps)

        heatmap_height = batch_heatmaps.shape[2]
        heatmap_width = batch_heatmaps.shape[3]

        # Quarter-pixel refinement: nudge each peak towards the larger
        # neighbouring heatmap value.
        for n in range(coords.shape[0]):
            for p in range(coords.shape[1]):
                hm = batch_heatmaps[n][p]
                px = int(math.floor(coords[n][p][0] + 0.5))
                py = int(math.floor(coords[n][p][1] + 0.5))
                if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
                    diff = np.array(
                        [
                            hm[py][px + 1] - hm[py][px - 1],
                            hm[py + 1][px] - hm[py - 1][px]
                        ]
                    )
                    coords[n][p] += np.sign(diff) * .25

        preds = coords.copy()
        preds2 = coords.copy()
        # Map heatmap coordinates back to the original image space.
        for i in range(coords.shape[0]):
            preds[i] = self.transform_preds(
                coords[i], center[i], scale[i], [heatmap_width, heatmap_height]
            )

        return preds, maxvals, preds2

    def get_max_preds(self, batch_heatmaps):
        """Get per-joint argmax locations and values from score maps.

        Args:
            batch_heatmaps (np.ndarray): (batch_size, num_joints, height, width).

        Returns:
            tuple: (preds (batch, joints, 2) as (x, y), maxvals (batch, joints, 1)).
        """
        assert isinstance(batch_heatmaps, np.ndarray), \
            'batch_heatmaps should be numpy.ndarray'
        assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'

        batch_size = batch_heatmaps.shape[0]
        num_joints = batch_heatmaps.shape[1]
        width = batch_heatmaps.shape[3]
        heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
        idx = np.argmax(heatmaps_reshaped, 2)
        maxvals = np.amax(heatmaps_reshaped, 2)

        maxvals = maxvals.reshape((batch_size, num_joints, 1))
        idx = idx.reshape((batch_size, num_joints, 1))

        preds = np.tile(idx, (1, 1, 2)).astype(np.float32)

        # Flat argmax index -> (x, y) grid coordinates.
        preds[:, :, 0] = (preds[:, :, 0]) % width
        preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)

        # Zero out joints whose peak value is not positive.
        pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
        pred_mask = pred_mask.astype(np.float32)

        preds *= pred_mask
        return preds, maxvals

    def transform_preds(self, coords, center, scale, output_size):
        """Map heatmap-space ``coords`` back into original image space."""
        target_coords = np.zeros(coords.shape)
        trans = self.get_affine_transform(center, scale, 0, output_size, inv=1)
        for p in range(coords.shape[0]):
            target_coords[p, 0:2] = self.affine_transform(coords[p, 0:2], trans)
        return target_coords

    def affine_transform(self, pt, t):
        """Apply the 2x3 affine matrix ``t`` to a single (x, y) point."""
        new_pt = np.array([pt[0], pt[1], 1.]).T
        new_pt = np.dot(t, new_pt)
        return new_pt[:2]
def pose_to_bbox(keypoints: np.ndarray, expansion: float = 1.15) -> np.ndarray:
    """Compute a bounding box enclosing the keypoints, enlarged about its center.

    Args:
        keypoints (np.ndarray): (K, 2+) array of keypoints of one person.
        expansion (float): Factor by which each half-extent is scaled.

    Returns:
        np.ndarray: Box as (left, top, right, bottom).
    """
    xs, ys = keypoints[:, 0], keypoints[:, 1]
    left, top, right, bottom = xs.min(), ys.min(), xs.max(), ys.max()
    # Expand symmetrically around the box center.
    cx, cy = (left + right) / 2, (top + bottom) / 2
    half_w = (cx - left) * expansion
    half_h = (cy - top) * expansion
    return np.concatenate([
        np.array([cx - half_w, cy - half_h]),
        np.array([cx + half_w, cy + half_h])
    ])

def compute_iou(bboxA, bboxB):
    """Compute the Intersection over Union (IoU) of two boxes.

    Args:
        bboxA (list): First box (left, top, right, bottom[, score]).
        bboxB (list): Second box (left, top, right, bottom[, score]).

    Returns:
        float: IoU in [0, 1].
    """
    # Intersection rectangle; empty overlap clamps to zero area.
    ix1, iy1 = max(bboxA[0], bboxB[0]), max(bboxA[1], bboxB[1])
    ix2, iy2 = min(bboxA[2], bboxB[2]), min(bboxA[3], bboxB[3])
    inter_area = max(0, ix2 - ix1) * max(0, iy2 - iy1)

    area_a = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1])
    area_b = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1])
    union_area = float(area_a + area_b - inter_area)
    if union_area == 0:
        # Degenerate boxes: avoid division by zero, flag it.
        union_area = 1e-5
        warnings.warn('union_area=0 is unexpected')

    return inter_area / union_area
# from stereo.utils.Filter.OneEuroFilter_python import OneEuroFilter
# Uses IoU tracking to obtain subsequent tracking boxes between detector runs
class PPnet_Pose_Track(PPnet_Pose):
    """Pose estimator with greedy IoU-based tracking between frames.

    Runs the detector every ``det_frequency`` frames (or when no boxes are
    being tracked) and otherwise reuses the box derived from the previous
    frame's keypoints.
    """
    # Boxes smaller than this (pixels^2) never start a new track.
    MIN_AREA = 1000

    def __init__(self,
                 rknn_model,
                 det_frequency: int = 1000,
                 tracking: bool = True,
                 tracking_thr: float = 0.3):
        """
        Args:
            rknn_model (str): Path to the .rknn pose model.
            det_frequency (int): Run the detector every N frames.
            tracking (bool): Enable IoU tracking between frames.
            tracking_thr (float): Minimum IoU to keep an existing track id.
        """
        super().__init__(rknn_model)
        # Reuse the model already loaded by the base class: binding the
        # inherited __call__ keeps the old ``pose_model(image, bboxes=...)``
        # call shape while avoiding loading a second RKNN runtime (the
        # original constructed a second PPnet_Pose, loading the model twice).
        self.pose_model = super().__call__
        self.det_frequency = det_frequency  # detection frequency (frames)
        self.tracking = tracking  # whether tracking is enabled
        self.tracking_thr = tracking_thr  # tracking IoU threshold
        self.reset()
        self.det_model = model_inference

    def reset(self):
        """Reset tracker state: frame counter, ids and cached boxes."""
        self.frame_cnt = 0
        self.next_id = 0
        self.bboxes_last_frame = []
        self.track_ids_last_frame = []

    def track_by_iou(self, bbox):
        """Greedily match ``bbox`` against last frame's boxes by IoU.

        Args:
            bbox (list): Box info (left, top, right, bottom[, score]).

        Returns:
            tuple: (track_id, match_result). ``track_id`` is the matched or
            newly assigned id, or -1 if the box is too small to track;
            ``match_result`` is the matched previous-frame box or None.
        """
        area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

        max_iou_score = -1
        max_index = -1
        match_result = None
        for index, each_bbox in enumerate(self.bboxes_last_frame):
            iou_score = compute_iou(bbox, each_bbox)
            if iou_score > max_iou_score:
                max_iou_score = iou_score
                max_index = index

        if max_iou_score > self.tracking_thr:
            # Matched an existing track: consume its id and box so no
            # other detection can claim them this frame.
            track_id = self.track_ids_last_frame.pop(max_index)
            match_result = self.bboxes_last_frame.pop(max_index)
        elif area >= self.MIN_AREA:
            # No match, but the box is large enough: start a new track.
            track_id = self.next_id
            self.next_id += 1
        else:
            # Too small to track: ignore.
            track_id = -1

        return track_id, match_result

    def __call__(self, image: np.ndarray, bboxes: list = None):
        """Estimate keypoints for the current frame, tracking the person box.

        Args:
            image (np.ndarray): BGR frame (H, W, 3).
            bboxes (list): Optional externally supplied detection box.

        Returns:
            tuple: ({part_name: (x, y)}, (K, 2) int32 keypoints clipped to
            the frame). Only the first person's keypoints are returned.
        """
        h, w = image.shape[:2]
        if self.frame_cnt % self.det_frequency == 0 or len(self.bboxes_last_frame) == 0:
            if bboxes is None:
                # Periodic (or cold-start) full detection.
                bboxes = self.det_model(image)
        else:
            # Reuse the box derived from the previous frame's keypoints;
            # only the first box is used (single-person assumption).
            bboxes = self.bboxes_last_frame[0]
        keypoints, scores = self.pose_model(image, bboxes=bboxes)

        if not self.tracking:
            # Without tracking: just refresh the cached boxes.
            bboxes_current_frame = [pose_to_bbox(kpts) for kpts in keypoints]
        else:
            # With tracking: seed ids on the first tracked frame.
            if len(self.track_ids_last_frame) == 0:
                self.next_id = len(self.bboxes_last_frame)
                self.track_ids_last_frame = list(range(self.next_id))

            bboxes_current_frame = []
            track_ids_current_frame = []
            for kpts in keypoints:
                bbox = pose_to_bbox(kpts)
                track_id, _ = self.track_by_iou(bbox)
                if track_id > -1:
                    track_ids_current_frame.append(track_id)
                    bboxes_current_frame.append(bbox)

            self.track_ids_last_frame = track_ids_current_frame

        self.bboxes_last_frame = bboxes_current_frame
        self.frame_cnt += 1

        # Truncate keypoints to integer pixels and clamp into the frame.
        keypoints = np.array(keypoints, dtype=np.int32)[0]
        keypoints = np.clip(keypoints, 0, [w - 1, h - 1])
        kps_dict = self.process_output2dict(keypoints)
        return kps_dict, np.array(keypoints)
# Module-level singleton used by importers. NOTE: this loads the RKNN model
# and initialises the NPU runtime as an import-time side effect.
PPnet_Pose_Track_ins = PPnet_Pose_Track("stereo/ppnet/pose_ppnet.rknn")    
# image = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)  # randomly generated image
# keypoints, scores = PPnet_Pose_Track_ins(image)
if __name__ == '__main__':
    # Demo: run one tracked pose inference on a test image and report timing.
    import time

    ppnet_track = PPnet_Pose_Track("ppnet/pose_ppnet.rknn")

    # Target pacing: 30 fps => ~33.3 ms per frame.
    frame_rate = 30
    frame_time = 1 / frame_rate

    print('-' * 100)
    image_ori = cv2.imread('/home/orangepi/Stereo_Project/data/im0.png')

    while True:
        # Guard BEFORE copying: cv2.imread returns None on failure, and the
        # original checked after .copy(), which would raise AttributeError.
        if image_ori is None:
            break
        image = image_ori.copy()

        # Time a single tracked inference.
        time_start = time.time()
        result = ppnet_track(image)
        time_end = time.time()

        print(ppnet_track.bboxes_last_frame)
        box = ppnet_track.bboxes_last_frame[0]
        image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)

        inference_time = time_end - time_start
        print(f"Frame inference time: {inference_time:.4f} seconds")

        # Sleep out the remainder of the 30 fps frame budget, then stop
        # after a single frame (demo only).
        if inference_time < frame_time:
            time.sleep(frame_time - inference_time)
        break
