#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author: renjin@bit.edu.cn

import math
import cv2
import base64
import copy
import math
import numpy as np
try:
    from pycocotools import mask as pycoco_mask
except:
    pass



class Colors:
    """A fixed 20-entry color palette, indexed cyclically by class id."""

    def __init__(self):
        # Palette definitions as RRGGBB hex strings.
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A',
                '92CC17', '3DDB86', '1A9334', '00D4BB', '2C99A8', '00C2FF',
                '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF',
                'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + code) for code in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        """Return color i (modulo palette size) as an RGB tuple, or BGR if requested."""
        r, g, b = self.palette[int(i) % self.n]
        return (b, g, r) if bgr else (r, g, b)

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        """Convert a '#RRGGBB' string to an (r, g, b) tuple of ints."""
        return tuple(int(h[pos:pos + 2], 16) for pos in (1, 3, 5))


g_colors_obj = Colors()


def calc_fov(camera_matrix, imgsz):
    """Compute the camera field of view from intrinsics and image size.

    camera_matrix is a 3x3 intrinsic matrix (fx at [0, 0], fy at [1, 1]);
    imgsz is (width, height) in pixels. Returns [fov_x, fov_y] in degrees.
    """
    half_w = imgsz[0] / 2.
    half_h = imgsz[1] / 2.
    fov_x = 2 * math.degrees(math.atan(half_w / camera_matrix[0, 0]))
    fov_y = 2 * math.degrees(math.atan(half_h / camera_matrix[1, 1]))
    return [fov_x, fov_y]


def calc_los_pos(camera_matrix, imgsz, cxy, owh_pixel, owh_meter):
    """Estimate line-of-sight angles and 3D position of a detected object.

    cxy is the normalized (0..1) box center; owh_pixel and owh_meter are the
    object's (width, height) in pixels and meters. At least one meter
    dimension must be positive; width is preferred for the range estimate.
    Returns ([los_ax, los_ay] in degrees, [x, y, z] in meters, camera frame).
    """
    # Pixel offsets of the box center from the principal point.
    dx = cxy[0] * imgsz[0] - imgsz[0] / 2.
    dy = cxy[1] * imgsz[1] - imgsz[1] / 2.
    los_ax = math.degrees(math.atan(dx / camera_matrix[0, 0]))
    los_ay = math.degrees(math.atan(dy / camera_matrix[1, 1]))
    assert owh_meter[0] > 0 or owh_meter[1] > 0
    # Range from the pinhole model: z = f * real_size / pixel_size.
    if owh_meter[0] > 0:
        z = camera_matrix[0, 0] * owh_meter[0] / owh_pixel[0]
    else:
        z = camera_matrix[1, 1] * owh_meter[1] / owh_pixel[1]
    x = math.tan(math.radians(los_ax)) * z
    y = math.tan(math.radians(los_ay)) * z
    return [los_ax, los_ay], [x, y, z]


def _line_thickness(msg):
    """Pick a bbox line thickness proportional to the smaller image dimension."""
    min_siz = min(msg['spirecv_msgs::2DTargets']['height'], msg['spirecv_msgs::2DTargets']['width'])
    if min_siz <= 720:
        return 1
    if min_siz <= 1200:
        return 2
    return 3


def _overlay_seg_masks(img, targets):
    """Alpha-blend per-target COCO RLE segmentation masks onto img (in place).

    Requires pycocotools; if its import failed at module load this raises
    NameError, matching the original behavior.
    """
    masks = []
    class_ids = []
    for obj in targets:
        if "segmentation" in obj:
            # RLE 'counts' arrive base64-encoded; deep-copy so the message
            # dict is not mutated, then decode for pycocotools.
            obj_seg = copy.deepcopy(obj['segmentation'])
            obj_seg["counts"] = base64.b64decode(obj['segmentation']['counts'])
            masks.append(pycoco_mask.decode(obj_seg))
            class_ids.append(obj['category_id'])

    if not masks:
        return

    alpha = 0.5
    colors_ = [g_colors_obj(cid, True) for cid in class_ids]
    stack = np.asarray(masks, dtype=np.uint8)                # (N, H, W)
    stack = np.ascontiguousarray(stack.transpose(1, 2, 0))   # (H, W, N)
    stack = np.asarray(stack, dtype=np.float32)

    coverage = stack.sum(2, keepdims=True).clip(0, 1)        # (H, W, 1): any mask present
    colored = (stack @ colors_).clip(0, 255)                 # (H, W, 3): summed mask colors
    img[:] = colored * alpha + img * (1 - coverage * alpha)


def _draw_keypoints(img, kpts, kpt_links):
    """Draw skeleton links then keypoint dots; coordinates <= 0 mean 'missing'."""
    for kl in kpt_links:
        p0, p1 = kpts[kl[0]], kpts[kl[1]]
        if p0[0] > 0 and p0[1] > 0 and p1[0] > 0 and p1[1] > 0:
            cv2.line(img, (int(p0[0]), int(p0[1])), (int(p1[0]), int(p1[1])), (0, 255, 255), 2)
    for kp in kpts:
        if kp[0] > 0 and kp[1] > 0:
            cv2.circle(img, (int(kp[0]), int(kp[1])), 4, (180, 105, 255), -1)


def _draw_label(img, bbox, text):
    """Draw a filled black label with white text at the top-left of bbox.

    Short boxes (< 50 px high) get the label above the box so it stays
    readable; taller boxes get it just inside the top edge.
    """
    (text_w, _text_h), _baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    if bbox[3] < 50:  # pixel
        rect_pt2 = (int(bbox[0] + text_w + 2), int(bbox[1] - 17))
        text_org = (int(bbox[0]) + 2, int(bbox[1]) - 5)
    else:
        rect_pt2 = (int(bbox[0] + text_w + 2), int(bbox[1] + 17))
        text_org = (int(bbox[0]) + 2, int(bbox[1]) + 13)
    cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), rect_pt2, (0, 0, 0), -1, cv2.LINE_AA)
    cv2.putText(img, text, text_org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)


def draw_bbox(img, msg, show_seg=False):
    """Render 2D detection results onto img: masks, boxes, OBBs, keypoints, labels.

    Args:
        img: BGR image (numpy array); annotated in place and also returned.
        msg: dict carrying a 'spirecv_msgs::2DTargets' payload with image
            'height'/'width', a 'targets' list, and optional 'kpt_links'.
        show_seg: when True, alpha-blend RLE segmentation masks first.

    Returns:
        The annotated image.
    """
    frame = msg['spirecv_msgs::2DTargets']
    thickness = _line_thickness(msg)

    if show_seg:
        _overlay_seg_masks(img, frame['targets'])

    for obj in frame['targets']:
        if 'obb' in obj:
            # draw_rotated_box is defined elsewhere in this module.
            img = draw_rotated_box(
                img,
                (int(round(obj['obb'][0])), int(round(obj['obb'][1]))),
                (int(round(obj['obb'][2])), int(round(obj['obb'][3]))),
                obj['obb'][4]
            )
        # Axis-aligned bbox is [x, y, w, h] in pixels.
        cv2.rectangle(
            img,
            (int(obj['bbox'][0]), int(obj['bbox'][1])),
            (int(obj['bbox'][0] + obj['bbox'][2]), int(obj['bbox'][1] + obj['bbox'][3])),
            (0, 0, 255),
            thickness,
            cv2.LINE_AA
        )
        # Label: "[tracked_id-]category_name[-score]".
        if 'tracked_id' in obj:
            cate_text = str(obj['tracked_id']) + "-" + obj['category_name']
        else:
            cate_text = obj['category_name']
        if 'score' in obj:
            cate_text += "-" + "{:.2f}".format(obj['score'])
        if 'keypoints' in obj:
            _draw_keypoints(img, obj['keypoints'], frame.get('kpt_links', []))
        _draw_label(img, obj['bbox'], cate_text)
    return img
