# Copyright (c) OpenMMLab. All rights reserved.
import argparse

import cv2
import numpy as np
from PIL import Image
from mmdeploy_runtime import PoseDetector


class MMKeypointDetector(object):
    """
        model_dir: projects/wind_power/weights/keypoint_rtmpose-m
        keys:
            in_img: input_data
            in_bbox: kpt_bboxes
            out: kpt_det
        threshold: 0.6
        batch_size: 4
        device: cuda:0
    """
    def __init__(
        self, keys, model_dir, batch_size=1, degree = 0, device="cuda:0"
    ) -> None:
        self.batch_size = batch_size
        self.keys = keys
        if ":" in device:
            device_name, device_id = device.split(":")
            device_id = int(device_id)
        else:
            device_name = device
            device_id = 0
        self.degree = degree
        self.rotation_matrix = np.array([[np.cos(np.radians(degree)), -np.sin(np.radians(degree))],
                                            [np.sin(np.radians(degree)), np.cos(np.radians(degree))]])
        self.detector = PoseDetector(
            model_path=model_dir, device_name=device_name, device_id=device_id
        )

    def rotate_input(self,img_data, boxes):
        pil_img = Image.fromarray(img_data)
        org_center = np.array(pil_img.size)/2

        route_img = pil_img.rotate(self.degree,expand=True)
        rotate_center = np.array(route_img.size) / 2
        route_img = np.array(route_img)

        boxes = boxes.reshape([-1,2,2])

        rotated_boxes = np.dot(boxes - org_center, self.rotation_matrix) + rotate_center
        rotated_boxes = rotated_boxes.reshape([-1,4])
        
        for idx, box in enumerate(rotated_boxes):
            rotated_boxes[idx] = [min(box[0],box[2]),min(box[1],box[3]),max(box[0],box[2]),max(box[1],box[3])]
        return route_img, rotated_boxes, org_center, rotate_center

    def __call__(self, data:dict) -> dict:
        kpts = []
        imgs = data[self.keys["in_data"]]
        if self.keys.get("in_det",False):
            bboxes = data[self.keys["in_det"]]["boxes"]
            for boxes, img in zip(bboxes, imgs):
                boxes = boxes[:,-4:]
                input_tensor = []
                expand_bboxes = []
                box_num = len(boxes)

                img_h, img_w, _ = img.shape
                for box in boxes:
                    x1, y1, x2, y2 = self.expand_crop(img_h,img_w, box)
                    expand_bboxes.append([x1, y1, x2, y2])
                    input_tensor.append(img[y1:y2, x1:x2])

                if self.degree!=0:
                    rotated_img, rotated_boxes, org_center, rotate_center = self.rotate_input(img, boxes)
                    img_h, img_w, _ = rotated_img.shape
                    for box in rotated_boxes:
                        x1, y1, x2, y2 = self.expand_crop(img_h,img_w, box)
                        expand_bboxes.append([x1, y1, x2, y2])
                        input_tensor.append(rotated_img[y1:y2, x1:x2])

                cur_kpts = self.detector.batch(input_tensor)
                if len(cur_kpts)>0:
                    main_kpts = np.concatenate(cur_kpts[:box_num])
                    for kpts_, bbox in zip(main_kpts, expand_bboxes[:box_num]):
                        kpts_[:,0]+=bbox[0]
                        kpts_[:,1]+=bbox[1]

                    if self.degree!=0:
                        rotate_kpts = np.concatenate(cur_kpts[box_num:])
                        for kpts_, bbox in zip(rotate_kpts, expand_bboxes[box_num:]):
                            kpts_[:,0]+=bbox[0]
                            kpts_[:,1]+=bbox[1]
                        rotate_kpts[:,:,:2] = np.dot(rotate_kpts[:,:,:2]-rotate_center,self.rotation_matrix.T)+org_center
                        main_kpts = np.concatenate([main_kpts,rotate_kpts],axis=1)
                    cur_kpts = main_kpts

                kpts.append(cur_kpts)
                if self.degree!=0:
                    pass

        else:
            input_tensor = imgs
            kpts = self.detector.batch(input_tensor) #[[box_num, point_num, [x,y,score]]]
        data[self.keys["out_kpt"]] = kpts
        return data

    def expand_crop(self, img_h, img_w, box, expand_ratio=0.2):
        xmin, ymin, xmax, ymax = box
        if expand_ratio != 0:
            h_half = max((ymax - ymin) * (1 + expand_ratio) / 2.0,1)
            w_half = max((xmax - xmin) * (1 + expand_ratio) / 2.0,1)
            center = [(ymin + ymax) / 2.0, (xmin + xmax) / 2.0]
            ymin = max(0, int(center[0] - h_half))
            ymax = min(img_h - 1, int(center[0] + h_half))
            xmin = max(0, int(center[1] - w_half))
            xmax = min(img_w - 1, int(center[1] + w_half))
        return int(xmin), int(ymin), int(xmax), int(ymax)

    def release(self):
        pass


def parse_args():
    """Parse CLI arguments for the keypoint-to-labelme export script."""
    parser = argparse.ArgumentParser(
        description='show how to use sdk python api')
    parser.add_argument('--device_name',
                        default="cuda",
                        help='name of device, cuda or cpu')
    parser.add_argument('--model_path',
                        default="projects/wind_power/weights/rtmpose_m",
                        help='path of mmdeploy SDK model dumped by model converter')
    parser.add_argument('--image_path',
                        default="/home/smartgis/workspace/data/电力系统/wind_power/预巡/0/keypoint/labelme/data",
                        help='path of an image')
    parser.add_argument('--save_path',
                        default="./data/output",
                        help='path of labelme label')
    # NOTE: class names are strings, so no `type=int` here (the previous
    # int conversion made CLI-supplied names impossible); the default dict
    # maps keypoint index -> label.
    parser.add_argument(
        '--cls_map',
        default={0: "tip", 1: "tip", 2: "tip", 3: "hood", 4: "tail"},
        nargs='+',
        help='class name for each keypoint index')
    parser.add_argument(
        '--bbox',
        nargs='+',
        type=int,
        help='bounding box of an object in format (x, y, w, h)')
    args = parser.parse_args()
    return args


def main():
    """Walk ``--image_path``, run keypoint detection on each image and write
    one labelme-style JSON per image into ``--save_path`` (recreated)."""
    args = parse_args()
    import os, shutil, json

    # Key names must match what MMKeypointDetector.__call__ reads
    # ("in_data" / "in_det" / "out_kpt"); only register "in_det" when boxes
    # are actually supplied, otherwise __call__ would look them up.
    keys = {"in_data": "img_data", "out_kpt": "kpt"}
    if args.bbox is not None:
        keys["in_det"] = "det"
    detector = MMKeypointDetector(keys=keys, model_dir=args.model_path)

    if os.path.exists(args.save_path):
        shutil.rmtree(args.save_path)
    os.makedirs(args.save_path)
    for root_dir, dirs, files in os.walk(args.image_path):
        for filename in files:
            file_path = root_dir + "/" + filename
            img = cv2.imread(file_path)
            if img is None:
                # Not a readable image (e.g. stray JSON/label file): skip it.
                continue
            databox = {"img_data": img[None, ...]}
            if args.bbox is not None:
                # convert (x, y, w, h) -> (left, top, right, bottom)
                bbox = np.array(args.bbox, dtype=int).reshape(-1, 4)
                bbox[:, 2:] += bbox[:, :2]
                databox["det"] = {"boxes": bbox[None, ...]}
            result = detector(databox)["kpt"]

            labelme_json = build_labelme_json(
                result[0][0], args.cls_map, filename, img.shape[1], img.shape[0])
            with open(args.save_path + "/{}.json".format(filename.rsplit(".", 1)[0]), "w") as f:
                json.dump(labelme_json, f)

def build_labelme_json(bboxes, cls_map, filename, width, height):
    """Build a labelme-format annotation dict of "point" shapes.

    `bboxes` is an iterable of keypoint entries (despite the name); only the
    first two values of each entry are used as (x, y), labelled via
    ``cls_map[index]``.
    """
    point_shapes = [
        {
            "label": cls_map[point_idx],
            "points": [[float(point[0]), float(point[1])]],
            "group_id": None,
            "shape_type": "point",
            "flags": {},
        }
        for point_idx, point in enumerate(bboxes)
    ]
    return {
        "version": "5.0.2",
        "flags": {},
        "shapes": point_shapes,
        "imagePath": "..\\data\\" + filename,
        "imageHeight": height,
        "imageWidth": width,
        "imageData": None,
    }
    

# Script entry point: run keypoint detection and dump labelme JSON files.
if __name__ == '__main__':
    main()
