from sahi.slicing import slice_image
import numpy as np
from core.algorithm.modules.general import multiclass_nms

class SlicePre():
    """
    Slice an image into overlapping tiles before it is fed to the model.
    Must be used together with ``SlicePost``, which merges the per-tile
    detections back into full-image coordinates.

    Slicing lets the model see small objects at a larger relative scale;
    the results are aggregated afterwards.

    args:
        keys: keywords used to read from / write into ``data``:
              keys={"in": "your_in_keyword",
                    "out": "your_save_keyword"}
        overlap_width_ratio: horizontal overlap ratio between adjacent tiles.
        overlap_height_ratio: vertical overlap ratio between adjacent tiles.
    """

    def __init__(self, keys, overlap_width_ratio=0.25, overlap_height_ratio=0.25):
        self.keys = keys
        self.overlap_width_ratio = overlap_width_ratio
        self.overlap_height_ratio = overlap_height_ratio

    def __call__(self, data: dict):
        # NOTE(review): the input is read from the hard-coded "img_data" key,
        # not from self.keys["in"] as the docstring suggests — confirm intended.
        image = data["img_data"][0]
        height, width = image.shape[0], image.shape[1]

        # Tile size: roughly half the image plus the overlap, rounded to an
        # even number (+2 guarantees a non-zero even size).
        tile_h = int(height // 2 * (1 + self.overlap_height_ratio) // 2 * 2 + 2)
        tile_w = int(width // 2 * (1 + self.overlap_width_ratio) // 2 * 2 + 2)

        sliced = slice_image(
            image=image,
            slice_height=tile_h,
            slice_width=tile_w,
            overlap_height_ratio=self.overlap_height_ratio,
            overlap_width_ratio=self.overlap_width_ratio,
        )

        out = {
            # Stack all tile images into one batch array for the model.
            "img_data": np.array([img for img in sliced.images]),
            # Top-left pixel of each tile, needed by SlicePost to shift
            # detections back into full-image coordinates.
            "starting_pixels": sliced.starting_pixels,
        }
        data[self.keys["out"]] = out
        return data
    

class SlicePost():
    """
    Post-processing companion of ``SlicePre``.

    Shifts each tile's detections back into full-image coordinates using the
    tile starting pixels recorded by ``SlicePre``, concatenates them, and
    de-duplicates overlapping detections with multiclass NMS.

    args:
        keys: keywords used to read from / write into ``data``:
              keys={"in": "your_in_keyword",
                    "out": "your_save_keyword",
                    "slice_pre": "keyword_where_SlicePre_stored_its_output"}
        cls_num: number of classes, forwarded to ``multiclass_nms``.
    """

    def __init__(self, keys, cls_num):
        self.keys = keys
        self.cls_num = cls_num

    def __call__(self, data: dict):
        result = data[self.keys["in"]]
        starting_pixels = data[self.keys["slice_pre"]]["starting_pixels"]

        merged_bboxs = []
        st = 0
        for slice_ind, shift_amount in enumerate(starting_pixels):
            ed = st + result['boxes_num'][slice_ind]
            # Shift box corners into full-image coordinates.
            # Assumes columns 2:4 and 4:6 hold (x1, y1) and (x2, y2) — the
            # same layout the original code relied on; TODO confirm upstream.
            result['boxes'][st:ed, 2:4] += shift_amount
            result['boxes'][st:ed, 4:6] += shift_amount
            merged_bboxs.append(result['boxes'][st:ed])
            st = ed

        final_boxes = multiclass_nms(np.concatenate(merged_bboxs), self.cls_num)
        if len(final_boxes) <= 0:
            # BUG FIX: the previous code used np.empty_like(result['boxes']),
            # which produces an UNINITIALIZED array with the original box
            # count — i.e. garbage detections when NMS keeps nothing.
            # Report "no detections" with a zero-row array of the same
            # width/dtype instead.
            final_boxes = [np.zeros((0,) + result['boxes'].shape[1:],
                                    dtype=result['boxes'].dtype)]

        result["boxes"] = np.concatenate(final_boxes)
        result["boxes_num"] = np.array([len(result["boxes"])])

        data[self.keys["out"]] = result
        return data