import torch

from image_utils.read_img import read_image
# from image_utils.transforms import ResizeTransform, TransformList


def normalize_bbox(bboxes, size=(2000, 2000)):
    """Scale a list of ``[x0, y0, x1, y1]`` boxes into the 0-1000 space.

    Each x-coordinate is scaled by the image width and each y-coordinate
    by the image height, then truncated to int (``int(1000 * c / dim)``).

    Args:
        bboxes: iterable of 4-element boxes in absolute pixel coordinates.
        size: ``(width, height)`` of the source image; defaults to
            ``(2000, 2000)``.

    Returns:
        A new list of 4-element integer boxes in the 0-1000 coordinate space.
    """
    # Hoist the per-axis scale factors out of the loop.
    w, h = size[0], size[1]
    return [
        [
            int(1000 * bbox[0] / w),
            int(1000 * bbox[1] / h),
            int(1000 * bbox[2] / w),
            int(1000 * bbox[3] / h),
        ]
        for bbox in bboxes
    ]
def normalize_box(bbox, size):
    """Scale one ``[x0, y0, x1, y1]`` box into the 0-1000 coordinate space.

    Args:
        bbox: 4-element box in absolute pixel coordinates.
        size: ``(width, height)`` of the source image.

    Returns:
        A 4-element list of ints, each coordinate mapped to
        ``int(1000 * coord / dim)`` where dim alternates width/height.
    """
    # Even indices (x-coords) divide by width, odd indices (y-coords) by height.
    scale = (size[0], size[1])
    return [int(1000 * bbox[i] / scale[i % 2]) for i in range(4)]

def simplify_bbox(bbox):
    """Collapse a flat ``[x, y, x, y, ...]`` coordinate list to one box.

    Even indices are treated as x-coordinates and odd indices as
    y-coordinates; the result is ``[x_min, y_min, x_max, y_max]``.

    NOTE(review): the max() slices start at index 2/3, so the first
    (x, y) pair is excluded from the maxima — presumably fine for
    clockwise polygon points starting top-left; confirm with callers.
    """
    x_min = min(bbox[0::2])
    y_min = min(bbox[1::2])
    x_max = max(bbox[2::2])
    y_max = max(bbox[3::2])
    return [x_min, y_min, x_max, y_max]


def merge_bbox(bbox_list):
    """Return the smallest ``[x0, y0, x1, y1]`` box enclosing every box.

    Args:
        bbox_list: non-empty list of 4-element boxes.

    Raises:
        ValueError: if ``bbox_list`` is empty.
    """
    return [
        min(box[0] for box in bbox_list),
        min(box[1] for box in bbox_list),
        max(box[2] for box in bbox_list),
        max(box[3] for box in bbox_list),
    ]


# def load_image(image_path):
#     image = read_image(image_path, format="BGR")
#     h = image.shape[0]
#     w = image.shape[1]
#     img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)])
#     image = torch.tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1)  # copy to make it writeable
#     return image, (w, h)

def horizontally_nearby(box_1, box_2, tolerance):
    """Return True when box_1's left edge is within *tolerance* of box_2's
    right edge.

    NOTE(review): the gap is signed, so any box_1 that starts left of
    box_2's right edge is always "nearby" — confirm whether ``abs()``
    was intended.
    """
    gap = box_1[0] - box_2[2]
    return gap < tolerance

def vertically_nearby(box_1, box_2, tolerance):
    """Return True when box_1's top edge is within *tolerance* of box_2's
    bottom edge.

    NOTE(review): the gap is signed, so any box_1 that starts above
    box_2's bottom edge is always "nearby" — confirm whether ``abs()``
    was intended.
    """
    gap = box_1[1] - box_2[3]
    return gap < tolerance
def nearby_box(box_1, box_2, tolerance):
    """Return True when box_1 is horizontally OR vertically within
    *tolerance* of box_2.

    Bug fix: the vertical check was previously called as
    ``vertically_nearby(box_1.box_2, tolerance)`` — attribute access
    instead of two arguments — which raised AttributeError (and passed
    the wrong number of arguments) whenever the horizontal check
    returned False.
    """
    return (
        horizontally_nearby(box_1, box_2, tolerance)
        or vertically_nearby(box_1, box_2, tolerance)
    )

