from datasets import load_dataset
from typing import Union, Optional, List
import torch
from torchvision.ops import box_convert  # 调整box 形状
from torchvision.utils import draw_bounding_boxes  # 画框
from torchvision.transforms.functional import pil_to_tensor, to_pil_image  # PIL 转tensor 和 tensor 转PIL
import functools
import albumentations
import numpy as np


def transforms(examples, resize_hw=(260, 260), horizon_flip_rate=0.5, bright_contrast_rate=0.5):
    """
    Preprocess a batch of samples: resize, random horizontal flip and random
    brightness/contrast jitter.  Geometric transforms (resize, flip) are
    applied to the bounding boxes as well, so boxes stay aligned with the
    transformed image.

    :param examples: batch dict with 'image' (PIL images), 'objects' (each
        holding 'bboxes' in pascal_voc xyxy format and 'classes'), plus
        'id', 'height' and 'width' pass-through metadata
    :param resize_hw: target (height, width) after resizing
    :param horizon_flip_rate: probability of a horizontal flip
    :param bright_contrast_rate: probability of a brightness/contrast jitter
    :return: dict with transformed 'image' (CHW tensors), 'bboxes'
        ((N, 4) tensors) and 'classes', plus the pass-through metadata
    """
    transform = albumentations.Compose(
        [
            albumentations.Resize(resize_hw[0], resize_hw[1]),
            albumentations.HorizontalFlip(p=horizon_flip_rate),
            albumentations.RandomBrightnessContrast(p=bright_contrast_rate),
        ],
        bbox_params=albumentations.BboxParams(format='pascal_voc', label_fields=['classes']),
    )

    images, bboxes, classes = [], [], []
    for image, objects in zip(examples['image'], examples['objects']):
        image = np.array(image.convert("RGB"))
        out = transform(
            image=image,
            bboxes=objects['bboxes'],
            classes=objects['classes'],
        )
        images.append(torch.tensor(out['image']).permute(2, 0, 1))  # HWC -> CHW
        # reshape(-1, 4) keeps an (N, 4) shape even when the augmentation has
        # dropped every box (torch.tensor([]) would otherwise be 1-D, which
        # breaks consumers such as draw_bounding_boxes)
        bboxes.append(torch.tensor(out['bboxes']).reshape(-1, 4))
        classes.append(out['classes'])
    return {
        'id': examples['id'],
        'image': images,
        'bboxes': bboxes,
        'classes': classes,
        'height': examples['height'],
        'width': examples['width'],
    }


def pascal_voc_2007(split="train"):
    """
    Load the local Pascal VOC 2007 detection set with on-the-fly
    ``transforms`` preprocessing attached.

    :param split: dataset split to load (default ``"train"``)
    :return: a dataset whose rows are augmented lazily on access
    """
    dataset = load_dataset("/export/data/pascal_voc", "voc2007_main", split=split)
    # To override the default augmentation parameters, pass a
    # functools.partial(transforms, resize_hw=..., ...) instead.
    dataset.set_transform(transforms)
    return dataset


def pascal_voc_2012(split="train"):
    """
    Load the local Pascal VOC 2012 detection set with on-the-fly
    ``transforms`` preprocessing attached.

    :param split: dataset split to load (default ``"train"``)
    :return: a dataset whose rows are augmented lazily on access
    """
    dataset = load_dataset("/export/data/pascal_voc", "voc2012_main", split=split)
    # To override the default augmentation parameters, pass a
    # functools.partial(transforms, resize_hw=..., ...) instead.
    dataset.set_transform(transforms)
    return dataset


def get_classes(dataset):
    """Return the list of class names declared by the dataset's object annotations."""
    objects_feature = dataset.features["objects"]
    class_label = objects_feature.feature["classes"]
    return class_label.names


def show_img_one(image: torch.Tensor, boxes_xyxy: torch.Tensor, save_name: str, labels: Optional[List[str]] = None):
    """Draw red xyxy boxes (with optional per-box labels) on *image* and save the result to *save_name*."""
    annotated = draw_bounding_boxes(image, boxes_xyxy, colors='red', labels=labels)
    to_pil_image(annotated).save(save_name)


def main():
    """
    Demo: visualise one Pascal VOC sample before and after augmentation,
    saving ./test.png (raw) and ./test_new.png (augmented).
    """
    # Load the RAW split here (not pascal_voc_2007()): set_transform replaces
    # the row format with the transforms() output, which has no 'objects' key
    # and whose 'image' is already a tensor, so the code below would break.
    dataset = load_dataset("/export/data/pascal_voc", "voc2007_main", split="train")
    example = dataset[1]
    print(example)
    categories = dataset.features['objects'].feature['classes']
    boxes_xyxy = torch.tensor(example['objects']['bboxes'])
    labels = [categories.int2str(x) for x in example['objects']['classes']]
    # Draw the untransformed sample.
    to_pil_image(
        draw_bounding_boxes(
            pil_to_tensor(example['image']),  # PIL -> uint8 CHW tensor
            boxes_xyxy,
            colors="red",
            labels=labels,
        )
    ).save("./test.png")
    # With albumentations, one Compose updates the image and its bboxes
    # together: resize to (480, 480), always flip horizontally, always
    # jitter brightness/contrast (p=1.0 so the effect is visible).
    transform = albumentations.Compose([
        albumentations.Resize(480, 480),
        albumentations.HorizontalFlip(p=1.0),
        albumentations.RandomBrightnessContrast(p=1.0),
    ], bbox_params=albumentations.BboxParams(format='pascal_voc', label_fields=['classes']))
    image = np.array(example['image'])
    out = transform(
        image=image,
        bboxes=example['objects']['bboxes'],
        classes=example['objects']['classes'],
    )
    print(image.shape)
    image = torch.tensor(out['image']).permute(2, 0, 1)  # HWC -> CHW
    # reshape keeps an (N, 4) shape even if the transform dropped every box
    # (torch.stack on an empty list would raise)
    boxes_xyxy = torch.tensor(out['bboxes']).reshape(-1, 4)
    labels = [categories.int2str(x) for x in out['classes']]
    to_pil_image(
        draw_bounding_boxes(
            image,
            boxes_xyxy,
            colors='red',
            labels=labels,
        )
    ).save("./test_new.png")


if __name__ == '__main__':
    # main()  # uncomment to run the raw-vs-augmented visual comparison demo
    # Quick smoke test: draw the transformed boxes of one VOC 2012 sample.
    dataset = pascal_voc_2012()
    categories = dataset.features['objects'].feature['classes']
    data = dataset[5]
    show_img_one(data['image'], data['bboxes'], "./func_test.png",
                 [categories.int2str(x) for x in data['classes']])
