import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
import os
import matplotlib.pyplot as plt
import numpy as np

import os
import torch
import torchvision
# from d2l import torch as d2l

# d2l.DATA_HUB['voc2012'] = (d2l.DATA_URL + 'VOCtrainval_11-May-2012.tar',
# '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')
# voc_dir = d2l.download_extract('voc2012', 'VOCdevkit/VOC2012')
# Root of the extracted PASCAL VOC 2012 dataset; `root` is an alias bound to
# the same path string as `voc_dir`.
voc_dir=root = 'data/VOCdevkit/VOC2012'
#@save
# RGB colors used in the VOC segmentation label images; VOC_COLORMAP[i] is
# the mask color of class VOC_CLASSES[i].
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128]]

#@save
# The 21 VOC class names, index-aligned with VOC_COLORMAP (0 = background).
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
                'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                'diningtable', 'dog', 'horse', 'motorbike', 'person',
                'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']


#@save
def voc_colormap2label():
    """Build a lookup table from packed RGB values to VOC class indices.

    Each RGB triple is packed into a single integer as R*65536 + G*256 + B,
    which is then used as an index into the returned 1-D long tensor of
    length 256**3: the entry at a colormap color's packed value holds that
    class's index, and every other entry is 0 (background).
    """
    table = torch.zeros(256 ** 3, dtype=torch.long)
    for cls_idx, (r, g, b) in enumerate(VOC_COLORMAP):
        table[(r * 256 + g) * 256 + b] = cls_idx
    return table

#@save
def voc_label_indices(colormap, colormap2label):
    """将VOC标签中的RGB值映射到它们的类别索引"""
    colormap = colormap.permute(1, 2, 0).numpy().astype('int32')
    """colormap：任意一张读入的图片的RGB值，其前两维是batch和channel"""
    idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256+ colormap[:, :, 2])
    return colormap2label[idx]


# @save
def voc_rand_crop(feature, label, height, width):
    """Randomly crop both the feature image and its segmentation label.

    get_params draws one random (top, left, height, width) rectangle; by
    unpacking the same rectangle into both crop calls, the image and its
    label stay pixel-aligned.
    """
    crop_box = torchvision.transforms.RandomCrop.get_params(
        feature, (height, width))
    cropped_feature = torchvision.transforms.functional.crop(feature, *crop_box)
    cropped_label = torchvision.transforms.functional.crop(label, *crop_box)
    return cropped_feature, cropped_label

#@save
def read_voc_images(voc_dir, is_train=True):
    """Read all VOC feature images and their segmentation labels.

    voc_dir: root of the VOC2012 directory tree.
    is_train: read the file list from train.txt when True, val.txt otherwise.
    Returns (features, labels): two parallel lists of image tensors.
    """
    txt_fname = os.path.join(voc_dir, 'ImageSets', 'Segmentation',
                             'train.txt' if is_train else 'val.txt')
    # Decode label PNGs as RGB so their colors can be mapped to class indices.
    mode = torchvision.io.image.ImageReadMode.RGB
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    features, labels = [], []
    # The loop index was unused, so plain iteration replaces enumerate().
    for fname in images:
        features.append(torchvision.io.read_image(os.path.join(
            voc_dir, 'JPEGImages', f'{fname}.jpg')))
        labels.append(torchvision.io.read_image(os.path.join(
            voc_dir, 'SegmentationClass', f'{fname}.png'), mode))
    return features, labels

# NOTE(review): this eagerly reads the entire training set at import time,
# and `train_features`/`train_labels` are not used anywhere else in this
# file (VOCSegDataset re-reads the data itself) — consider removing.
train_features, train_labels = read_voc_images(voc_dir, True)


#@save
class VOCSegDataset(torch.utils.data.Dataset):
    """A custom Dataset for loading the VOC semantic segmentation data."""

    def __init__(self, is_train, crop_size, voc_dir):
        # ImageNet channel statistics for input normalization.
        self.transform = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.crop_size = crop_size
        raw_features, raw_labels = read_voc_images(voc_dir, is_train=is_train)
        # Drop images smaller than crop_size, then normalize the survivors.
        self.features = [self.normalize_image(img)
                         for img in self.filter(raw_features)]
        self.labels = self.filter(raw_labels)
        # Building the RGB->class lookup table has a fixed cost, so it is
        # constructed once here rather than per sample.
        self.colormap2label = voc_colormap2label()
        print('read ' + str(len(self.features)) + ' examples')

    def normalize_image(self, img):
        """Scale pixel values to [0, 1] and apply channel normalization."""
        return self.transform(img.float() / 255)

    def filter(self, imgs):
        """Keep only images at least as large as the crop size."""
        min_h, min_w = self.crop_size
        return [img for img in imgs
                if img.shape[1] >= min_h and img.shape[2] >= min_w]

    def __getitem__(self, idx):
        """Return a randomly cropped (feature, class-index label) pair."""
        feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
                                       *self.crop_size)
        # Convert the label's RGB colors into per-pixel class indices,
        # e.g. aeroplane-colored pixels become 1.
        return feature, voc_label_indices(label, self.colormap2label)

    def __len__(self):
        return len(self.features)

# draw_mask, copied over from dataload.py
def draw_mask(image: Image.Image, target: Image.Image = None) -> Image.Image:
    """Overlay a semi-transparent segmentation mask onto a PASCAL VOC image.

    image: the RGB photo to draw on.
    target: label image whose pixel values are VOC class indices
        (0 = background, 255 = void/unlabeled); both are left undrawn.
    Returns an RGBA image with class-colored pixels alpha-blended on top.
    """
    color_map = {
        0: (0, 0, 0, 128),  # background, black
        1: (247, 116, 95, 128),  # aeroplane
        2: (232, 129, 49, 128),  # bicycle
        3: (208, 142, 49, 128),  # bird
        4: (190, 150, 49, 128),  # boat
        5: (173, 156, 49, 128),  # bottle
        6: (173, 156, 49, 128),  # bus
        7: (155, 162, 49, 128),  # car
        8: (134, 167, 49, 128),  # cat
        9: (99, 174, 49, 128),  # chair
        10: (49, 178, 82, 128),  # cow
        11: (51, 176, 122, 128),  # dining table
        12: (52, 174, 142, 128),  # dog
        13: (53, 173, 157, 128),  # horse
        14: (54, 172, 170, 128),  # motorbike
        15: (54, 170, 182, 128),  # person
        16: (56, 168, 197, 128),  # potted plant
        17: (57, 166, 216, 128),  # sheep
        18: (73, 160, 244, 128),  # sofa
        19: (135, 149, 244, 128),  # train
        20: (172, 136, 244, 128),  # tv/monitor
        255: (255, 255, 255, 128),  # unlabeled region, white
    }
    image = image.convert("RGBA")
    # The declared default target=None previously crashed on np.array(target);
    # treat None as "nothing to draw" and return the image unchanged.
    if target is None:
        return image
    mask = Image.new("RGBA", image.size, (0, 0, 0, 0))
    # Transpose so the array is indexed [x, y], matching putpixel coordinates.
    target_array = np.array(target).T
    for x in range(target.width):
        for y in range(target.height):
            mpv = target_array[x, y]
            # Skip background (0) and void (255). A class index outside the
            # map would raise KeyError, as in the original code.
            if mpv != 0 and mpv != 255:
                mask.putpixel((x, y), color_map[mpv])
    merged_image = Image.alpha_composite(image, mask)
    return merged_image

# Example usage
if __name__ == "__main__":
    crop_size = (320, 480)
    voc_train = VOCSegDataset(True, crop_size, voc_dir)
    voc_test = VOCSegDataset(False, crop_size, voc_dir)

    # How many training samples to visualize.
    num_samples = 5
    # ToPILImage is stateless, so build the converter once outside the loop.
    to_pil = transforms.ToPILImage()
    for i in range(num_samples):
        image, target = voc_train[i]
        print(f"Sample {i} - Image shape: {image.shape}")
        # Convert the tensors back to PIL images so draw_mask can consume them.
        masked_image = draw_mask(to_pil(image), to_pil(target))
        plt.imshow(masked_image)
        plt.title(f"Masked Image - Sample {i}")
        plt.show()