import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
import os
import matplotlib.pyplot as plt
import numpy as np

# draw_mask function copied over from dataload.py
def draw_mask(image: Image.Image, target: Image.Image = None) -> Image.Image:
    """Overlay a semi-transparent segmentation mask on a PASCAL VOC image.

    :param image: input photo (any mode; converted to RGBA internally)
    :param target: segmentation label image whose pixel values are VOC class
        indices (0 = background, 255 = unlabeled); must not be None despite
        the default — assumes a single-channel ('P' or 'L') label image
    :return: a new RGBA image with the class-colored mask alpha-composited
        on top of ``image``
    """
    # RGBA color per VOC class index; alpha 128 keeps the photo visible.
    color_map = {
        0: (0, 0, 0, 128),          # background, black
        1: (247, 116, 95, 128),     # aeroplane
        2: (232, 129, 49, 128),     # bicycle
        3: (208, 142, 49, 128),     # bird
        4: (190, 150, 49, 128),     # boat
        5: (173, 156, 49, 128),     # bottle
        6: (173, 156, 49, 128),     # bus  NOTE(review): same color as bottle — confirm intended palette
        7: (155, 162, 49, 128),     # car
        8: (134, 167, 49, 128),     # cat
        9: (99, 174, 49, 128),      # chair
        10: (49, 178, 82, 128),     # cow
        11: (51, 176, 122, 128),    # dining table
        12: (52, 174, 142, 128),    # dog
        13: (53, 173, 157, 128),    # horse
        14: (54, 172, 170, 128),    # motorbike
        15: (54, 170, 182, 128),    # person
        16: (56, 168, 197, 128),    # potted plant
        17: (57, 166, 216, 128),    # sheep
        18: (73, 160, 244, 128),    # sofa
        19: (135, 149, 244, 128),   # train
        20: (172, 136, 244, 128),   # tv/monitor
        255: (255, 255, 255, 128),  # unlabeled region, white
    }
    # Vectorized mask construction: one boolean pass per class actually
    # present in the label, instead of a Python-level putpixel loop over
    # every pixel (the original loop made O(W*H) Python calls and was
    # very slow on full-size images). Behavior is identical: background
    # (0) and unlabeled (255) pixels stay fully transparent.
    target_array = np.array(target)
    mask_array = np.zeros((target.height, target.width, 4), dtype=np.uint8)
    for cls in np.unique(target_array):
        if cls == 0 or cls == 255:
            continue
        mask_array[target_array == cls] = color_map[int(cls)]
    mask = Image.fromarray(mask_array, mode="RGBA")
    # Composite the translucent mask over the photo.
    image = image.convert("RGBA")
    merged_image = Image.alpha_composite(image, mask)
    return merged_image

def voc_rand_crop(feature, label, height, width):
    """Apply one shared random crop to a feature image and its label image.

    Sampling the crop window once and reusing it keeps the image and its
    segmentation label spatially aligned.
    """
    top, left, crop_h, crop_w = transforms.RandomCrop.get_params(
        feature, (height, width))
    cropped_feature = transforms.functional.crop(feature, top, left, crop_h, crop_w)
    cropped_label = transforms.functional.crop(label, top, left, crop_h, crop_w)
    return cropped_feature, cropped_label

class VOCFCNDataset(Dataset):
    """PASCAL VOC semantic-segmentation dataset yielding randomly cropped
    (image, label) pairs for FCN-style training.

    Images smaller than ``crop_size`` are filtered out at construction time
    so every sample can be cropped without padding.
    """

    def __init__(self, root_dir, image_set='train', crop_size=(256, 256), transform=None, target_transform=None):
        """
        Initialize the VOC dataset loader.

        :param root_dir: dataset root directory (e.g. .../VOCdevkit/VOC2012)
        :param image_set: split name, such as 'train' or 'val'
        :param crop_size: (height, width) of the random crop, matching the
            (height, width) order expected by ``voc_rand_crop``
        :param transform: preprocessing applied to the image
        :param target_transform: preprocessing applied to the label
        """
        self.root_dir = root_dir
        self.image_set = image_set
        self.crop_size = crop_size
        self.transform = transform
        self.target_transform = target_transform

        # Build the image / label file lists from the split file.
        image_dir = os.path.join(root_dir, 'JPEGImages')
        target_dir = os.path.join(root_dir, 'SegmentationClass')
        splits_dir = os.path.join(root_dir, 'ImageSets', 'Segmentation')
        split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')

        with open(split_f, "r") as f:
            file_names = [x.strip() for x in f]

        self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
        self.targets = [os.path.join(target_dir, x + ".png") for x in file_names]

        # Filter out images smaller than the crop size.
        # BUGFIX: PIL's Image.size is (width, height) while crop_size is
        # (height, width); the original comparison had the axes swapped,
        # which mis-filtered any non-square crop size.
        # Opening inside ``with`` also closes each file handle immediately
        # instead of leaking one per image in the split.
        self.valid_indices = []
        for i, path in enumerate(self.images):
            with Image.open(path) as image:
                width, height = image.size
            if height >= crop_size[0] and width >= crop_size[1]:
                self.valid_indices.append(i)

    def __len__(self):
        # Only images that survived the size filter are addressable.
        return len(self.valid_indices)

    def __getitem__(self, idx):
        """Load, randomly crop, and transform the sample at index ``idx``."""
        real_idx = self.valid_indices[idx]
        image = Image.open(self.images[real_idx]).convert('RGB')
        # Keep the label in its native mode so pixel values stay class indices.
        target = Image.open(self.targets[real_idx])

        # The same random crop window is applied to image and label.
        image, target = voc_rand_crop(image, target, *self.crop_size)

        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            target = self.target_transform(target)

        return image, target

# Example usage
if __name__ == "__main__":
    root_dir = './data/VOCdevkit/VOC2012'
    crop_size = (256, 256)

    # Image and label get the same preprocessing: conversion to tensor.
    # Compose is stateless, so one instance can serve both roles.
    to_tensor = transforms.Compose([
        transforms.ToTensor()
    ])

    # Build the dataset over the training split.
    dataset = VOCFCNDataset(
        root_dir,
        image_set='train',
        crop_size=crop_size,
        transform=to_tensor,
        target_transform=to_tensor,
    )

    # Number of samples to visualize; adjust as needed.
    num_samples = 5
    to_pil = transforms.ToPILImage()
    for i in range(num_samples):
        image, target = dataset[i]
        print(f"Sample {i} - Image shape: {image.shape}")
        # Convert the tensors back to PIL images so draw_mask can
        # composite the overlay.
        image_pil = to_pil(image)
        target_pil = to_pil(target)

        # Overlay the segmentation mask and display the result.
        masked_image = draw_mask(image_pil, target_pil)
        plt.imshow(masked_image)
        plt.title(f"Masked Image - Sample {i}")
        plt.show()