### LCX 2025-09-18, generated with Copilot assistance.

import os
import torch
from torch.utils.data import Dataset
import numpy as np
import cv2
from PIL import Image
from torchvision import transforms
import scipy.io as sio

class MyPrivateDataset(Dataset):
    """Dataset pairing RGB images with MATLAB params, landmarks, and masks.

    Expected directory layout (all keyed by image basename ``<name>``):
      - ``image_dir/<name>.png``           : RGB input image (required)
      - ``param_dir/<name>.mat``           : optional MATLAB parameter dict
      - ``label_dir/<name>_landmarks.npy`` : optional (68, 2) landmark array
      - ``label_dir/<name>_mask.png``      : optional grayscale mask

    Missing params / landmarks / masks fall back to an empty dict, a zero
    (68, 2) array, and a zero mask respectively, so samples never raise for
    absent labels.
    """

    def __init__(self, image_dir, param_dir, label_dir, image_size=224,
                 scale=None, trans_scale=0.0, transform=None):
        """
        Args:
            image_dir: directory containing ``<name>.png`` images.
            param_dir: directory containing ``<name>.mat`` parameter files.
            label_dir: directory with ``<name>_landmarks.npy`` and
                ``<name>_mask.png`` label files.
            image_size: side length (pixels) masks are resized to, and the
                default transform resizes images to.
            scale: augmentation scale range; defaults to ``[1.4, 1.8]``.
                (Stored but not used within this class — presumably consumed
                by augmentation code elsewhere. TODO confirm.)
            trans_scale: translation augmentation magnitude (stored, unused
                here — see note on ``scale``).
            transform: optional callable applied to the PIL image; defaults
                to Resize + ToTensor.

        Raises:
            AssertionError: if ``image_dir`` contains no ``.png`` files.
        """
        self.image_dir = image_dir
        self.param_dir = param_dir
        self.label_dir = label_dir
        self.image_size = image_size
        # Bug fix: the previous mutable default ``scale=[1.4, 1.8]`` was a
        # single list object shared by every instance using the default.
        self.scale = [1.4, 1.8] if scale is None else scale
        self.trans_scale = trans_scale

        # ``or`` short-circuits: the default Compose is only built when no
        # transform is supplied.
        self.transform = transform or transforms.Compose([
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
        ])

        self.image_list = sorted(
            f for f in os.listdir(image_dir) if f.endswith('.png')
        )
        assert len(self.image_list) > 0, f"No images found in {image_dir}"

    def __len__(self):
        """Return the number of images found in ``image_dir``."""
        return len(self.image_list)

    def __getitem__(self, idx):
        """Load one sample.

        Returns:
            dict with keys:
              - 'image':    transformed image (tensor under default transform)
              - 'param':    dict loaded from ``.mat`` ({} if missing)
              - 'landmark': float tensor of landmarks, zeros (68, 2) if missing
              - 'mask':     float tensor in [0, 1], (image_size, image_size)
              - 'name':     image basename without extension
        """
        name = os.path.splitext(self.image_list[idx])[0]

        # 1. Load the RGB image and apply the transform.
        img_path = os.path.join(self.image_dir, f'{name}.png')
        image = Image.open(img_path).convert('RGB')
        image = self.transform(image)

        # 2. Load MATLAB parameters (.mat); empty dict when absent.
        param_path = os.path.join(self.param_dir, f'{name}.mat')
        param = sio.loadmat(param_path) if os.path.exists(param_path) else {}

        # 3. Load 68-point landmarks (.npy); zeros when absent.
        landmark_path = os.path.join(self.label_dir, f'{name}_landmarks.npy')
        if os.path.exists(landmark_path):
            landmarks = np.load(landmark_path)
        else:
            landmarks = np.zeros((68, 2), dtype=np.float32)

        # 4. Load grayscale mask (.png), resized and scaled to [0, 1].
        # Bug fix: cv2.imread returns None (no exception) for unreadable or
        # corrupt files, which previously crashed in cv2.resize; fall back
        # to the zero mask in that case as well.
        mask_path = os.path.join(self.label_dir, f'{name}_mask.png')
        mask = None
        if os.path.exists(mask_path):
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        if mask is not None:
            mask = cv2.resize(mask, (self.image_size, self.image_size))
            mask = torch.from_numpy(mask).float() / 255.0
        else:
            mask = torch.zeros((self.image_size, self.image_size),
                               dtype=torch.float32)

        return {
            'image': image,
            'param': param,
            'landmark': torch.from_numpy(landmarks).float(),
            'mask': mask,
            'name': name,
        }
