import os

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms


class CustomDataset(Dataset):
    """Image dataset whose class name is encoded in the filename prefix.

    Filenames are expected to look like ``<class>_<anything>.<ext>`` with
    extension ``.bmp``/``.jpg``/``.png``; the text before the first ``_``
    is the class name. Each sample yields the (optionally transformed)
    image, a triple of semantic attribute labels taken from ``semantic``,
    and an integer class label from ``class_to_label``.

    Args:
        root_dir: directory containing the image files.
        transform: optional torchvision-style transform applied per image.
        semantic: mapping class name -> length-3 attribute list
            (load, fault type, diameter).
        class_to_label: optional mapping class name -> integer label; when
            omitted it is derived from the filenames found in ``root_dir``.
    """

    def __init__(self, root_dir, transform, semantic, class_to_label=None):
        self.root_dir = root_dir
        self.transform = transform
        self.class_to_label = class_to_label if class_to_label is not None else {}
        self.images = [f for f in os.listdir(root_dir) if f.endswith(('.bmp', '.jpg', '.png'))]
        self.semantic = semantic
        # Inverse lookup: attribute triple (as a hashable tuple) -> class name.
        self.reverse_semantic = {tuple(v): k for k, v in self.semantic.items()}

        # Derive the class->label mapping from filenames when none was given.
        if not self.class_to_label:
            self._create_class_to_label_mapping()
        # BUGFIX: label_to_class used to be created only when the mapping was
        # auto-derived (AttributeError when class_to_label was supplied), and
        # it enumerated keys instead of inverting the mapping. Always build
        # the true inverse here.
        self.label_to_class = {label: cls for cls, label in self.class_to_label.items()}

    def _create_class_to_label_mapping(self):
        """Assign consecutive integer labels to the sorted set of class names."""
        self.classes = sorted(set(filename.split('_')[0] for filename in self.images))
        self.class_to_label = {cls: i for i, cls in enumerate(self.classes)}

    def get_class_to_label(self):
        """Return the class-name -> integer-label mapping."""
        return self.class_to_label

    def find_key_by_label(self, label_list):
        """Map a batch of attribute triples back to integer class labels.

        Args:
            label_list: 2-D list shaped [batch_size, 3],
                e.g. ``[[1, 1, 2], [2, 1, 2], ...]``.

        Returns:
            List of integer labels, one per sample; any triple that is
            malformed or unknown falls back to 0 (via the '0-No' class).
        """
        results = []
        for sub_list in label_list:
            try:
                # Triples must have exactly three entries to be valid keys.
                if len(sub_list) != 3:
                    raise ValueError(f"子列表长度应为3，实际为{len(sub_list)}: {sub_list}")
                label_tuple = tuple(sub_list)
            except (TypeError, ValueError) as e:
                # Malformed entry (not a sequence / wrong length): fall back to 0.
                print(f"标签格式错误: {e}，默认返回0")
                results.append(0)
                continue

            # Unknown triples fall back to the '0-No' class name ...
            class_name = self.reverse_semantic.get(label_tuple, '0-No')
            # ... and unknown class names fall back to label 0.
            results.append(self.class_to_label.get(class_name, 0))

        return results

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        """Return (image, (load, fault_type, diameter) labels, class label)."""
        image_path = os.path.join(self.root_dir, self.images[idx])
        # NOTE(review): image is opened without .convert('RGB'); mode depends
        # on the source file — confirm downstream transforms accept it.
        image = Image.open(image_path)
        if self.transform:
            image = self.transform(image)

        # Class name is the filename prefix before the first underscore.
        base_filename = os.path.splitext(self.images[idx])[0]
        class_name = base_filename.split('_')[0]
        # Semantic attribute triple for this class.
        multi_label = self.semantic[class_name]

        load_label = torch.tensor(multi_label[0], dtype=torch.long)
        fault_type_label = torch.tensor(multi_label[1], dtype=torch.long)
        diameter_label = torch.tensor(multi_label[2], dtype=torch.long)
        label = self.class_to_label[class_name]

        return image, (load_label, fault_type_label, diameter_label), label


def create_dataloaders(data_path, batch_size, transform=None, num_workers=0, train_shuffle=True):
    """Build training and validation DataLoaders rooted at ``data_path``.

    Expects the following layout under ``data_path``:
      - ``att.npy``: pickled dict mapping class name -> attribute triple
        (loaded with ``allow_pickle=True`` — only load trusted files);
      - ``train/`` and ``val/`` directories of images.

    Args:
        data_path: dataset root directory.
        batch_size: batch size for both loaders.
        transform: transform applied to every image. Defaults to
            ``transforms.ToTensor()``, resolved at call time (the previous
            default instantiated the transform at import time).
        num_workers: worker-process count for both loaders.
        train_shuffle: whether to shuffle the training loader.

    Returns:
        Tuple ``(train_loader, val_loader)``.
    """
    if transform is None:
        transform = transforms.ToTensor()

    data_dict = np.load(os.path.join(data_path, 'att.npy'), allow_pickle=True).item()

    train_dataset = CustomDataset(root_dir=os.path.join(data_path, 'train'),
                                  transform=transform, semantic=data_dict)
    validation_dataset = CustomDataset(root_dir=os.path.join(data_path, 'val'),
                                       transform=transform, semantic=data_dict)

    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
                              shuffle=train_shuffle, num_workers=num_workers)
    # BUGFIX: num_workers was silently dropped for the validation loader.
    val_loader = DataLoader(dataset=validation_dataset, batch_size=batch_size,
                            shuffle=False, num_workers=num_workers)
    return train_loader, val_loader


# if __name__ == '__main__':
#     data_dict = np.load("../../run/att.npy", allow_pickle=True).item()
#     data = CustomDataset(r'D:\Code\2-ZSL\Zero-Shot-Learning\data\0HP',None, data_dict)
#     d0,d1,d2 = data[0]
#     print(len(data))