from torch.utils.data import Dataset
import numpy as np
import os
from PIL import Image
from torchvision import transforms

from .utilsssssss import *


class instrument_dataset(Dataset):
    """Segmentation dataset yielding (image, mask, image_name, mask_name) tuples.

    Images are loaded as float32 RGB arrays; masks as float32 single-channel
    arrays normalized to [0, 1]. The split selects both the directory layout
    and the transform pipeline (augmentations only for "train").

    Args:
        path_Data: root data directory containing the split subdirectories.
        config: object exposing ``img_size`` (int) and ``dataset`` (name/key
            consumed by ``myNormalize``).
        split: "train", "val", or anything else for the test split.
    """

    def __init__(self, path_Data, config, split):
        super().__init__()  # fix: original built the super proxy but never called __init__
        self.split = split
        input_size_h = config.img_size
        input_size_w = config.img_size

        # NOTE(review): normalize/to-tensor deliberately run BEFORE the
        # geometric transforms here (the commented-out original had the
        # reverse order); current order preserved as-is.
        train_transformer = transforms.Compose([
            myNormalize(config.dataset, train=True),
            myToTensor(),
            myRandomHorizontalFlip(p=0.5),
            myRandomVerticalFlip(p=0.5),
            myRandomRotation(p=0.5, degree=[0, 360]),
            myResize(input_size_h, input_size_w)
        ])
        test_transformer = transforms.Compose([
            myNormalize(config.dataset, train=False),
            myToTensor(),
            myResize(input_size_h, input_size_w)
        ])

        # Pick the per-split directories and transform pipeline.
        # (The "val" paths intentionally lack a leading '/' — kept verbatim
        # for backward compatibility with existing path_Data values.)
        if self.split == "train":
            img_dir = path_Data + '/training/images/'
            msk_dir = path_Data + '/training/labels/'
            self.transformer = train_transformer
        elif self.split == "val":
            img_dir = path_Data + 'val/val_img/'
            msk_dir = path_Data + 'val/val_mask/'
            self.transformer = test_transformer
        else:
            img_dir = path_Data + '/test/images/'
            msk_dir = path_Data + '/test/labels/'
            self.transformer = test_transformer

        # fix: os.listdir order is arbitrary (filesystem-dependent); sorting
        # both listings keeps image/mask filenames aligned so pairing by
        # index is deterministic and correct.
        images_list = sorted(os.listdir(img_dir))
        masks_list = sorted(os.listdir(msk_dir))
        self.data = [[img_dir + img_name, msk_dir + msk_name]
                     for img_name, msk_name in zip(images_list, masks_list)]

    def __getitem__(self, indx):
        """Load and transform one sample.

        Returns:
            (img, msk, img_file_name, msk_file_name) where img/msk have been
            passed through the split's transform pipeline.
        """
        img_path, msk_path = self.data[indx]
        # Bare filenames, e.g. 'ISIC_0010022.jpg'.
        img_file_name = img_path.split('/')[-1]
        msk_file_name = msk_path.split('/')[-1]
        # The pretrained weights are float-typed, so convert up front.
        img = np.array(Image.open(img_path).convert('RGB'))
        img = img.astype(np.float32)
        # Grayscale mask scaled to [0, 1], shape (H, W, 1).
        msk = np.expand_dims(np.array(Image.open(msk_path).convert('L')), axis=2) / 255
        msk = msk.astype(np.float32)
        img, msk = self.transformer((img, msk))
        return img, msk, img_file_name, msk_file_name

    def __len__(self):
        """Number of (image, mask) pairs in this split."""
        return len(self.data)

