import os

import matplotlib.pyplot as plt
from torchvision import transforms
from torch.utils.data import Dataset
from PIL import Image
from utils import *
from glob import glob
import albumentations as A

class Neu_Seg_Competition_Dataset(Dataset):
    """Segmentation dataset for the NEU surface-defect competition.

    Expects this layout under ``data_path``:
        images/<data_type>/*.jpg        defect images (read as grayscale)
        annotations/<data_type>/*.png   masks whose file stems match the images

    ``__getitem__`` returns ``(image_tensor, mask_tensor, image_name)`` where
    the image is forced to a single channel (1, H, W) and the mask is
    converted with the project helper ``PIL_Image_ToTensor`` (from utils).
    """

    def __init__(self, data_path, data_type, transform=None):
        """
        Args:
            data_path: root directory containing 'images' and 'annotations'.
            data_type: split sub-folder name (e.g. 'training' or 'test').
            transform: optional albumentations transform applied jointly to
                image and mask via ``transform(image=..., mask=...)``.
        """
        self.data_path = data_path
        self.data_type = data_type
        self.image_names_path = glob(os.path.join(data_path, 'images', data_type, '*.jpg'))
        # Force a single channel so every image reaches the model as (1, H, W).
        self.to_tensor = transforms.Compose([
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
        ])
        self.transform_ = transform

    def __len__(self):
        return len(self.image_names_path)

    def __getitem__(self, idx):
        # Fix: use the globbed image path directly instead of re-building it
        # from its own basename, and strip the extension with splitext rather
        # than the fragile [:-4] slice. The mask path shares the same stem.
        image_path = self.image_names_path[idx]
        image_name = os.path.splitext(os.path.basename(image_path))[0]
        image = Image.open(image_path)
        mask = Image.open(os.path.join(self.data_path, 'annotations', self.data_type, f'{image_name}.png'))
        if self.transform_:
            # Albumentations operates on numpy arrays and applies the same
            # random parameters to image and mask in one call.
            aug = self.transform_(image=np.array(image), mask=np.array(mask))
            image = Image.fromarray(aug['image'])
            mask = Image.fromarray(aug['mask'])
        # Single exit point for both the augmented and the raw case.
        return self.to_tensor(image), PIL_Image_ToTensor(mask), image_name

if __name__ == '__main__':
    # Only torch and numpy are needed here; albumentations, matplotlib,
    # utils and PIL.Image are already imported at the top of the file
    # (the previous duplicate imports and the unused ImageFilter were removed).
    import torch
    import numpy as np

    # Training-time augmentation pipeline: flips, mild geometric distortion,
    # photometric jitter, CLAHE, and custom salt-and-pepper noise from utils.
    t_train = A.Compose([
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.GridDistortion(p=0.4),                            # grid deformation
        A.RandomBrightnessContrast((0, 0.2), 0.2, p=0.5),   # brightness / contrast jitter
        A.RandomGamma(p=0.2),                               # random gamma adjustment
        A.CLAHE(clip_limit=2.0, p=0.3),                     # contrast-limited adaptive histogram equalization
        A.Lambda(image=sp_noiseImg, p=0.5),                 # salt-and-pepper noise (utils.sp_noiseImg)
    ])

    train_dataset = Neu_Seg_Competition_Dataset(data_path='.', data_type='test', transform=t_train)
    print(f'Total samples in dataset: {len(train_dataset)}')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, collate_fn=collate_fn)

    # Smoke-test the loader by visualizing one batch (utils.visualize_batch).
    visualize_batch(train_loader)