import os

import torch
from torch.utils.data import Dataset

from PIL import Image
import cv2

class mf_data_seg(Dataset):
    """Segmentation dataset reading paired files from <root>/img and <root>/label.

    Image and label share the same file name in parallel folders. In 'test'
    mode the file name is also returned so predictions can be saved under
    the same name.
    """

    def __init__(self, root_path, flag='', transforms=None):
        """
        :param root_path: dataset root containing 'img' and 'label' subfolders
        :param flag: one of 'train', 'val', 'test'
        :param transforms: callable applied to both the image and the label
        """
        assert flag in ['train', 'val', 'test']

        self.root_path = root_path
        self.flag = flag
        self.transforms = transforms

        self.img_dir = os.path.join(self.root_path, 'img')
        self.img_list = os.listdir(self.img_dir)

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        img_name = self.img_list[idx]

        # Image and label live in parallel folders under the same file name.
        img = Image.open(os.path.join(self.root_path, 'img', img_name))
        label = Image.open(os.path.join(self.root_path, 'label', img_name))

        img = self.transforms(img)
        label = self.transforms(label)

        if self.flag == 'test':
            # Test mode: the label is only for visual inspection; the name is
            # needed to save the prediction result under the same file name.
            return img, label, img_name
        return img, label




import os
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
import cv2


import os
import cv2
import torch
from torch.utils.data import Dataset

# class mf_data_seg_A(Dataset):
#     def __init__(self, root_path, flag='', transforms=None):
#         """
#         初始化数据集
#         :param root_path: 数据集根目录，例如 "./od1"
#         :param flag: 数据集类型（train, val, test）
#         :param transforms: 数据增强方法
#         """
#         assert flag in ['train', 'val', 'test'], "flag 必须是 'train', 'val', 'test' 中的一个"

#         self.root_path = root_path
#         self.flag = flag
#         self.transforms = transforms

#         # 拼接路径
#         self.img_dir = self._fix_path(os.path.join(self.root_path, flag, 'img'))
#         self.label_dir = self._fix_path(os.path.join(self.root_path, flag, 'label'))

#         # 确保路径存在
#         if not os.path.exists(self.img_dir) or not os.path.exists(self.label_dir):
#             raise ValueError(f"路径不存在: {self.img_dir} 或 {self.label_dir}")

#         self.img_list = os.listdir(self.img_dir)  # 获取图片列表

#     def __len__(self):
#         return len(self.img_list)

#     def __getitem__(self, idx):
#         img_name = self.img_list[idx]
#         img_item_path = os.path.join(self.img_dir, img_name)
#         label_item_path = os.path.join(self.label_dir, img_name)

#         # 读取图片和标签
#         img = cv2.imread(img_item_path)  # BGR 格式
#         label = cv2.imread(label_item_path, cv2.IMREAD_GRAYSCALE)  # 单通道灰度

#         if img is None or label is None:
#             raise ValueError(f"无法读取图片或标签: {img_name}")

#         # 转为 RGB 格式
#         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

#         # 应用数据增强
#         if self.transforms is not None and callable(self.transforms):
#             transformed = self.transforms(image=img, mask=label)
#             img = transformed['image']
#             label = transformed['mask']

#         # 转换为张量
#         # img = torch.from_numpy(img).permute(2, 0, 1).float()  # [C, H, W]
#         # label = torch.from_numpy(label).unsqueeze(0).float()  # [1, H, W]

#         # 标签二值化
#         label[label >= 0.5] = 1.0
#         label[label < 0.5] = 0.0

#         return img, label

#     @staticmethod
#     def _fix_path(path):
#         """
#         自动修正路径中可能出现的重复部分，例如 "train/train/img"
#         :param path: 原始路径
#         :return: 修正后的路径
#         """
#         parts = path.split(os.sep)  # 按路径分隔符拆分
#         fixed_parts = []
#         for part in parts:
#             if len(fixed_parts) > 0 and fixed_parts[-1] == part:
#                 continue  # 跳过重复部分
#             fixed_parts.append(part)
#         return os.sep.join(fixed_parts)  # 拼接修正后的路径

class mf_data_seg_A(Dataset):
    """Segmentation dataset with a tracked target point.

    Expects <root>/img and <root>/label holding matching, sorted image files.
    Images are read with OpenCV (BGR), labels as single-channel grayscale.
    The fixed target pixel is passed through the transforms as a keypoint so
    its position follows any geometric augmentation.
    """

    def __init__(self, root_path, transforms=None, flag=None, target_pixel=(230, 135)):
        """
        :param root_path: dataset root containing 'img' and 'label' folders
        :param transforms: albumentations-style callable accepting
            (image=..., mask=..., keypoints=...) and returning a dict
        :param flag: optional split tag; stored but not used by this class
        :param target_pixel: coordinates of the point of interest
        """
        self.img_path = os.path.join(root_path, 'img')
        self.label_path = os.path.join(root_path, 'label')
        self.transforms = transforms
        self.flag = flag
        self.target_pixel = target_pixel

        self.img_list = self._load_files(self.img_path)
        self.label_list = self._load_files(self.label_path)

        # Pairs are matched by sorted order, so the counts must agree.
        assert len(self.img_list) == len(self.label_list), \
            f"Number of images ({len(self.img_list)}) and labels ({len(self.label_list)}) do not match."

    def _load_files(self, path):
        """Return sorted full paths of all image files directly under `path`."""
        files = [os.path.join(path, f) for f in os.listdir(path)
                 if f.lower().endswith(('.jpg', '.png', '.jpeg'))]
        files.sort()  # deterministic order keeps img/label pairs aligned
        return files

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        """Return (image, label, keypoints) for sample `idx`.

        Image and label are normalized to [0, 1]; the label gains an explicit
        channel dimension.
        """
        if idx >= len(self.img_list):
            raise IndexError(f"Index {idx} out of range for dataset size {len(self.img_list)}")

        img = cv2.imread(self.img_list[idx])  # BGR, HxWx3
        label = cv2.imread(self.label_list[idx], cv2.IMREAD_GRAYSCALE)  # HxW

        # Single keypoint in albumentations format: float array of shape (N, 2).
        keypoints = np.array([self.target_pixel], dtype=np.float32).reshape(-1, 2)

        if self.transforms:
            transformed = self.transforms(image=img, mask=label, keypoints=keypoints)
            img = transformed['image']
            label = transformed['mask']
            keypoints = transformed['keypoints']
        else:
            # Without transforms the raw numpy arrays lack the tensor methods
            # used below (the original code crashed here) — convert explicitly.
            img = torch.from_numpy(img).permute(2, 0, 1)
            label = torch.from_numpy(label)

        img = img.float() / 255.0                    # [0, 255] -> [0, 1]
        label = label.unsqueeze(0).float() / 255.0   # add channel dim, normalize

        return img, label, keypoints






# class mf_data_seg_A(torch.utils.data.Dataset):
#     def __init__(self, root_path, flag='', transforms=None):
#         assert flag in ['train', 'val', 'test']

#         self.root_path = root_path
#         self.flag = flag
#         self.transforms = transforms

#         self.img_dir = os.path.join(self.root_path, 'img')
#         self.label_dir = os.path.join(self.root_path, 'label')
#         self.img_list = [f for f in os.listdir(self.img_dir) if f in os.listdir(self.label_dir)]

#     def __len__(self):
#         return len(self.img_list)

#     def __getitem__(self, idx):
#         img_name = self.img_list[idx]
#         img_item_path = os.path.join(self.img_dir, img_name)
#         label_item_path = os.path.join(self.label_dir, img_name)

#         # 读取图片和标签
#         img = cv2.imread(img_item_path).copy()  # BGR 格式
#         label = cv2.imread(label_item_path, cv2.IMREAD_GRAYSCALE).copy()

#         if self.transforms is not None:
#             if callable(self.transforms):
#                 # 修复 transforms 的调用问题
#                 transformed = self.transforms(image=img, mask=label)
#                 img = transformed['image']
#                 label = transformed['mask']
#             else:
#                 raise ValueError("`transforms` 必须是可调用对象，例如 albumentations.Compose。")
#         else:
#             # 如果未定义 transforms，默认进行归一化
#             img = img / 255.0
#             label = label / 255.0

#         # 转换为 PyTorch 张量
#         img = torch.tensor(img, dtype=torch.float32)  # CxHxW
#         label = torch.tensor(label, dtype=torch.float32).unsqueeze(0)  # 添加通道维度

#         # 标签二值化
#         label[label >= 0.5] = 1.0
#         label[label < 0.5] = 0.0

#         return img, label, img_name  # 确保返回图像、标签和文件名

class mf_data_clas(Dataset):
    """Binary classification dataset over <root>/g (label 1) and <root>/n (label 0).

    `tar` selects which class folder this instance iterates. In 'test' mode
    the file name is returned as well, so results can be written out
    (e.g. to a spreadsheet).
    """

    def __init__(self, root_path, flag='', tar='', transforms=None):
        """
        :param root_path: dataset root containing 'g' and 'n' subfolders
        :param flag: split tag; any value other than 'test' behaves like train/val
        :param tar: 'g' for positives (label 1); anything else uses 'n' (label 0)
        :param transforms: callable applied to the loaded PIL image
        """
        self.root_path = root_path
        self.flag = flag
        self.tar = tar
        self.transforms = transforms

        self.g_img_dir = os.path.join(self.root_path, 'g')
        self.g_img_list = os.listdir(self.g_img_dir)
        self.n_img_dir = os.path.join(self.root_path, 'n')
        self.n_img_list = os.listdir(self.n_img_dir)

    def __len__(self):
        return len(self.g_img_list) if self.tar == 'g' else len(self.n_img_list)

    def __getitem__(self, idx):
        # Select folder, file name and label from the configured class;
        # this collapses the original four near-identical branches.
        if self.tar == 'g':
            sub, name, img_label = 'g', self.g_img_list[idx], 1
        else:
            sub, name, img_label = 'n', self.n_img_list[idx], 0

        img = Image.open(os.path.join(self.root_path, sub, name))
        img = self.transforms(img)

        if self.flag == 'test':
            # Test mode also returns the file name for result logging.
            return img, img_label, name
        return img, img_label








class mf_data_seg_A_tocla(Dataset):
    """Classification dataset reusing segmentation-style (image, mask) transforms.

    Images come from <root>/g (target 1) or <root>/n (target 0). Because the
    transforms expect an (image, mask) pair, the image itself is passed as a
    dummy mask and the transformed mask is discarded. In 'test' mode the file
    name is also returned so predictions can be saved per file.
    """

    def __init__(self, root_path, flag='', tar='', transforms=None):
        """
        :param root_path: dataset root containing 'g' and 'n' subfolders
        :param flag: one of 'train', 'val', 'test'
        :param tar: 'g' for positives (target 1); anything else uses 'n' (target 0)
        :param transforms: albumentations-style callable taking (image=..., mask=...)
        """
        assert flag in ['train', 'val', 'test']

        self.root_path = root_path
        self.flag = flag
        self.tar = tar
        self.transforms = transforms

        self.g_img_dir = os.path.join(self.root_path, 'g')
        self.g_img_list = os.listdir(self.g_img_dir)
        self.n_img_dir = os.path.join(self.root_path, 'n')
        self.n_img_list = os.listdir(self.n_img_dir)

    def __len__(self):
        return len(self.g_img_list) if self.tar == 'g' else len(self.n_img_list)

    def __getitem__(self, idx):
        # Select folder, file name and target from the configured class;
        # collapses the original four near-identical branches.
        if self.tar == 'g':
            sub, name, target = 'g', self.g_img_list[idx], 1
        else:
            sub, name, target = 'n', self.n_img_list[idx], 0

        img = cv2.imread(os.path.join(self.root_path, sub, name)).copy()  # BGR

        # No real mask is needed for classification; feed the image as the
        # mask so the shared segmentation transforms can run unchanged.
        transformed = self.transforms(image=img, mask=img)
        img = transformed['image']

        img = (img / 255.0).to(torch.float32)  # normalize to [0, 1]

        if self.flag == 'test':
            return img, target, name
        return img, target


def evaluate_model(model, dataloader):
    """Evaluate `model` at a single pixel of interest across `dataloader`.

    For every batch, the prediction and label values at the module-level
    `target_pixel` are collected (indexed as [batch, channel, p0, p1] —
    whether (row, col) or (x, y) is intended must be confirmed against the
    caller). Predictions are min-max normalized, then an ROC curve over all
    collected points yields the operating point where sensitivity and
    specificity are closest.

    Relies on module-level `device` and `target_pixel`.

    :param model: torch model producing a 4-D output [B, C, H, W]
    :param dataloader: yields (img, label) or (img, label, name) batches
    :return: (best_sensitivity, best_specificity) at the balanced ROC point
    """
    # Hoisted out of the per-batch loop: importing every iteration is wasteful.
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.metrics import roc_curve  # was an unimported global name

    all_labels = []
    all_predictions = []
    scaler = MinMaxScaler()

    model.eval()
    with torch.no_grad():
        for data in dataloader:
            if len(data) == 3:  # test split yields (img, label, name)
                img, label, _ = data  # file name not needed here
            else:  # train/val yield (img, label)
                img, label = data
            img, label = img.to(device), label.to(device)

            pred = model(img)
            pred_np = pred.detach().cpu().numpy()
            label_np = label.detach().cpu().numpy()

            # Keep only the value at the target pixel: shape [batch, channel].
            pred_target = pred_np[:, :, target_pixel[0], target_pixel[1]]
            label_target = label_np[:, :, target_pixel[0], target_pixel[1]]

            # NOTE(review): min-max scaling is refit per batch, which makes
            # scores from different batches incomparable on the joint ROC —
            # consider scaling once over the full prediction set instead.
            pred_flat = pred_target.flatten()
            pred_norm = scaler.fit_transform(pred_flat.reshape(-1, 1)).flatten()

            all_predictions.extend(pred_norm)
            all_labels.extend(label_target.flatten())

    # ROC: tpr is sensitivity, 1 - fpr is specificity.
    fpr, tpr, thresholds = roc_curve(all_labels, all_predictions)
    sensitivities = tpr
    specificities = 1 - fpr

    # Pick the threshold where sensitivity and specificity are closest.
    best_index = abs(sensitivities - specificities).argmin()
    return sensitivities[best_index], specificities[best_index]



