import os
import numpy as np
import nibabel as nib
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
import skimage.transform
import matplotlib.pyplot as plt
import cv2
from torchvision import transforms

# Intensity-normalization helper
def normalize_image(image, min_val=None, max_val=None):
    """Min-max normalize *image* into [0, 1] and return it as float32.

    Works for single-channel and multi-channel arrays alike. When the
    bounds are not supplied they are taken from the image itself; values
    falling outside an explicitly supplied range are clipped.

    Args:
        image: numpy array of intensities.
        min_val: lower bound of the normalization range (default: image.min()).
        max_val: upper bound of the normalization range (default: image.max()).

    Returns:
        float32 array with the same shape as *image*, values in [0, 1].
    """
    if min_val is None:
        min_val = image.min()
    if max_val is None:
        max_val = image.max()

    # Guard against a (near-)zero range: a constant image maps to all
    # zeros when its value is (near) zero, otherwise to all ones.
    # dtype is forced to float32 so degenerate inputs honor the same
    # contract as the normal path (the original returned the input dtype).
    range_val = max_val - min_val
    if range_val < 1e-10:
        if abs(max_val) < 1e-10:
            return np.zeros_like(image, dtype=np.float32)
        return np.ones_like(image, dtype=np.float32)

    normalized = (image - min_val) / range_val
    return np.clip(normalized, 0, 1).astype(np.float32)

class MRISliceDataset(Dataset):
    """Per-patient T2 MRI dataset across three acquisition orientations.

    For each patient and each orientation ('axi', 'sag', 'cor') the slice
    with the largest ROI coverage is selected, min-max normalized, cropped
    to a square window centered on the ROI bounding box, replicated to
    3 channels and normalized with ImageNet statistics so it can feed a
    pretrained backbone.
    """

    def __init__(self, root_dir, modalities=('axi', 'sag', 'cor'), crop_size=224, augment=None):
        """
        Args:
            root_dir: dataset root containing 'img/', 'roi/' and 'label.csv'.
            modalities: orientation tags used in the NIfTI file names.
            crop_size: side length of the square ROI-centered crop.
            augment: optional augmentation callable (currently unused).
        """
        self.root_dir = root_dir
        self.modalities = modalities
        self.crop_size = crop_size
        self.augment = augment

        # ImageNet mean/std normalization, applied after the single-channel
        # slice has been replicated to 3 channels.
        self.normalize_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        # Load labels; convert 1-based class ids to 0-based.
        self.labels = pd.read_csv(os.path.join(root_dir, 'label.csv'),
                                  header=0,
                                  names=['p_id', 'label']).set_index('p_id')
        self.labels['label'] = self.labels['label'].astype(int) - 1

        # Keep only patients whose files exist and pass the sanity checks.
        self.patient_ids = [pid for pid in self.labels.index
                            if self._check_data_complete(pid)]

    def _check_data_complete(self, pid):
        """Return True when every modality of *pid* has a loadable
        image/ROI pair with matching in-plane shape and a sane aspect
        ratio; log (print) the reason and return False otherwise."""
        for mod in self.modalities:
            img_path = os.path.join(self.root_dir, 'img', f'{pid}_T2_{mod}_000.nii.gz')
            roi_path = os.path.join(self.root_dir, 'roi', f'{pid}_T2_{mod}_roi.nii.gz')

            if not (os.path.exists(img_path) and os.path.exists(roi_path)):
                return False

            try:
                # nib.load reads only the header here, so shape checks
                # stay cheap — no voxel data is pulled in.
                img = nib.load(img_path)
                roi = nib.load(roi_path)
            except Exception as e:
                print(f"加载文件失败：{img_path} 或 {roi_path}, 错误：{e}")
                return False

            # In-plane (H, W) shapes must agree between image and ROI.
            img_shape = img.shape[:2]
            roi_shape = roi.shape[:2]
            if img_shape != roi_shape:
                print(f"患者 {pid} 的 {mod}方位图像尺寸 {img_shape} 与 ROI 尺寸 {roi_shape} 不匹配")
                return False

            # Reject degenerate or extremely elongated slices (> 1.5:1).
            max_allowed_ratio = 1.5
            h, w = img_shape
            if h == 0 or w == 0:
                print(f"患者 {pid} 的 {mod} 方位存在零尺寸：{img_shape}")
                return False
            current_ratio = max(h / w, w / h)
            if current_ratio > max_allowed_ratio:
                print(f"患者 {pid} 的 {mod} 方位长宽比过大：{img_shape} (比例 {current_ratio:.2f})")
                return False

        return True

    def _crop_around_roi(self, image, roi):
        """Crop a crop_size x crop_size window centered on the ROI
        bounding box, padding with image.min() wherever the window
        extends past the image. Returns all zeros for an empty ROI."""
        coords = np.argwhere(roi > 0)
        if len(coords) == 0:
            return np.zeros((self.crop_size, self.crop_size), dtype=np.float32)

        # ROI bounding-box center.
        min_y, min_x = coords.min(axis=0)
        max_y, max_x = coords.max(axis=0)
        center_y = (min_y + max_y) // 2
        center_x = (min_x + max_x) // 2

        half_size = self.crop_size // 2

        # Clamp the window to the image bounds...
        y_start = max(0, center_y - half_size)
        y_end = min(image.shape[0], center_y + half_size)
        x_start = max(0, center_x - half_size)
        x_end = min(image.shape[1], center_x + half_size)

        cropped = image[y_start:y_end, x_start:x_end]

        # ...then pad back to the full window size on any clipped side,
        # so the output is always (crop_size, crop_size) for even sizes.
        pad_y_before = max(0, half_size - (center_y - y_start))
        pad_y_after = max(0, half_size - (y_end - center_y))
        pad_x_before = max(0, half_size - (center_x - x_start))
        pad_x_after = max(0, half_size - (x_end - center_x))

        padded = np.pad(cropped,
                        ((pad_y_before, pad_y_after),
                         (pad_x_before, pad_x_after)),
                        mode='constant',
                        constant_values=image.min())

        return padded.astype(np.float32)

    def __len__(self):
        return len(self.patient_ids)

    def __getitem__(self, idx):
        """Return a dict with one ImageNet-normalized (3, H, W) tensor per
        orientation, the matching cropped ROI masks, per-modality validity
        flags, the 0-based label and the patient id."""
        pid = self.patient_ids[idx]
        label = self.labels.loc[pid, 'label']

        roi_data = []
        valid_flags = []
        # One slot per orientation, filled as the modalities are processed;
        # an orientation missing from self.modalities stays None.
        view_tensors = {'axi': None, 'sag': None, 'cor': None}

        for mod in self.modalities:
            img_nii = nib.load(os.path.join(self.root_dir, 'img', f'{pid}_T2_{mod}_000.nii.gz'))
            roi_nii = nib.load(os.path.join(self.root_dir, 'roi', f'{pid}_T2_{mod}_roi.nii.gz'))

            img = img_nii.get_fdata().astype(np.float32)
            roi = roi_nii.get_fdata().astype(np.float32)

            # Pick the slice with the largest ROI area. Vectorized count
            # per slice; np.argmax keeps the first maximum, matching the
            # original linear scan's tie-break.
            n_slices = img.shape[2]
            roi_counts = (roi[:, :, :n_slices] > 0).sum(axis=(0, 1))
            best_slice_idx = int(np.argmax(roi_counts))
            max_ratio = roi_counts[best_slice_idx] / float(img.shape[0] * img.shape[1])

            # Min-max normalize the chosen slice, then crop around the ROI
            # (already float32 coming out of _crop_around_roi).
            img_slice = normalize_image(img[:, :, best_slice_idx])
            roi_slice = roi[:, :, best_slice_idx]
            img_cropped = self._crop_around_roi(img_slice, roi_slice)

            # Replicate the gray channel to 3 channels for the pretrained
            # backbone, then apply ImageNet normalization -> (3, H, W).
            img_triple = np.stack([img_cropped] * 3, axis=-1)
            img_tensor = self.normalize_transform(img_triple)

            if mod in view_tensors:
                view_tensors[mod] = img_tensor

            # Crop the ROI mask through the same window as the image.
            roi_cropped = self._crop_around_roi(roi_slice, roi_slice)
            roi_data.append(torch.from_numpy(roi_cropped).unsqueeze(0))  # (1, H, W)

            # A modality is valid only if any ROI voxel exists at all.
            valid_flags.append(1.0 if max_ratio > 0 else 0.0)

        return {
            'axi_image': view_tensors['axi'],   # axial tensor (3, H, W)
            'sag_image': view_tensors['sag'],   # sagittal tensor (3, H, W)
            'cor_image': view_tensors['cor'],   # coronal tensor (3, H, W)
            'rois': roi_data,                   # per modality: (1, H, W)
            'valid_flags': torch.tensor(valid_flags, dtype=torch.float32),
            'label': torch.tensor(label, dtype=torch.long),
            'patient_id': pid
        }

def test_dataloader_and_visualize(root_dir='./data', batch_size=2):
    """Smoke-test the dataset pipeline and visualize one sample.

    Loads a single shuffled batch, de-normalizes the ImageNet statistics
    for display, and shows a 4x3 grid: one row of per-channel views for
    each orientation, plus a final row with the ROI-masked images.
    """
    loader = DataLoader(MRISliceDataset(root_dir=root_dir),
                        batch_size=batch_size, shuffle=True)
    batch = next(iter(loader))
    sample_idx = 0

    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])

    def denorm(chw):
        # (3, H, W) -> (H, W, 3), undo ImageNet normalization, clip to [0, 1].
        hwc = np.transpose(chw, (1, 2, 0))
        return np.clip(hwc * imagenet_std + imagenet_mean, 0, 1)

    # One (3, H, W) array per orientation, and its matching (H, W) ROI mask.
    images = [batch[key][sample_idx].numpy()
              for key in ('axi_image', 'sag_image', 'cor_image')]
    rois = [batch['rois'][k][sample_idx].squeeze().numpy() for k in range(3)]

    fig, axes = plt.subplots(4, 3, figsize=(15, 16))
    modalities = ['Axial', 'Sagittal', 'Coronal']

    # Rows 0-2: each orientation's three (identical-source) channels.
    for row, chw in enumerate(images):
        shown = denorm(chw)
        for col in range(3):
            axes[row, col].imshow(shown[:, :, col], cmap='gray')
            axes[row, col].set_title(f"{modalities[row]} - Channel {col+1}", fontsize=9)
            axes[row, col].axis('off')

    # Row 3: ROI-masked image per orientation (mask applied in normalized
    # space, then de-normalized — same as the original implementation).
    for col, (chw, mask) in enumerate(zip(images, rois)):
        masked = denorm(chw * mask[None, :, :])
        axes[3, col].imshow(masked[:, :, 0], cmap='gray')
        axes[3, col].set_title(f"{modalities[col]} ROI zone", fontsize=9)
        axes[3, col].axis('off')

    plt.suptitle(f"Sample Label: {batch['label'][sample_idx].item()}\n"
                 f"Patient ID: {batch['patient_id'][sample_idx]}\n"
                 f"ROI zone", y=0.98)
    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    # Script entry point: smoke-test the pipeline on the local training
    # split and visualize one random batch (path is machine-specific).
    test_dataloader_and_visualize(root_dir='D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/train')