import os
import glob
import nibabel as nib
import numpy as np
import torch
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import torch

class MRIDataset(Dataset):
    """Multi-direction (axi/sag/cor) 2.5D MRI dataset.

    For every patient that has an ROI volume and image slices in all three
    scan directions plus a label row in the phase CSV, ``__getitem__``
    returns a (3 directions, 3 slices, crop_size, crop_size) array of
    ROI-centred crops around the largest-tumour slice, the class label,
    and the patient id.
    """

    def __init__(self, root_dir, phase='train', crop_size=64, transform=None):
        """
        Args:
            root_dir (string): dataset root directory (contains train/val/test).
            phase (string): data split to load (train/val/test).
            crop_size (int): side length of the square patch cropped around
                the tumour centroid of each selected slice.
            transform (callable, optional): applied to the stacked
                (3, 3, crop_size, crop_size) numpy array.
        """
        self.crop_size = crop_size
        self.root_dir = root_dir
        self.phase = phase
        self.transform = transform
        self.img_dir = os.path.join(root_dir, phase, 'img')
        self.roi_dir = os.path.join(root_dir, phase, 'roi')
        self.label_dir = os.path.join(root_dir, phase)  # folder holding the label CSVs

        # Group discovered files by patient id: {p_id: {direction: {...}}}.
        self.patient_data = defaultdict(dict)

        # Each ROI file identifies one (patient, scan direction) pair.
        roi_files = glob.glob(os.path.join(self.roi_dir, '*.nii.gz'))

        for roi_path in roi_files:
            # Filename layout assumed: <p_id>_<seq>_<direction>_... .nii.gz
            # — TODO confirm against the actual export naming convention.
            roi_filename = os.path.basename(roi_path)
            parts = roi_filename.split('_')
            p_id_str = parts[0]  # patient id as a string

            # Patient ids must be numeric; skip anything else.
            try:
                p_id_int = int(p_id_str)
            except ValueError:
                print(f"Warning: Invalid patient ID format: {p_id_str}")
                continue

            scan_direction = parts[2]  # e.g. axi, sag, cor

            # Collect the matching per-slice image files for this direction.
            img_pattern = os.path.join(self.img_dir, f"{p_id_str}_T2_{scan_direction}_*.nii.gz")
            img_files = sorted(glob.glob(img_pattern))

            if not img_files:
                print(f"Warning: No image files found for {roi_filename}")
                continue

            self.patient_data[p_id_int][scan_direction] = {
                'roi_path': roi_path,
                'img_files': img_files,
                'p_id_str': p_id_str,  # keep the string form of the id as well
            }

        # Labels come from CSV files inside the phase folder.
        self.label_df = self._load_labels()

        # Keep only patients that have all three directions AND a label.
        self.samples = []
        for p_id_int, directions in self.patient_data.items():
            if set(directions.keys()) == {'axi', 'sag', 'cor'}:
                if p_id_int in self.label_df.index:
                    self.samples.append({
                        'p_id_int': p_id_int,
                        'p_id_str': directions['axi']['p_id_str'],  # same string id in every direction
                        'axi': directions['axi'],
                        'sag': directions['sag'],
                        'cor': directions['cor'],
                        'label': self.label_df.loc[p_id_int, 'label'],
                    })
                else:
                    print(f"Warning: Missing label for patient {p_id_int}")
            else:
                print(f"Warning: Patient {p_id_int} missing one or more scan directions")

    def _load_labels(self):
        """Load every label CSV in the phase folder into one DataFrame indexed by p_id.

        Raises:
            FileNotFoundError: no CSV files exist in ``self.label_dir``.
            ValueError: a CSV is missing the 'p_id' or 'label' column.
        """
        label_files = glob.glob(os.path.join(self.label_dir, '*.csv'))
        if not label_files:
            raise FileNotFoundError(f"No CSV files found in {self.label_dir}")

        dfs = []
        for file in label_files:
            df = pd.read_csv(file)
            if 'p_id' not in df.columns or 'label' not in df.columns:
                raise ValueError(f"CSV file {file} must contain 'p_id' and 'label' columns")

            # Index on the integer patient id so lookups match the parsed filenames.
            df['p_id'] = df['p_id'].astype(int)
            df.set_index('p_id', inplace=True)
            dfs.append(df)

        return pd.concat(dfs)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        sample = self.samples[idx]
        # BUG FIX: bind crop_size once up front. Previously it was assigned
        # only inside the in-range branch below, so the out-of-range fallback
        # and the padding loop raised UnboundLocalError when they ran first.
        crop_size = self.crop_size

        direction_images = []
        for direction in ['axi', 'sag', 'cor']:
            direction_data = sample[direction]
            roi_path = direction_data['roi_path']
            img_files = direction_data['img_files']

            # Load the 3D ROI mask volume.
            roi_nii = nib.load(roi_path)
            roi_data = roi_nii.get_fdata().astype(np.uint8)

            # Load each 2D slice file (sorted by filename, assumed to belong
            # to one 3D volume) and stack them into shape (H, W, N).
            img_volumes = []
            for img_path in sorted(img_files):
                img_nii = nib.load(img_path)
                img_data = img_nii.get_fdata().astype(np.float32)
                img_data = np.squeeze(img_data)  # drop singleton channel dims
                img_volumes.append(img_data)
            img_3d = np.stack(img_volumes, axis=-1)

            # Tumour area per ROI slice; pick the slice with the largest area.
            slice_areas = [np.sum(roi_data[:, :, i] > 0) for i in range(roi_data.shape[2])]
            max_area_idx = int(np.argmax(slice_areas))

            # Use the max-area slice plus its in-range neighbours (up to 3).
            slice_indices = []
            if max_area_idx > 0:
                slice_indices.append(max_area_idx - 1)  # previous slice
            slice_indices.append(max_area_idx)          # max-area slice
            if max_area_idx < roi_data.shape[2] - 1:
                slice_indices.append(max_area_idx + 1)  # next slice

            slice_images = []
            for slice_idx in slice_indices:
                # Guard against the image stack having fewer slices than the ROI.
                if slice_idx < img_3d.shape[2]:
                    img_slice = img_3d[:, :, slice_idx]
                    slice_mask = roi_data[:, :, slice_idx]

                    # Tumour centroid; fall back to the image centre when the
                    # mask is empty on this slice.
                    positions = np.where(slice_mask > 0)
                    if len(positions[0]) > 0:
                        centroid = [np.mean(positions[0]), np.mean(positions[1])]
                    else:
                        centroid = [img_slice.shape[0] // 2, img_slice.shape[1] // 2]
                    cy, cx = int(round(centroid[0])), int(round(centroid[1]))

                    # Crop a crop_size x crop_size patch centred on (cy, cx);
                    # parts falling outside the image stay zero-padded.
                    start_y = max(0, cy - crop_size // 2)
                    start_x = max(0, cx - crop_size // 2)
                    end_y = min(img_slice.shape[0], cy + crop_size // 2)
                    end_x = min(img_slice.shape[1], cx + crop_size // 2)

                    crop = np.zeros((crop_size, crop_size), dtype=np.float32)
                    crop_start_y = max(0, crop_size // 2 - cy)
                    crop_start_x = max(0, crop_size // 2 - cx)

                    valid_h = end_y - start_y
                    valid_w = end_x - start_x

                    crop[crop_start_y:crop_start_y + valid_h, crop_start_x:crop_start_x + valid_w] = \
                        img_slice[start_y:end_y, start_x:end_x].squeeze()

                    slice_images.append(crop)
                else:
                    # Slice index past the image stack: substitute a zero patch.
                    print(f"Warning: Slice index {slice_idx} out of range for patient {sample['p_id_str']}, direction {direction}")
                    slice_images.append(np.zeros((crop_size, crop_size), dtype=np.float32))

            # Pad to exactly three slices per direction.
            while len(slice_images) < 3:
                slice_images.append(np.zeros((crop_size, crop_size), dtype=np.float32))

            # This direction's stack: (3, crop_size, crop_size).
            direction_images.append(np.stack(slice_images, axis=0))

        # All directions stacked: (3 directions, 3 slices, crop_size, crop_size).
        multi_direction_image = np.stack(direction_images, axis=0)

        # Apply the (normalisation/standardisation) transform, if any.
        if self.transform:
            multi_direction_image = self.transform(multi_direction_image)

        # Shift labels down by one for 0-based class ids (CSV labels appear
        # to be 1-based — confirm against the label files).
        label = torch.tensor(sample['label'] - 1.0, dtype=torch.long)
        # BUG FIX: return the real patient id (was a hard-coded placeholder 1).
        pid = sample['p_id_int']
        return multi_direction_image, label, pid

# Custom conversion for 4D (direction, slice, H, W) arrays
class ToTensor4D:
    """Convert a (directions, slices, H, W) numpy array to a float32 tensor.

    The shape is left untouched; only the container type and dtype change.
    """

    def __call__(self, array):
        return torch.from_numpy(array).float()

# Shared data-transform pipeline.
def _rescale_to_unit(x):
    """Scale a tensor into [0, 1] by its max; all-zero tensors pass through.

    A named module-level function (instead of the previous lambda) computes
    the max once and stays picklable for multi-worker DataLoaders.
    """
    peak = x.max()
    return x / peak if peak > 0 else x


transform = transforms.Compose([
    ToTensor4D(),                                 # numpy -> float32 tensor
    transforms.Lambda(_rescale_to_unit),          # rescale to [0, 1]
    transforms.Normalize(mean=[0.5], std=[0.5]),  # standardise to [-1, 1]
])

# Dataset / DataLoader factory.
def create_data_loaders(root_dir, batch_size=8, crop_size=64):
    """Build a DataLoader for each of the train/val/test phases.

    Args:
        root_dir (str): dataset root containing train/val/test subfolders.
        batch_size (int): batch size used for every phase.
        crop_size (int): side length of the square ROI-centred crop,
            forwarded to MRIDataset (previously the dataset default was
            always used and callers could not configure it).

    Returns:
        dict: phase name -> DataLoader; only the 'train' loader shuffles.
    """
    dataloaders = {}

    for phase in ('train', 'val', 'test'):
        dataset = MRIDataset(
            root_dir=root_dir,
            phase=phase,
            crop_size=crop_size,
            transform=transform,
        )

        dataloaders[phase] = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=(phase == 'train'),
            pin_memory=True,
        )

    return dataloaders

def visualize_mri_sample(images, labels, sample_idx=0, modality_idx=0):
    """Plot the three slices of one scan direction for a single batch sample.

    Args:
        images (torch.Tensor): batch of shape [B, 3 directions, 3 slices, H, W],
            standardised with Normalize(mean=[0.5], std=[0.5]).
        labels (torch.Tensor): batch labels, shape [B].
        sample_idx (int): index of the sample within the batch.
        modality_idx (int): scan direction to show (0: axi, 1: sag, 2: cor).
    """
    direction_names = ['axi', 'sag', 'cor']

    # Undo Normalize(mean=[0.5], std=[0.5]) so pixel values land in [0, 1],
    # then drop any singleton dimensions for plotting.
    sample = images[sample_idx] * 0.5 + 0.5
    sample = sample.cpu().numpy().squeeze()

    label_value = labels[sample_idx].item()

    fig, axes = plt.subplots(1, 3, figsize=(15, 5))
    fig.suptitle(
        f"Sample {sample_idx} - Label: {label_value:.2f} - Modality: {direction_names[modality_idx]}",
        fontsize=16,
    )

    # One panel per slice of the requested direction.
    for slice_pos, ax in enumerate(axes):
        ax.imshow(sample[modality_idx, slice_pos], cmap='gray', vmin=0, vmax=1)
        ax.set_title(f"Slice {slice_pos + 1}")
        ax.axis('off')

    plt.tight_layout()
    plt.show()

# Smoke-test the pipeline and visualise one batch.
if __name__ == "__main__":
    root_dir = 'D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/'
    dataloaders = create_data_loaders(root_dir, batch_size=2)

    # BUG FIX: the dataset yields (image, label, pid) triples, so three
    # values must be unpacked — the original two-value unpacking raised
    # ValueError at runtime.
    for images, labels, pids in dataloaders['train']:
        print(f"Batch shape: {images.shape}, Labels: {labels}")

        # Visualise all three scan directions of the first sample.
        visualize_mri_sample(images, labels, sample_idx=0, modality_idx=0)
        visualize_mri_sample(images, labels, sample_idx=0, modality_idx=1)
        visualize_mri_sample(images, labels, sample_idx=0, modality_idx=2)
        break

    # Sanity-check the value range after normalisation.
    for images, labels, pids in dataloaders['train']:
        print(f"Batch shape: {images.shape}, Labels: {labels}")
        print(f"Image range: min={images.min().item():.4f}, max={images.max().item():.4f}")
        break

        
