import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
from torch.utils.data import Dataset
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

class MedicalDataset(Dataset):
    """Multi-modal (axial / sagittal / coronal) T2 MRI dataset.

    Each item is a float32 tensor of shape
    [3 modalities, 3 slices, image_size, image_size], built per modality by:
    picking the slice with the largest ROI area (plus its two neighbors),
    cropping to the ROI bounding box expanded by 20%, resizing with preserved
    aspect ratio, padding to a square, weighting the image by the ROI and its
    boundary, and normalizing to [0, 1] via the 2nd/98th ROI percentiles.
    Light augmentation (flip / rotate / brightness / noise / blur) is applied
    only in the 'train' phase.
    """

    def __init__(self, data_path, phase='train', scaler=None, ori_img=0.6, image_size=112):
        """
        Args:
            data_path: root directory containing 'train' / 'val' / 'test' subdirs.
            phase: dataset split, one of 'train', 'val', 'test'.
            scaler: optional pre-fitted StandardScaler.
                NOTE(review): a StandardScaler is instantiated for the train
                split but is never fitted or applied anywhere in this class —
                confirm whether it is still needed.
            ori_img: base weight applied to the whole slice before the ROI
                (+0.5) and ROI-boundary (+0.4) boosts are added.
            image_size: side length of the square output slices.
        """
        self.phase = phase
        self.data_dir = os.path.join(data_path, phase)
        self.ori_img = ori_img
        self.label_df = pd.read_csv(os.path.join(self.data_dir, 'label.csv'))
        # Labels in the CSV are 1-based; shift to 0-based class indices.
        self.labels = (self.label_df['label'] - 1).tolist()
        self.image_size = image_size
        self.modalities = ['axi', 'sag', 'cor']  # the three scan orientations

        if scaler is None and phase == 'train':
            self.scaler = StandardScaler()
        else:
            self.scaler = scaler

    def load_nifti(self, p_id, modality):
        """Load and preprocess one modality's NIfTI image + ROI pair.

        Args:
            p_id: patient identifier (used to build the file names).
            modality: one of 'axi', 'sag', 'cor'.

        Returns:
            A list of three processed slices
            [slice_before, main_slice, slice_after], each a float32 array of
            shape (image_size, image_size) with values in [0, 1].
        """
        p_id = str(p_id)
        img_path = os.path.join(self.data_dir, "img", f"{p_id}_T2_{modality}_000.nii.gz")
        roi_path = os.path.join(self.data_dir, "roi", f"{p_id}_T2_{modality}_roi.nii.gz")

        # Load the NIfTI volumes.
        img = nib.load(img_path).get_fdata().astype(np.float32)
        roi = nib.load(roi_path).get_fdata().astype(np.uint8)

        # Find the slice (last axis) with the largest ROI area.
        roi_sums = roi.sum(axis=(0, 1))
        main_slice_idx = np.argmax(roi_sums)

        # Main slice plus its two neighbors, clamped to the volume bounds
        # (indices may repeat at the volume edges).
        slice_indices = [
            max(0, main_slice_idx - 1),
            main_slice_idx,
            min(img.shape[2] - 1, main_slice_idx + 1)
        ]

        processed_slices = []
        for slice_idx in slice_indices:
            # Extract the 2-D slice and its mask.
            img_slice = img[..., slice_idx]
            roi_slice = roi[..., slice_idx]

            # ROI bounding box; fall back to the full slice when the mask
            # is empty on this particular slice.
            rows = np.any(roi_slice, axis=1)
            cols = np.any(roi_slice, axis=0)
            ymin, ymax = np.where(rows)[0][[0, -1]] if rows.any() else (0, img_slice.shape[0])
            xmin, xmax = np.where(cols)[0][[0, -1]] if cols.any() else (0, img_slice.shape[1])

            # Expand the bounding box by 20% on each side (clamped).
            height = ymax - ymin
            width = xmax - xmin
            ymin = max(0, int(ymin - 0.2 * height))
            ymax = min(img_slice.shape[0], int(ymax + 0.2 * height))
            xmin = max(0, int(xmin - 0.2 * width))
            xmax = min(img_slice.shape[1], int(xmax + 0.2 * width))

            # Crop to the expanded ROI region.
            cropped_img = img_slice[ymin:ymax, xmin:xmax]
            cropped_roi = roi_slice[ymin:ymax, xmin:xmax]

            # Resize preserving aspect ratio (nearest-neighbor for the mask
            # so label values are not interpolated).
            h, w = cropped_img.shape
            scale_ratio = min(self.image_size / h, self.image_size / w)
            new_h, new_w = int(h * scale_ratio), int(w * scale_ratio)

            scaled_img = cv2.resize(cropped_img, (new_w, new_h),
                                    interpolation=cv2.INTER_LINEAR)
            scaled_roi = cv2.resize(cropped_roi.astype(np.uint8), (new_w, new_h),
                                    interpolation=cv2.INTER_NEAREST)

            # Pad symmetrically to a square of side image_size. The image is
            # padded with its 5th-percentile intensity (dark background),
            # the mask with zeros.
            pad_top = (self.image_size - new_h) // 2
            pad_bottom = self.image_size - new_h - pad_top
            pad_left = (self.image_size - new_w) // 2
            pad_right = self.image_size - new_w - pad_left
            padded_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bottom,
                                            pad_left, pad_right, cv2.BORDER_CONSTANT,
                                            value=np.percentile(scaled_img, 5))
            padded_roi = cv2.copyMakeBorder(scaled_roi, pad_top, pad_bottom,
                                            pad_left, pad_right, cv2.BORDER_CONSTANT,
                                            value=0)

            # ROI-weighted enhancement: boost the ROI interior and, more
            # subtly, its eroded boundary ring.
            kernel = np.ones((3, 3), np.uint8)
            eroded_roi = cv2.erode(padded_roi, kernel, iterations=1)
            boundary_mask = (padded_roi - eroded_roi).astype(np.float32)

            roi_weighted = padded_img * (self.ori_img + 0.5 * (padded_roi > 0) + 0.4 * boundary_mask)

            # Percentile normalization using only in-ROI pixels, clipped
            # to [0, 1]. Skipped when the mask is empty on this slice.
            roi_pixels = roi_weighted[padded_roi > 0]
            if len(roi_pixels) > 0:
                p2, p98 = np.percentile(roi_pixels, (2, 98))
                roi_weighted = np.clip((roi_weighted - p2) / (p98 - p2 + 1e-8), 0, 1)

            processed_slices.append(roi_weighted.astype(np.float32))

        return processed_slices

    def __len__(self):
        """Return the number of patients (rows in label.csv)."""
        return len(self.label_df)

    def __getitem__(self, idx):
        """Return one sample.

        Returns:
            dict with:
                'images': float32 tensor [3 modalities, 3 slices, H, W]
                'labels': 0-based class index (CSV label minus 1)
                'p_id':   patient identifier from the CSV
        """
        row = self.label_df.iloc[idx]
        p_id = row['p_id']

        # Collect all modalities and slices: [3 modalities, 3 slices, H, W].
        all_modality_slices = []

        for modality in self.modalities:
            slices = self.load_nifti(p_id, modality)

            # Apply per-slice augmentation during training only.
            augmented_slices = []
            for img in slices:
                if self.phase == 'train':
                    # Random horizontal flip.
                    if np.random.rand() < 0.5:
                        img = cv2.flip(img, 1)

                    # Random rotation around the image center.
                    angle = np.random.uniform(-15, 15)
                    M = cv2.getRotationMatrix2D((self.image_size // 2, self.image_size // 2), angle, 1.0)
                    img = cv2.warpAffine(img, M, (self.image_size, self.image_size),
                                         flags=cv2.INTER_LINEAR,
                                         borderMode=cv2.BORDER_REFLECT)

                    # Random brightness jitter, kept in [0, 1].
                    brightness = np.random.uniform(0.8, 1.2)
                    img = np.clip(img * brightness, 0, 1)

                    # Additive Gaussian noise.
                    if np.random.rand() < 0.5:
                        noise = np.random.normal(0, 0.03, img.shape).astype(np.float32)
                        img = np.clip(img + noise, 0, 1)

                    # Random Gaussian blur (cast ksize: np.random.choice
                    # returns a NumPy integer, cv2 expects a plain int).
                    if np.random.rand() < 0.3:
                        ksize = int(np.random.choice([3, 5]))
                        img = cv2.GaussianBlur(img, (ksize, ksize), 0)

                augmented_slices.append(img)

            all_modality_slices.append(augmented_slices)

        # Single conversion to a float32 tensor (the previous extra
        # torch.FloatTensor(...) wrapper made a redundant copy).
        tensor_slices = torch.tensor(np.array(all_modality_slices), dtype=torch.float32)

        return {
            'images': tensor_slices,  # [3, 3, image_size, image_size]
            'labels': row['label'] - 1,
            'p_id': p_id
        }

    def visualize_sample(self, idx, save_path=None):
        """Visualize the three slices of all three modalities of one sample.

        Args:
            idx: sample index.
            save_path: optional file path; when given the figure is saved
                there instead of being shown interactively.
        """
        # Fetch the sample. BUGFIX: __getitem__ returns the keys 'images'
        # and 'labels' (plural); the old 'image'/'label' lookups raised
        # KeyError on every call.
        sample = self.__getitem__(idx)
        images = sample['images'].numpy()  # [3, 3, H, W]
        label = sample['labels']
        p_id = sample['p_id']

        # Build the figure.
        fig = plt.figure(figsize=(15, 10))
        fig.suptitle(f"Patient ID: {p_id}, Label: {label}", fontsize=16)

        # 3x3 grid: rows = modalities, columns = slice positions.
        gs = GridSpec(3, 3, figure=fig, wspace=0.05, hspace=0.05)

        modalities = ['Axial', 'Sagittal', 'Coronal']
        slice_positions = ['Before', 'Main', 'After']

        # Draw every slice.
        for mod_idx, modality in enumerate(modalities):
            for slice_idx, position in enumerate(slice_positions):
                ax = fig.add_subplot(gs[mod_idx, slice_idx])

                # Current slice.
                img = images[mod_idx, slice_idx]

                # Show as grayscale, no axes.
                ax.imshow(img, cmap='gray')
                ax.axis('off')

                # Titles only on the first row / first column.
                if mod_idx == 0:
                    ax.set_title(f"{position} Slice", fontsize=12)
                if slice_idx == 0:
                    ax.text(-0.2, 0.5, modality, fontsize=12,
                            rotation=90, va='center', ha='center',
                            transform=ax.transAxes)

        plt.tight_layout()

        # Save or display.
        if save_path:
            os.makedirs(os.path.dirname(save_path), exist_ok=True)
            plt.savefig(save_path, bbox_inches='tight', dpi=150)
            plt.close()
            print(f"可视化结果已保存至: {save_path}")
        else:
            plt.show()

if __name__ == "__main__":
    import random

    data_path = "./datasets/pT/three_label_data"
    dataset = MedicalDataset(data_path, phase='train')
    # Pick a random index that is guaranteed to be valid. BUGFIX: the old
    # fixed range randint(0, 1000) could exceed the dataset length and
    # raise IndexError inside visualize_sample.
    sample_idx = random.randrange(len(dataset))
    dataset.visualize_sample(sample_idx)

    # dataset.visualize_sample(4, save_path="./visualization/sample_4.png")
