import gradio as gr
import numpy as np
import nibabel as nib
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from PIL import Image
import io
import os
import time
from scipy.ndimage import zoom
from scipy.spatial.distance import pdist

# Configure CJK-capable fonts so matplotlib can render the Chinese labels
# used in the legend/UI without falling back to missing-glyph boxes.
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

# 模型结构（与之前调整的版本一致）
class ResidualBlock(nn.Module):
    """3D residual block: two conv+BN stages plus a skip connection.

    The skip path is the identity when the channel counts match, otherwise
    a 1x1x1 convolution projects the input to ``out_channels``.
    Attribute names (conv1, bn1, ...) are part of the checkpoint key layout
    and must not be renamed.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Main path: conv -> BN -> ReLU -> conv -> BN (3x3x3, padding keeps size).
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(out_channels)
        # Skip path: project channels only when they differ.
        if in_channels != out_channels:
            self.residual = nn.Conv3d(in_channels, out_channels, kernel_size=1)
        else:
            self.residual = nn.Identity()

    def forward(self, x):
        """Return relu(main_path(x) + skip(x)); spatial shape is preserved."""
        skip = self.residual(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + skip
        return self.relu(y)

class UNet3D(nn.Module):
    """Compact 3D U-Net: two encoder levels, a bottleneck, two decoder levels.

    Channel progression is in -> 16 -> 32 -> 64 -> 32 -> 16 -> out.
    Because of the two 2x max-pool / transposed-conv stages, every spatial
    input dimension must be divisible by 4 or the skip concatenations fail.
    Attribute names are part of the checkpoint key layout; do not rename.
    """

    def __init__(self, in_channels=4, out_channels=4):
        super().__init__()
        # Encoder
        self.enc1 = ResidualBlock(in_channels, 16)
        self.pool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.enc2 = ResidualBlock(16, 32)
        self.pool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        # Bottleneck
        self.bottleneck = ResidualBlock(32, 64)
        # Decoder (transposed-conv upsampling, then refine after skip concat)
        self.up2 = nn.ConvTranspose3d(64, 32, kernel_size=2, stride=2)
        self.dec2 = ResidualBlock(64, 32)
        self.up1 = nn.ConvTranspose3d(32, 16, kernel_size=2, stride=2)
        self.dec1 = ResidualBlock(32, 16)
        # 1x1x1 conv producing per-voxel class logits
        self.out_conv = nn.Conv3d(16, out_channels, kernel_size=1)

    def forward(self, x):
        """Return raw per-voxel class logits, same spatial shape as x."""
        # Encoder path; keep feature maps for the skip connections.
        skip1 = self.enc1(x)
        skip2 = self.enc2(self.pool1(skip1))
        mid = self.bottleneck(self.pool2(skip2))
        # Decoder path: upsample, concatenate matching skip, refine.
        y = self.dec2(torch.cat([self.up2(mid), skip2], dim=1))
        y = self.dec1(torch.cat([self.up1(y), skip1], dim=1))
        return self.out_conv(y)

# 加载预训练模型
def load_model(model_path):
    """Load pretrained UNet3D weights and move the model to the best device.

    Args:
        model_path: filesystem path to a .pth state-dict checkpoint.

    Returns:
        (model, device) on success; (None, None) on any failure (the error
        is printed, never raised, so the app can start without a model).
    """
    try:
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {device}")

        net = UNet3D(in_channels=4, out_channels=4)
        # NOTE(review): torch.load unpickles arbitrary objects -- only load
        # checkpoints from trusted sources (consider weights_only=True on
        # newer PyTorch versions).
        weights = torch.load(model_path, map_location=device)
        # strict=False tolerates missing/unexpected keys from older checkpoints.
        net.load_state_dict(weights, strict=False)
        net = net.to(device)
        net.eval()

        print(f"模型加载成功: {model_path}")
        return net, device
    except Exception as e:
        print(f"模型加载失败: {str(e)}")
        return None, None

# Checkpoint path (relative to the working directory).
MODEL_PATH = os.path.join("models", "best_brats_model_dice.pth")
# Loaded once at import time; both names are None if loading failed,
# and process_scan / the UI check for that.
model, device = load_model(MODEL_PATH)

# 添加维度检查和内存保护的多模态数据预处理
def preprocess_data(modalities_data):
    """Validate, normalize and resize the four MRI modalities for the network.

    Args:
        modalities_data: dict mapping modality name ('FLAIR', 'T1', 'T1CE',
            'T2') to a 3D numpy array; all modalities must share one shape.

    Returns:
        (input_tensor, original_shape, zoom_factors, flair_volume) where
        input_tensor has shape (1, 4, D1, D2, D3) with every Di a multiple
        of 4, and flair_volume is the original (un-resized) FLAIR array.

    Raises:
        ValueError: on non-3D input, oversized input, or mismatched shapes.
        MemoryError: if the preprocessed tensor would exceed 500 MB.
    """
    try:
        reference_shape = next(iter(modalities_data.values())).shape
        for modality, data in modalities_data.items():
            if len(data.shape) != 3:
                raise ValueError(f"{modality}数据不是3D图像，形状为: {data.shape}")

            # Guard against pathologically large volumes.
            if any(dim > 512 for dim in data.shape):
                raise ValueError(f"{modality}图像尺寸过大: {data.shape}，可能导致内存不足")

            # All modalities must share the same shape.
            if data.shape != reference_shape:
                raise ValueError(f"各模态图像尺寸不一致: {modality}为{data.shape}，与其他模态不同")

        processed_modalities = []
        original_shape = reference_shape

        # Cap each dimension at max_size AND round it DOWN to a multiple of 4:
        # UNet3D has two 2x pooling stages, so sizes that are not multiples of
        # 4 break the skip-connection concatenations (torch.cat size mismatch).
        max_size = 128
        target_shape = tuple(max(4, (min(dim, max_size) // 4) * 4) for dim in original_shape)
        zoom_factors = [t / s for t, s in zip(target_shape, original_shape)]

        for modality, data in modalities_data.items():
            # Z-score normalization (shift only when the volume is constant).
            mean = np.mean(data)
            std = np.std(data)
            if std > 0:
                data = (data - mean) / std
            else:
                data = data - mean

            # Modality-specific intensity clipping.
            if modality == 'FLAIR':
                data = np.clip(data, -1, 3)
            elif modality in ['T1', 'T1CE']:
                data = np.clip(data, -1, 4)
            elif modality == 'T2':
                data = np.clip(data, -1, 2.5)

            # Linear interpolation (order=1) keeps memory usage low.
            scaled = zoom(data, zoom_factors, order=1)
            processed_modalities.append(scaled)

        # Stack to (4, D1, D2, D3) then add the batch dimension.
        processed_data = np.stack(processed_modalities, axis=0)[np.newaxis, ...]

        # Final memory guard before handing the tensor to the model.
        memory_usage = processed_data.nbytes / (1024 ** 2)  # MB
        if memory_usage > 500:  # keep the input under 500MB
            raise MemoryError(f"预处理后的数据过大，占用{memory_usage:.2f}MB内存，可能导致内存不足")

        return torch.FloatTensor(processed_data), original_shape, zoom_factors, modalities_data['FLAIR']
    except Exception as e:
        print(f"数据预处理失败: {str(e)}")
        raise

# 后处理分割结果
def postprocess_segmentation(prediction, original_shape, zoom_factors):
    """Resize the predicted label volume back onto the original image grid.

    Args:
        prediction: torch tensor of class indices, shape (1, D1, D2, D3)
            (any singleton dims are squeezed away).
        original_shape: 3-tuple shape of the original volume to restore.
        zoom_factors: per-axis downscale factors used in preprocess_data.

    Returns:
        int32 numpy array with exactly original_shape.

    Raises:
        ValueError: if the squeezed prediction is not 3D.
    """
    try:
        pred_np = prediction.squeeze().cpu().numpy()

        if len(pred_np.shape) != 3:
            raise ValueError(f"预测结果不是3D图像，形状为: {pred_np.shape}")

        # Nearest-neighbour upsampling (order=0) preserves integer labels.
        inverse_zoom = [1 / f for f in zoom_factors]
        seg_resized = zoom(pred_np, inverse_zoom, order=0)

        # Rounding inside zoom can leave the result a voxel off per axis:
        # crop any excess, then zero-pad any shortfall so the output always
        # matches original_shape (the original code only cropped).
        seg_resized = seg_resized[:original_shape[0], :original_shape[1], :original_shape[2]]
        pad_widths = [(0, max(0, want - have)) for want, have in zip(original_shape, seg_resized.shape)]
        if any(after for _, after in pad_widths):
            seg_resized = np.pad(seg_resized, pad_widths, mode='constant', constant_values=0)

        return seg_resized.astype(np.int32)
    except Exception as e:
        print(f"分割结果后处理失败: {str(e)}")
        raise

# 安全加载NIfTI文件的函数
def safe_load_nifti(file_path):
    """Load a NIfTI file with dimension and memory-size validation.

    Args:
        file_path: an uploaded-file object exposing `.name` (gradio File),
            or a falsy value.

    Returns:
        (data, affine) on success; (None, None) when file_path is falsy.

    Raises:
        ValueError: if the image is not 3D.
        MemoryError: if the volume exceeds 500 MB.
    """
    try:
        if not file_path:
            # Bug fix: every caller unpacks two values, so returning a bare
            # None here raised "cannot unpack non-iterable NoneType".
            return None, None

        img = nib.load(file_path.name)
        data = img.get_fdata()

        # Validate dimensionality before anything downstream assumes 3D.
        if len(data.shape) != 3:
            raise ValueError(f"图像不是3D数据，形状为: {data.shape}")

        # Reject volumes that would blow up memory downstream (size in MB).
        data_size = data.nbytes / (1024 ** 2)
        if data_size > 500:  # per-file size limit
            raise MemoryError(f"文件过大，占用{data_size:.2f}MB内存，可能导致内存不足")

        return data, img.affine
    except Exception as e:
        print(f"加载NIfTI文件失败: {str(e)}")
        raise

# 计算肿瘤统计数据
def calculate_tumor_stats(segmentation, original_data, affine):
    """Compute volumetric tumor statistics from a labeled segmentation.

    Label convention (BraTS): 1 = necrosis (NCR/NET), 2 = edema (ED),
    3 = enhancing tumor (ET).

    Args:
        segmentation: 3D integer label volume.
        original_data: 3D reference volume (used to estimate brain volume).
        affine: 4x4 voxel-to-world matrix from the NIfTI header.

    Returns:
        dict with volumes (world units^3, typically mm^3), max diameter,
        tumor burden (%), a placeholder confidence and per-region ratios.
    """
    try:
        # Bug fix: use |det| of the 3x3 part instead of the product of the
        # diagonal -- NIfTI affines routinely carry negative diagonal entries
        # (orientation flips) and rotation terms, which made voxel volumes
        # negative or wrong.
        voxel_volume = abs(np.linalg.det(affine[:3, :3]))

        necrosis_volume = np.sum(segmentation == 1) * voxel_volume
        edema_volume = np.sum(segmentation == 2) * voxel_volume
        enhancing_volume = np.sum(segmentation == 3) * voxel_volume
        total_volume = (necrosis_volume + edema_volume + enhancing_volume)

        tumor_core_volume = necrosis_volume + enhancing_volume
        whole_tumor_volume = total_volume

        # Rough brain mask: voxels above the 10th intensity percentile.
        brain_mask = original_data > np.percentile(original_data, 10)
        brain_volume = np.sum(brain_mask) * voxel_volume
        tumor_burden = (whole_tumor_volume / brain_volume) * 100 if brain_volume > 0 else 0

        # Maximum tumor diameter in world coordinates.
        max_diameter = 0
        total_tumor = np.sum(segmentation > 0)
        if total_tumor > 0:
            coords = np.argwhere(segmentation > 0)
            if len(coords) > 10000:  # cap point count: pdist is O(n^2) in memory
                coords = coords[::len(coords)//10000]
            world_coords = np.dot(affine[:3, :3], coords.T).T + affine[:3, 3]

            if len(world_coords) > 1:
                max_diameter = np.max(pdist(world_coords))

        enhancing_ratio = enhancing_volume / total_volume if total_volume > 0 else 0
        edema_ratio = edema_volume / total_volume if total_volume > 0 else 0
        necrosis_ratio = necrosis_volume / total_volume if total_volume > 0 else 0

        return {
            "total_volume": total_volume,
            "tumor_core_volume": tumor_core_volume,
            "whole_tumor_volume": whole_tumor_volume,
            "max_diameter": max_diameter,
            "tumor_burden": tumor_burden,
            # NOTE(review): placeholder random value, NOT a real model
            # confidence -- should be replaced with a genuine estimate.
            "confidence": np.random.uniform(0.85, 0.98),
            "regions": {
                "enhancing": enhancing_ratio,
                "edema": edema_ratio,
                "necrosis": necrosis_ratio
            }
        }
    except Exception as e:
        print(f"肿瘤统计计算失败: {str(e)}")
        raise

# 创建可视化结果
def create_visualization(original_data, segmentation):
    """Render two PNG images (as PIL Images) for display in the UI.

    Picks the axial slice at the middle of the tumor's z-extent (or the
    volume center when no tumor voxels exist), then returns:
      1. the contrast-normalized grayscale slice, and
      2. the same slice with the three tumor regions overlaid plus a legend.

    Args:
        original_data: 3D numpy array (the FLAIR volume in this app).
        segmentation: 3D integer label volume (0=background, 1=necrosis,
            2=edema, 3=enhancing tumor).

    Returns:
        (original_img, segmentation_img) as PIL Images.

    Raises:
        MemoryError: if original_data is larger than 100 MB.
    """
    try:
        # Refuse to plot very large volumes to protect memory.
        if original_data.nbytes > 100 * 1024 ** 2:  # 100MB
            raise MemoryError("原始图像过大，无法生成可视化结果")
        
        # Choose the axial (z) slice: middle index of the tumor's z-range.
        tumor_mask = segmentation > 0
        if np.any(tumor_mask):
            z_indices = np.where(np.any(tumor_mask, axis=(0, 1)))[0]
            z_center = z_indices[len(z_indices) // 2] if len(z_indices) > 0 else original_data.shape[2] // 2
        else:
            z_center = original_data.shape[2] // 2
        
        original_slice = original_data[:, :, z_center]
        segmentation_slice = segmentation[:, :, z_center]
        
        # Robust contrast normalization via 1st/99th percentiles.
        orig_min, orig_max = np.percentile(original_slice, 1), np.percentile(original_slice, 99)
        original_slice_normalized = (original_slice - orig_min) / (orig_max - orig_min + 1e-8)
        original_slice_normalized = np.clip(original_slice_normalized, 0, 1)
        
        # Figure 1: plain grayscale slice.
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.imshow(original_slice_normalized, cmap='gray')
        ax.axis('off')
        plt.tight_layout(pad=0)
        
        buf = io.BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0, dpi=150)
        buf.seek(0)
        original_img = Image.open(buf)
        
        # Figure 2: grayscale slice with one semi-transparent overlay per label.
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.imshow(original_slice_normalized, cmap='gray')
        ax.imshow(np.where(segmentation_slice == 1, 1, 0), cmap='Purples', alpha=0.5)
        ax.imshow(np.where(segmentation_slice == 2, 1, 0), cmap='YlOrBr', alpha=0.5)
        ax.imshow(np.where(segmentation_slice == 3, 1, 0), cmap='Reds', alpha=0.5)
        
        ax.axis('off')
        plt.tight_layout(pad=0)
        
        # Legend mapping overlay colors to BraTS region names.
        from matplotlib.patches import Patch
        legend_elements = [
            Patch(facecolor='purple', edgecolor='purple', alpha=0.5, label='坏死区域 (NCR/NET)'),
            Patch(facecolor='yellow', edgecolor='yellow', alpha=0.5, label='水肿区域 (ED)'),
            Patch(facecolor='red', edgecolor='red', alpha=0.5, label='增强肿瘤 (ET)')
        ]
        ax.legend(handles=legend_elements, loc='lower right', fontsize=8)
        
        buf = io.BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0, dpi=150)
        buf.seek(0)
        segmentation_img = Image.open(buf)
        
        # Release both figures to avoid accumulating matplotlib state.
        plt.close('all')
        return original_img, segmentation_img
    except Exception as e:
        print(f"可视化创建失败: {str(e)}")
        raise

# 确定肿瘤类型
def determine_tumor_type(segmentation_stats):
    """Heuristic grading: enhancing ratio above 20% implies high-grade glioma.

    Returns a dict with keys "name", "description" and "malignancy"; a
    fallback "unknown" result is returned when the stats dict is malformed.
    """
    try:
        is_high_grade = segmentation_stats["regions"]["enhancing"] > 0.2
        if is_high_grade:
            return {
                "name": "高级别胶质瘤",
                "description": "高级别胶质瘤通常生长迅速，恶性程度高，预后较差。常见类型包括胶质母细胞瘤（GBM）。",
                "malignancy": "高"
            }
        return {
            "name": "低级别胶质瘤",
            "description": "低级别胶质瘤生长相对缓慢，恶性程度较低，预后较好。常见类型包括星形细胞瘤和少突胶质细胞瘤。",
            "malignancy": "中"
        }
    except Exception as e:
        print(f"肿瘤类型判断失败: {str(e)}")
        return {
            "name": "未知肿瘤类型",
            "description": "无法确定肿瘤类型，建议进一步检查。",
            "malignancy": "未知"
        }

# 生成治疗建议
def generate_treatment_recommendations(tumor_stats, tumor_type):
    """Build a list of textual treatment suggestions from stats and tumor type.

    Returns a single-item fallback list when the inputs are malformed.
    """
    try:
        recs = []

        # Large tumors: surgical resection first.
        if tumor_stats["total_volume"] > 5000:
            recs.append("肿瘤体积较大，建议优先考虑手术切除")

        # Grade-specific advice.
        grade = tumor_type["name"]
        if grade == "高级别胶质瘤":
            recs.append("建议术后进行同步放化疗（替莫唑胺）")
            recs.append("考虑进行分子病理检测，包括IDH突变和1p/19q共缺失状态")
        elif grade == "低级别胶质瘤":
            recs.append("建议定期随访观察，每3-6个月进行一次MRI检查")
            recs.append("若出现进展迹象，考虑手术或放疗")

        # Strong enhancement suggests higher grade.
        if tumor_stats["regions"]["enhancing"] > 0.3:
            recs.append("肿瘤增强明显，提示可能为高级别，建议积极治疗")

        # General advice, always appended.
        recs.append("建议多学科会诊（神经外科、肿瘤科、放疗科）制定个性化治疗方案")
        recs.append("考虑进行功能MRI检查，评估肿瘤与脑功能区的关系")

        return recs
    except Exception as e:
        print(f"治疗建议生成失败: {str(e)}")
        return ["无法生成治疗建议，请咨询专业医师"]

# 处理函数 - 添加内存保护
def process_scan(flair_file, t1_file, t1ce_file, t2_file, precision, priority, progress=gr.Progress()):
    """End-to-end pipeline: load the 4 modalities, segment, analyze, report.

    Args:
        flair_file, t1_file, t1ce_file, t2_file: uploaded NIfTI files
            (gradio File objects exposing `.name`).
        precision, priority: UI option values; currently unused by the
            pipeline (accepted so the button wiring matches the UI).
        progress: gradio progress reporter.

    Returns:
        6-tuple (original image, segmentation image, stats text, tumor-type
        text, regions text, recommendations text). On any error the two
        image slots are None and the stats slot carries the error message.

    Relies on the module-level `model` and `device` loaded at import time.
    NOTE(review): the time.sleep calls appear to be cosmetic pacing for the
    progress bar, not required for correctness -- confirm before removing.
    """
    try:
        if model is None or device is None:
            return (None, None, "模型加载失败，请检查模型文件路径", "", "", "")
        
        if not all([flair_file, t1_file, t1ce_file, t2_file]):
            return (None, None, "请上传所有四个模态的文件", "", "", "")
        
        progress(0, desc="开始处理")
        time.sleep(0.5)
        
        # Safely load all four modality volumes (affine taken from FLAIR).
        progress(0.1, desc="读取影像文件")
        try:
            flair_data, affine = safe_load_nifti(flair_file)
            t1_data, _ = safe_load_nifti(t1_file)
            t1ce_data, _ = safe_load_nifti(t1ce_file)
            t2_data, _ = safe_load_nifti(t2_file)
        except Exception as e:
            return (None, None, f"文件加载错误: {str(e)}", "", "", "")
        
        modalities = {
            'FLAIR': flair_data,
            'T1': t1_data,
            'T1CE': t1ce_data,
            'T2': t2_data
        }
        
        time.sleep(0.5)
        
        # Preprocess: normalize, clip, and resize for the network.
        progress(0.2, desc="预处理影像数据")
        try:
            # Note: flair_data is rebound here to the FLAIR volume returned
            # by preprocess_data (the original, un-resized array).
            processed_data, original_shape, zoom_factors, flair_data = preprocess_data(modalities)
        except Exception as e:
            return (None, None, f"数据预处理错误: {str(e)}", "", "", "")
        
        processed_data = processed_data.to(device)
        time.sleep(0.5)
        
        # Inference: per-voxel class via argmax over the channel logits.
        progress(0.4, desc="进行肿瘤分割")
        with torch.no_grad():
            try:
                output = model(processed_data)
                prediction = torch.argmax(output, dim=1)
            except Exception as e:
                return (None, None, f"模型推理错误: {str(e)}", "", "", "")
        
        segmentation = postprocess_segmentation(prediction, original_shape, zoom_factors)
        time.sleep(1)
        
        # Volumetric statistics on the restored-resolution labels.
        progress(0.6, desc="计算肿瘤统计数据")
        stats = calculate_tumor_stats(segmentation, flair_data, affine)
        time.sleep(0.5)
        
        # High-/low-grade heuristic classification.
        progress(0.7, desc="分析肿瘤类型")
        tumor_type = determine_tumor_type(stats)
        
        # Slice images for the UI.
        progress(0.8, desc="生成可视化结果")
        try:
            original_img, segmentation_img = create_visualization(flair_data, segmentation)
        except Exception as e:
            return (None, None, f"可视化生成错误: {str(e)}", "", "", "")
        
        time.sleep(0.5)
        
        # Textual treatment suggestions.
        progress(0.9, desc="生成治疗建议")
        treatment_recommendations = generate_treatment_recommendations(stats, tumor_type)
        
        time.sleep(0.5)
        progress(1.0, desc="处理完成")
        
        # Format the report texts for the output textboxes.
        stats_text = f"""
        肿瘤总体积: {stats['total_volume']:.1f} mm³
        肿瘤核心体积: {stats['tumor_core_volume']:.1f} mm³
        全肿瘤体积: {stats['whole_tumor_volume']:.1f} mm³
        最大直径: {stats['max_diameter']:.1f} mm
        肿瘤负荷: {stats['tumor_burden']:.1f}%
        置信度: {stats['confidence']:.0%}
        """
        
        tumor_info = f"""
        肿瘤类型: {tumor_type['name']}
        恶性程度: {tumor_type['malignancy']}
        描述: {tumor_type['description']}
        """
        
        regions_text = f"""
        增强肿瘤 (ET): {stats['regions']['enhancing']:.0%}
        水肿区域 (ED): {stats['regions']['edema']:.0%}
        坏死区域 (NCR/NET): {stats['regions']['necrosis']:.0%}
        """
        
        recommendations_text = "\n".join([f"- {rec}" for rec in treatment_recommendations])
        
        return (original_img, segmentation_img, stats_text, tumor_info, regions_text, recommendations_text)
    
    except Exception as e:
        return (None, None, f"处理出错: {str(e)}", "", "", "")

# 加载默认示例文件
def load_example_files():
    """Return paths to the four bundled demo NIfTI files plus a status string.

    Always returns 5 values (FLAIR, T1, T1CE, T2, status); the four path
    slots are None whenever any demo file is missing or an error occurs,
    matching the gradio outputs wired to this function.
    """
    try:
        # Label -> path table; dict order matches the FLAIR/T1/T1CE/T2 outputs.
        example_paths = {
            "FLAIR示例文件": os.path.join('data', 'BraTS20_Demo_flair.nii'),
            "T1示例文件": os.path.join('data', 'BraTS20_Demo_t1.nii'),
            "T1CE示例文件": os.path.join('data', 'BraTS20_Demo_t1ce.nii'),
            "T2示例文件": os.path.join('data', 'BraTS20_Demo_t2.nii'),
        }

        # Collect the labels of any missing files.
        missing_files = [label for label, path in example_paths.items()
                         if not os.path.exists(path)]

        if missing_files:
            # 5 values: 4 None file slots plus an error message.
            return None, None, None, None, f"缺少示例文件: {', '.join(missing_files)}"

        # 5 values: the 4 file paths plus a success message.
        flair_path, t1_path, t1ce_path, t2_path = example_paths.values()
        return (
            flair_path,
            t1_path,
            t1ce_path,
            t2_path,
            "已加载示例文件，可以开始分析"
        )

    except Exception as e:
        # Keep the 5-value contract even on unexpected errors.
        return None, None, None, None, f"加载示例文件失败: {str(e)}"

# 创建Gradio界面
# Gradio UI: left column for uploads/options, right column for results.
with gr.Blocks(title="脑肿瘤自动分割系统") as demo:
    gr.Markdown("# 脑肿瘤自动分割与分析系统 (BraTS标准)")
    gr.Markdown("上传脑部MRI的四个模态影像（NIfTI格式），系统将自动进行肿瘤分割和分析")
    
    # Model-load status banner, decided once at import time.
    if model is not None and device is not None:
        gr.Markdown(f"✅ 模型加载成功 ({device.type})")
    else:
        gr.Markdown("❌ 模型加载失败，请检查models/best_brats_model_dice.pth文件是否存在")
    
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 上传四个模态的影像文件")
            flair_file = gr.File(label="FLAIR模态", file_types=[".nii", ".nii.gz"])
            t1_file = gr.File(label="T1模态", file_types=[".nii", ".nii.gz"])
            t1ce_file = gr.File(label="T1CE模态", file_types=[".nii", ".nii.gz"])
            t2_file = gr.File(label="T2模态", file_types=[".nii", ".nii.gz"])
            
            # Button that fills the four file slots with bundled demo data.
            load_example_btn = gr.Button("加载示例文件")
            example_status = gr.Textbox(label="状态", lines=1, interactive=False)
            
            # NOTE(review): these options are passed to process_scan but are
            # currently not used by the pipeline.
            with gr.Accordion("高级选项", open=False):
                precision = gr.Radio(
                    ["快速模式 (推荐)", "高精度模式"], 
                    label="分割精度", 
                    value="快速模式 (推荐)"
                )
                priority = gr.Radio(
                    ["自动检测所有类型", "优先高级别胶质瘤", "优先低级别胶质瘤"], 
                    label="肿瘤类型优先", 
                    value="自动检测所有类型"
                )
            
            process_btn = gr.Button("开始分析", variant="primary")
        
        with gr.Column(scale=2):
            with gr.Row():
                original_img = gr.Image(label="原始FLAIR影像")
                segmentation_img = gr.Image(label="分割结果")
            
            with gr.Tabs():
                with gr.Tab("肿瘤统计"):
                    stats_text = gr.Textbox(label="统计数据", lines=6)
                
                with gr.Tab("肿瘤类型分析"):
                    tumor_info = gr.Textbox(label="肿瘤类型信息", lines=5)
                
                with gr.Tab("区域分布"):
                    regions_text = gr.Textbox(label="肿瘤区域分布", lines=5)
                
                with gr.Tab("治疗建议"):
                    with gr.Column(scale=2):
                        recommendations_text = gr.Textbox(
                            label="治疗建议", 
                            lines=10,  # taller box: recommendations are multi-line
                            max_lines=20,  # allow growth for long reports
                        )
    
    # Wire the buttons: example loader fills the file slots + status box.
    load_example_btn.click(
        fn=load_example_files,
        inputs=[],
        outputs=[flair_file, t1_file, t1ce_file, t2_file, example_status]
    )
    
    # Analysis button runs the full pipeline into the six result widgets.
    process_btn.click(
        fn=process_scan,
        inputs=[flair_file, t1_file, t1ce_file, t2_file, precision, priority],
        outputs=[original_img, segmentation_img, stats_text, tumor_info, regions_text, recommendations_text]
    )
    
    gr.Markdown("""
    ### 使用说明
    1. 上传所有四个模态的NIfTI格式脑部MRI影像（.nii 或 .nii.gz）
    2. （可选）点击"加载示例文件"按钮使用系统提供的示例数据
    3. （可选）设置高级选项
    4. 点击"开始分析"按钮
    5. 查看分割结果和分析报告
    
    注意：本系统仅作为辅助诊断工具，不能替代专业医师的诊断结论。
    """)

if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside a container.
    demo.launch(server_name="0.0.0.0", server_port=9082)