import os
import tempfile
import numpy as np
import nibabel as nib
from flask import Flask, request, jsonify
from flask_cors import CORS
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import base64
from io import BytesIO
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
# Allow cross-origin requests on the API routes
CORS(app, resources={r"/api/*": {"origins": "*"}})

# Ensure Chinese characters render correctly in matplotlib figures
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

# Module-level handle to the loaded segmentation model
model = None

# 3D U-Net building block
class ResidualBlock(nn.Module):
    """3D convolutional block with a residual (skip) connection.

    Two Conv3d + BatchNorm3d stages; the block input is added back before
    the final ReLU. When input and output channel counts differ, a 1x1x1
    convolution projects the input so the addition is shape-compatible.
    """

    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(out_channels)
        # Project the input with a 1x1x1 conv only when the channel count changes.
        self.shortcut = (
            nn.Conv3d(in_channels, out_channels, kernel_size=1)
            if in_channels != out_channels
            else nn.Identity()
        )

    def forward(self, x):
        skip = self.shortcut(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += skip
        return self.relu(y)

class UNet3D(nn.Module):
    """Three-level residual 3D U-Net for voxel-wise classification.

    The encoder halves the spatial resolution three times (32 -> 64 -> 128
    channels), a 256-channel bottleneck sits at the bottom, and the decoder
    mirrors the encoder with transposed convolutions plus skip
    concatenations. A final 1x1x1 convolution maps to the requested number
    of output classes. Input spatial dimensions must be divisible by 8 so
    the decoder features align with the encoder skips.
    """

    def __init__(self, in_channels=1, out_channels=4):
        super(UNet3D, self).__init__()

        # --- encoder path ---
        self.enc1 = ResidualBlock(in_channels, 32)
        self.pool1 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.enc2 = ResidualBlock(32, 64)
        self.pool2 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.enc3 = ResidualBlock(64, 128)
        self.pool3 = nn.MaxPool3d(kernel_size=2, stride=2)

        # --- bottleneck ---
        self.bottleneck = ResidualBlock(128, 256)

        # --- decoder path (channel count doubles after skip concatenation) ---
        self.upconv3 = nn.ConvTranspose3d(256, 128, kernel_size=2, stride=2)
        self.dec3 = ResidualBlock(256, 128)

        self.upconv2 = nn.ConvTranspose3d(128, 64, kernel_size=2, stride=2)
        self.dec2 = ResidualBlock(128, 64)

        self.upconv1 = nn.ConvTranspose3d(64, 32, kernel_size=2, stride=2)
        self.dec1 = ResidualBlock(64, 32)

        # Per-voxel class logits.
        self.outconv = nn.Conv3d(32, out_channels, kernel_size=1)

    def forward(self, x):
        # Encoder: keep each level's features for the skip connections.
        s1 = self.enc1(x)
        s2 = self.enc2(self.pool1(s1))
        s3 = self.enc3(self.pool2(s2))

        y = self.bottleneck(self.pool3(s3))

        # Decoder: upsample, concatenate matching encoder features, refine.
        y = self.dec3(torch.cat([self.upconv3(y), s3], dim=1))
        y = self.dec2(torch.cat([self.upconv2(y), s2], dim=1))
        y = self.dec1(torch.cat([self.upconv1(y), s1], dim=1))

        return self.outconv(y)

def load_model():
    """Instantiate the global segmentation network and switch it to eval mode.

    No pretrained weights are loaded here (see the commented placeholder);
    the network keeps its random initialization.

    Returns:
        bool: True on success, False if construction failed.
    """
    global model
    try:
        model = UNet3D(in_channels=1, out_channels=4)

        # Placeholder: a real deployment would restore trained weights here.
        # model.load_state_dict(torch.load('path/to/model_weights.pth'))

        model.eval()
        logger.info("模型加载成功")
        return True
    except Exception as exc:
        logger.error(f"模型加载失败: {str(exc)}")
        return False

# Data preprocessing
def preprocess_data(nifti_data, modality):
    """Standardize, clip, resample, and tensorize a NIfTI volume.

    Args:
        nifti_data: 3D numpy array of raw voxel intensities.
        modality: one of 'flair', 't1', 't1ce', 't2'; selects the clipping
            window applied after z-score standardization. Unknown modality
            strings are left unclipped.

    Returns:
        torch.FloatTensor of shape (1, 1, 128, 128, 128), i.e. with batch
        and channel axes prepended.

    Raises:
        Exception: re-raised after logging if any step fails.
    """
    try:
        # Z-score normalization; fall back to mean-shift for constant volumes.
        mu, sigma = np.mean(nifti_data), np.std(nifti_data)
        nifti_data = (nifti_data - mu) / sigma if sigma > 0 else nifti_data - mu

        # Modality-specific intensity windows (in standardized units).
        windows = {'flair': (-1, 3), 't1': (-1, 4), 't1ce': (-1, 4), 't2': (-1, 2.5)}
        if modality in windows:
            lo, hi = windows[modality]
            nifti_data = np.clip(nifti_data, lo, hi)

        # Linear (order-1 spline) resample to the fixed model input size.
        # A production pipeline may need smarter resampling than this.
        from scipy.ndimage import zoom
        target_shape = (128, 128, 128)
        factors = [t / s for t, s in zip(target_shape, nifti_data.shape)]
        resampled = zoom(nifti_data, factors, order=1)

        # Prepend batch and channel axes: (1, 1, D, H, W).
        return torch.FloatTensor(resampled[np.newaxis, np.newaxis, ...])
    except Exception as e:
        logger.error(f"数据预处理失败: {str(e)}")
        raise

# Tumor statistics
def calculate_tumor_stats(segmentation, original_data, affine):
    """Compute volume, diameter, and burden statistics from a segmentation.

    Args:
        segmentation: label array (0=background, 1=edema, 2=non-enhancing
            tumor, 4=enhancing tumor).
        original_data: raw image volume; used only for a crude
            intensity-threshold brain mask.
        affine: 4x4 NIfTI affine; its upper-left 3x3 maps voxel indices to
            world millimeters.

    Returns:
        dict with "total_volume" (mm^3), "max_diameter" (mm),
        "tumor_burden" (% of brain volume), a simulated "confidence",
        and "regions" with per-region volume fractions.

    Raises:
        Exception: re-raised after logging if any step fails.
    """
    try:
        # Voxel volume in mm^3. |det| of the 3x3 part is correct even when
        # the affine contains axis flips (negative scales are common in
        # NIfTI) or rotations; the previous diagonal product returned a
        # negative/wrong volume in those cases.
        voxel_volume = abs(np.linalg.det(affine[:3, :3]))

        # Per-region volumes.
        edema_volume = np.sum(segmentation == 1) * voxel_volume
        non_enhancing_volume = np.sum(segmentation == 2) * voxel_volume
        enhancing_volume = np.sum(segmentation == 4) * voxel_volume
        total_volume = edema_volume + non_enhancing_volume + enhancing_volume

        # Tumor burden relative to a simple percentile-threshold brain mask.
        brain_mask = original_data > np.percentile(original_data, 10)
        brain_volume = np.sum(brain_mask) * voxel_volume
        tumor_burden = (total_volume / brain_volume) * 100 if brain_volume > 0 else 0

        # Maximum diameter: largest pairwise distance between tumor voxels
        # in world coordinates (needs at least two voxels).
        max_diameter = 0
        coords = np.argwhere(segmentation > 0)
        if len(coords) > 1:
            world_coords = np.dot(affine[:3, :3], coords.T).T + affine[:3, 3]
            from scipy.spatial.distance import pdist
            max_diameter = np.max(pdist(world_coords))

        # Region fractions of the total tumor volume.
        enhancing_ratio = enhancing_volume / total_volume if total_volume > 0 else 0
        edema_ratio = edema_volume / total_volume if total_volume > 0 else 0
        necrosis_ratio = non_enhancing_volume / total_volume if total_volume > 0 else 0

        return {
            "total_volume": total_volume,
            "max_diameter": max_diameter,
            "tumor_burden": tumor_burden,
            "confidence": np.random.uniform(0.85, 0.98),  # simulated confidence
            "regions": {
                "enhancing": enhancing_ratio,
                "edema": edema_ratio,
                "necrosis": necrosis_ratio
            }
        }
    except Exception as e:
        logger.error(f"肿瘤统计计算失败: {str(e)}")
        raise

# Visualization
def create_visualization(original_data, segmentation, modality):
    """Render the tumor-centered axial slice with and without overlays.

    Args:
        original_data: raw 3D image volume.
        segmentation: label volume (1=edema, 2=non-enhancing, 4=enhancing).
        modality: imaging modality name (currently unused in rendering).

    Returns:
        (original_b64, segmentation_b64): base64-encoded PNGs of the plain
        slice and of the slice with colored region overlays.

    Raises:
        Exception: re-raised after logging if rendering fails.
    """
    try:
        # Pick the middle z-slice of the tumor extent, falling back to the
        # volume center when no tumor voxels exist.
        tumor_mask = segmentation > 0
        z_center = original_data.shape[2] // 2
        if np.any(tumor_mask):
            z_indices = np.where(np.any(tumor_mask, axis=(0, 1)))[0]
            if len(z_indices) > 0:
                z_center = z_indices[len(z_indices) // 2]

        original_slice = original_data[:, :, z_center]
        segmentation_slice = segmentation[:, :, z_center]

        # Percentile-based display window so outliers don't wash out contrast.
        orig_min, orig_max = np.percentile(original_slice, 1), np.percentile(original_slice, 99)
        original_slice_normalized = (original_slice - orig_min) / (orig_max - orig_min + 1e-8)
        original_slice_normalized = np.clip(original_slice_normalized, 0, 1)

        def fig_to_base64(fig):
            # Encode the rendered figure directly. The previous code opened
            # the PNG in PIL and re-encoded it to PNG a second time, which
            # only cost CPU/memory; it also closes the figure promptly.
            buf = BytesIO()
            fig.savefig(buf, format='png', bbox_inches='tight', pad_inches=0, dpi=150)
            plt.close(fig)
            return base64.b64encode(buf.getvalue()).decode('utf-8')

        # Plain anatomical slice.
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.imshow(original_slice_normalized, cmap='gray')
        ax.axis('off')
        fig.tight_layout(pad=0)
        original_b64 = fig_to_base64(fig)

        # Slice with per-region overlays
        # (edema=yellow, non-enhancing=purple, enhancing=red).
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.imshow(original_slice_normalized, cmap='gray')
        ax.imshow(np.where(segmentation_slice == 1, 1, 0), cmap='YlOrBr', alpha=0.5)
        ax.imshow(np.where(segmentation_slice == 2, 1, 0), cmap='Purples', alpha=0.5)
        ax.imshow(np.where(segmentation_slice == 4, 1, 0), cmap='Reds', alpha=0.5)
        ax.axis('off')
        fig.tight_layout(pad=0)

        # Legend identifying the overlay colors.
        from matplotlib.patches import Patch
        legend_elements = [
            Patch(facecolor='yellow', edgecolor='yellow', alpha=0.5, label='水肿区域'),
            Patch(facecolor='purple', edgecolor='purple', alpha=0.5, label='非增强肿瘤'),
            Patch(facecolor='red', edgecolor='red', alpha=0.5, label='增强肿瘤')
        ]
        ax.legend(handles=legend_elements, loc='lower right', fontsize=8)
        segmentation_b64 = fig_to_base64(fig)

        return original_b64, segmentation_b64
    except Exception as e:
        plt.close('all')  # don't leak figures on failure
        logger.error(f"可视化创建失败: {str(e)}")
        raise

# Treatment recommendations
def generate_treatment_recommendations(tumor_stats, tumor_type):
    """Build an ordered list of treatment suggestions.

    Args:
        tumor_stats: dict from calculate_tumor_stats; reads "total_volume"
            and regions["enhancing"].
        tumor_type: dict from determine_tumor_type; reads "name".

    Returns:
        list[str]: recommendation strings, ending with two general-purpose
        items; a single fallback message if anything goes wrong.
    """
    recs = []
    try:
        # Size-driven advice: volumes above 5000 mm^3 (5 cm^3) favor resection.
        if tumor_stats["total_volume"] > 5000:
            recs.append("肿瘤体积较大，建议优先考虑手术切除")

        # Grade-specific advice.
        grade = tumor_type["name"]
        if grade == "高级别胶质瘤":
            recs.append("建议术后进行同步放化疗（替莫唑胺）")
            recs.append("考虑进行分子病理检测，包括IDH突变和1p/19q共缺失状态")
        elif grade == "低级别胶质瘤":
            recs.append("建议定期随访观察，每3-6个月进行一次MRI检查")
            recs.append("若出现进展迹象，考虑手术或放疗")

        # Strong enhancement (>30% of tumor volume) suggests higher grade.
        if tumor_stats["regions"]["enhancing"] > 0.3:
            recs.append("肿瘤增强明显，提示可能为高级别，建议积极治疗")

        # Always-applicable advice.
        recs.append("建议多学科会诊（神经外科、肿瘤科、放疗科）制定个性化治疗方案")
        recs.append("考虑进行功能MRI检查，评估肿瘤与脑功能区的关系")

        return recs
    except Exception as e:
        logger.error(f"治疗建议生成失败: {str(e)}")
        return ["无法生成治疗建议，请咨询专业医师"]

# Tumor type classification
def determine_tumor_type(segmentation_stats):
    """Classify tumor grade from the enhancing-volume fraction.

    An enhancing fraction above 20% is labeled high-grade glioma, otherwise
    low-grade. Any error yields an "unknown" descriptor instead of raising.

    Args:
        segmentation_stats: dict with regions["enhancing"] in [0, 1].

    Returns:
        dict with "name", "description" and "malignancy" keys.
    """
    try:
        is_high_grade = segmentation_stats["regions"]["enhancing"] > 0.2
        if is_high_grade:
            return {
                "name": "高级别胶质瘤",
                "description": "高级别胶质瘤通常生长迅速，恶性程度高，预后较差。常见类型包括胶质母细胞瘤（GBM）。",
                "malignancy": "高"
            }
        return {
            "name": "低级别胶质瘤",
            "description": "低级别胶质瘤生长相对缓慢，恶性程度较低，预后较好。常见类型包括星形细胞瘤和少突胶质细胞瘤。",
            "malignancy": "中"
        }
    except Exception as e:
        logger.error(f"肿瘤类型判断失败: {str(e)}")
        return {
            "name": "未知肿瘤类型",
            "description": "无法确定肿瘤类型，建议进一步检查。",
            "malignancy": "未知"
        }

def _simulate_segmentation(seg_shape):
    """Generate a random demo segmentation (labels 0/1/2/4).

    With 80% probability, places three concentric spheres (edema=1,
    non-enhancing=2, enhancing=4) around a random center. Assumes every
    dimension exceeds 60 voxels so the random center fits — TODO confirm
    against the expected input sizes.
    """
    segmentation = np.zeros(seg_shape, dtype=np.int32)
    if np.random.random() > 0.2:  # 80% chance of generating a tumor
        center = [np.random.randint(30, s - 30) for s in seg_shape]
        # Open coordinate grids over the FULL volume. The previous code
        # built np.meshgrid sub-grids whose boolean masks did not match the
        # segmentation's shape, so segmentation[mask] raised IndexError
        # whenever a tumor was generated.
        x, y, z = np.ogrid[:seg_shape[0], :seg_shape[1], :seg_shape[2]]
        dist2 = (x - center[0]) ** 2 + (y - center[1]) ** 2 + (z - center[2]) ** 2
        segmentation[dist2 < 20 ** 2] = 1  # edema
        segmentation[dist2 < 10 ** 2] = 2  # non-enhancing tumor
        segmentation[dist2 < 5 ** 2] = 4   # enhancing tumor
    return segmentation

@app.route('/api/upload', methods=['POST'])
def upload_file():
    """Handle a NIfTI upload, run (simulated) segmentation, return results.

    Expects multipart form data with an 'image' file (.nii / .nii.gz) and
    optional 'modality', 'precision', 'priority' fields. Responds with
    base64 visualizations, tumor statistics, tumor type and treatment
    recommendations, or a JSON error with 400/500 status.
    """
    try:
        # --- request validation ---
        if 'image' not in request.files:
            return jsonify({"error": "未上传文件"}), 400

        file = request.files['image']

        if file.filename == '':
            return jsonify({"error": "未选择文件"}), 400

        if not (file.filename.endswith('.nii') or file.filename.endswith('.nii.gz')):
            return jsonify({"error": "文件格式不正确，仅支持 .nii 和 .nii.gz 格式"}), 400

        modality = request.form.get('modality', 'flair')
        precision = request.form.get('precision', 'fast')
        priority = request.form.get('priority', 'auto')

        logger.info(f"接收文件: {file.filename}, 模态: {modality}, 精度: {precision}, 优先级: {priority}")

        # Persist the upload to a temp file so nibabel can read it from disk.
        with tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False) as temp_file:
            file.save(temp_file)
            temp_filename = temp_file.name

        try:
            logger.info("开始读取NIfTI文件")
            nifti_img = nib.load(temp_filename)
            nifti_data = nifti_img.get_fdata()
            affine = nifti_img.affine

            logger.info("开始数据预处理")
            # NOTE(review): processed_data is unused because inference is
            # simulated; a real deployment would feed it to the model.
            processed_data = preprocess_data(nifti_data, modality)

            logger.info("开始模型推理")
            with torch.no_grad():
                # Demo stand-in for: output = model(processed_data)
                segmentation = _simulate_segmentation(nifti_data.shape)

            logger.info("计算肿瘤统计数据")
            stats = calculate_tumor_stats(segmentation, nifti_data, affine)

            logger.info("确定肿瘤类型")
            tumor_type = determine_tumor_type(stats)

            logger.info("创建可视化结果")
            original_vis, segmentation_vis = create_visualization(nifti_data, segmentation, modality)

            logger.info("生成治疗建议")
            treatment_recommendations = generate_treatment_recommendations(stats, tumor_type)

            response = {
                "success": True,
                "original_visualization": original_vis,
                "segmentation_visualization": segmentation_vis,
                "stats": stats,
                "tumor_type": tumor_type,
                "treatment_recommendations": treatment_recommendations
            }

            return jsonify(response)

        except Exception as e:
            logger.error(f"处理错误: {str(e)}")
            return jsonify({"error": f"处理文件时出错: {str(e)}"}), 500
        finally:
            # Always remove the temp file, success or failure.
            if os.path.exists(temp_filename):
                os.remove(temp_filename)

    except Exception as e:
        logger.error(f"请求处理错误: {str(e)}")
        return jsonify({"error": "服务器内部错误"}), 500

@app.route('/api/health', methods=['GET'])
def health_check():
    """Liveness probe: service status plus whether the model is loaded."""
    payload = {
        "status": "healthy",
        "model_loaded": model is not None
    }
    return jsonify(payload)

if __name__ == '__main__':
    # Load the model at startup (return value ignored; /api/health exposes
    # whether it succeeded).
    load_model()
    # Serve on all interfaces.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger;
    # combined with host='0.0.0.0' this is a remote-code-execution risk if
    # reachable outside development — confirm before deploying.
    app.run(host='0.0.0.0', port=9082, debug=True)
