import os
import tempfile
import numpy as np
import torch
import nibabel as nib
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import io
import base64
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn.functional as F

# Ensure CJK glyphs render correctly in matplotlib figures
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

# Initialize the Flask application
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend

# Configuration
UPLOAD_FOLDER = 'uploads'
MODEL_PATH = 'models/best_brats_model_dice.pth'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Device selection: prefer GPU when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Global state - lazily loaded model and preprocessing parameters
model = None
slice_range = (60, 100)  # axial slice window; must match the range used at training time

# 模型定义 - 与训练时相同
class ResidualBlock(torch.nn.Module):
    """3D residual block: two conv+BN stages with a skip connection.

    The skip path is the identity when the channel count is unchanged,
    otherwise a 1x1x1 convolution projects the input to `out_channels`.
    Attribute names are part of the checkpoint's state-dict keys and
    must not be renamed.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        nn = torch.nn
        self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(out_channels)
        # Project the skip connection only when channel counts differ.
        if in_channels == out_channels:
            self.residual = nn.Identity()
        else:
            self.residual = nn.Conv3d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        skip = self.residual(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.relu(y + skip)


class Improved3DUNet(torch.nn.Module):
    """Compact 3D U-Net built from residual blocks (two down/up levels).

    Input and output are 5D tensors [B, C, H, W, D]; spatial dims must
    be divisible by 4 so the two pooling stages can be undone exactly.
    Attribute names are part of the checkpoint's state-dict keys and
    must not be renamed.
    """

    def __init__(self, in_channels=4, out_channels=4, base_filters=16):
        super().__init__()
        nn = torch.nn
        f = base_filters
        # Encoder
        self.enc1 = ResidualBlock(in_channels, f)
        self.pool1 = nn.MaxPool3d(2)
        self.enc2 = ResidualBlock(f, f * 2)
        self.pool2 = nn.MaxPool3d(2)
        # Bottleneck
        self.bottleneck = ResidualBlock(f * 2, f * 4)
        # Decoder: transposed-conv upsampling + skip concatenation
        self.up2 = nn.ConvTranspose3d(f * 4, f * 2, kernel_size=2, stride=2)
        self.dec2 = ResidualBlock(f * 4, f * 2)
        self.up1 = nn.ConvTranspose3d(f * 2, f, kernel_size=2, stride=2)
        self.dec1 = ResidualBlock(f * 2, f)
        # Per-voxel class logits
        self.out_conv = nn.Conv3d(f, out_channels, kernel_size=1)

    def forward(self, x):
        # Encoder path, keeping skip features for the decoder.
        skip1 = self.enc1(x)
        skip2 = self.enc2(self.pool1(skip1))
        bottom = self.bottleneck(self.pool2(skip2))
        # Decoder path: upsample, concatenate skip along channels, refine.
        up = self.dec2(torch.cat([self.up2(bottom), skip2], dim=1))
        up = self.dec1(torch.cat([self.up1(up), skip1], dim=1))
        return self.out_conv(up)


def load_model():
    """Lazily load the trained segmentation model into the global `model`.

    No-op when the model is already loaded. Weights are read from
    MODEL_PATH, mapped onto the configured device, and the network is
    put into eval mode (freezes BatchNorm statistics for inference).
    """
    global model
    if model is not None:
        return
    print(f"加载模型: {MODEL_PATH}")
    model = Improved3DUNet(in_channels=4, out_channels=4, base_filters=16)
    # NOTE(review): torch.load without weights_only deserializes arbitrary
    # pickles — only load checkpoints from trusted sources.
    state = torch.load(MODEL_PATH, map_location=device)
    model.load_state_dict(state)
    model.to(device)
    model.eval()  # inference mode
    print("模型加载完成")


def preprocess_data(flair_path, t1_path, t1ce_path, t2_path):
    """Load the four MRI modalities and build a model-ready input tensor.

    Each NIfTI volume is cropped to the configured axial slice window,
    z-score normalized per modality, NaN-cleaned, stacked channel-first
    and given a batch dimension, yielding a [1, 4, H, W, D] float32
    tensor on `device`.
    """
    lo, hi = slice_range
    volumes = []
    for path in (flair_path, t1_path, t1ce_path, t2_path):
        vol = np.array(nib.load(path).get_fdata(), dtype=np.float32)
        vol = vol[:, :, lo:hi]  # keep the slice window used at training time
        # Per-modality z-score normalization (epsilon avoids divide-by-zero)
        vol = (vol - np.mean(vol)) / (np.std(vol) + 1e-6)
        volumes.append(np.nan_to_num(vol))
    stacked = np.stack(volumes)  # [C, H, W, D]
    batch = torch.from_numpy(stacked).unsqueeze(0)  # add batch dimension
    return batch.to(device)


def postprocess_result(output):
    """Convert raw network logits into a labeled segmentation volume.

    Takes logits of shape [1, C, H, W, D], picks the argmax class per
    voxel, and maps class index 3 back to the original BraTS label 4
    (labels were remapped 4 -> 3 for training).
    """
    labels = output.argmax(dim=1).squeeze().cpu().numpy()
    labels[labels == 3] = 4  # restore the original BraTS label
    return labels


def calculate_tumor_stats(segmentation):
    """Compute tumor volume/diameter statistics from a labeled segmentation.

    Args:
        segmentation: 3D integer array with BraTS labels
            (0 background, 1 necrosis, 2 edema, 4 enhancing tumor).

    Returns:
        dict with per-region volumes (cm³), the bounding-box diagonal
        diameter (cm) and a `has_tumor` flag. All values are builtin
        Python floats/bools so the dict is JSON-serializable.

    Note: volumes assume ~0.15 cm³ per voxel and the diameter assumes
    ~0.5 mm voxel spacing — rough estimates, not read from the NIfTI
    header. TODO(review): derive from the affine/zooms instead.
    """
    VOXEL_CM3 = 0.15  # assumed voxel volume in cm³

    def region_volume(mask):
        # Bug fix: cast to a plain float — np.float64/np.bool_ values
        # are not serializable by flask.jsonify.
        return float(np.sum(mask)) * VOXEL_CM3

    necrosis_mask = segmentation == 1
    edema_mask = segmentation == 2
    enhancing_mask = segmentation == 4
    tumor_mask = necrosis_mask | edema_mask | enhancing_mask

    total_volume = region_volume(tumor_mask)
    necrosis_volume = region_volume(necrosis_mask)
    edema_volume = region_volume(edema_mask)
    enhancing_volume = region_volume(enhancing_mask)

    # Simplified max diameter: diagonal of the tumor's bounding box.
    if tumor_mask.any():
        coords = np.where(tumor_mask)
        spans = [float(np.max(c) - np.min(c)) for c in coords]
        max_diameter = float(np.sqrt(sum(s ** 2 for s in spans))) * 0.5  # ~0.5 mm spacing
        max_diameter = max_diameter / 10  # mm -> cm
    else:
        max_diameter = 0.0

    return {
        "total_volume": round(total_volume, 1),
        "necrosis_volume": round(necrosis_volume, 1),
        "edema_volume": round(edema_volume, 1),
        "enhancing_volume": round(enhancing_volume, 1),
        "max_diameter": round(max_diameter, 1),
        "has_tumor": bool(total_volume > 0.1),
    }


def generate_treatment_recommendations(stats):
    """Build a list of preliminary treatment suggestions from tumor stats.

    Returns fixed follow-up advice when no tumor was detected; otherwise
    combines size-tiered advice, an enhancing-region warning (when
    present), general notes, and a trailing disclaimer.
    """
    # No tumor detected: advise confirmation and routine follow-up only.
    if not stats["has_tumor"]:
        return [
            "未检测到明显肿瘤区域，建议结合临床症状和其他检查结果进一步确认。",
            "定期随访观察，建议3-6个月后复查MRI。"
        ]

    advice = []

    # Size-tiered recommendation (< 5 / < 20 / >= 20 cm³).
    if stats["total_volume"] < 5:
        advice.append(f"肿瘤总体积较小（{stats['total_volume']} cm³），可考虑密切观察或活检明确病理性质。")
    elif stats["total_volume"] < 20:
        advice.append(f"肿瘤体积中等（{stats['total_volume']} cm³），建议进一步明确病理诊断，制定个体化治疗方案。")
    else:
        advice.append(f"肿瘤体积较大（{stats['total_volume']} cm³），可能需要考虑手术切除，具体需评估患者整体状况。")

    # Enhancing tumor region present: flag possible high-grade glioma.
    if stats["enhancing_volume"] > 0:
        advice.append(f"检测到增强肿瘤区域（{stats['enhancing_volume']} cm³），提示可能为高级别胶质瘤，建议尽快进行多学科会诊。")

    # General notes that always apply.
    advice.extend([
        "建议进行全面神经功能评估，包括认知功能、运动功能等。",
        "治疗方案应结合患者年龄、身体状况、肿瘤位置及病理类型综合制定。",
        "所有治疗决策需由多学科团队（神经外科、肿瘤科、放疗科等）共同商议决定。",
    ])

    # Disclaimer always goes last.
    advice.append("\n【免责声明】：以上建议仅基于影像分析结果，不构成最终医疗决策。具体治疗方案请遵循专业医师指导。")

    return advice


def create_visualization(original, segmentation, slice_idx=20):
    """Render a side-by-side original/segmentation image, base64-encoded.

    Args:
        original: 4D array [C, H, W, D]; channel 0 (assumed FLAIR) is
            displayed — the code indexes `original[0, :, :, slice_idx]`.
        segmentation: 3D label array [H, W, D] with BraTS labels 1/2/4.
        slice_idx: axial slice to display; falls back to the middle
            slice when out of range.

    Returns:
        str: base64-encoded PNG of the two-panel figure.
    """
    # Fall back to the middle slice if the requested one is out of range.
    if slice_idx >= original.shape[3]:
        slice_idx = original.shape[3] // 2

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
    try:
        # Left panel: raw FLAIR slice.
        ax1.imshow(original[0, :, :, slice_idx], cmap='gray')
        ax1.set_title('原始影像 (FLAIR)')
        ax1.axis('off')

        # Right panel: same slice with tumor sub-regions overlaid.
        ax2.imshow(original[0, :, :, slice_idx], cmap='gray', alpha=0.7)

        mask1 = segmentation == 1  # necrotic core
        mask2 = segmentation == 2  # edema
        mask4 = segmentation == 4  # enhancing tumor

        # masked_where hides non-region pixels so only the tumor is tinted.
        ax2.imshow(np.ma.masked_where(~mask1[:, :, slice_idx], mask1[:, :, slice_idx]),
                   cmap='Reds', alpha=0.6, interpolation='none')
        ax2.imshow(np.ma.masked_where(~mask2[:, :, slice_idx], mask2[:, :, slice_idx]),
                   cmap='YlOrBr', alpha=0.5, interpolation='none')
        ax2.imshow(np.ma.masked_where(~mask4[:, :, slice_idx], mask4[:, :, slice_idx]),
                   cmap='Greens', alpha=0.6, interpolation='none')

        ax2.set_title('肿瘤分割结果')
        ax2.axis('off')

        # Legend mapping overlay colors to region names.
        from matplotlib.patches import Patch
        legend_elements = [
            Patch(facecolor='red', edgecolor='w', alpha=0.6, label='坏死区'),
            Patch(facecolor='#FFA500', edgecolor='w', alpha=0.5, label='水肿区'),
            Patch(facecolor='green', edgecolor='w', alpha=0.6, label='增强肿瘤区')
        ]
        ax2.legend(handles=legend_elements, loc='lower right', fontsize=8)

        # Render straight into an in-memory PNG buffer and base64-encode
        # it. (The previous PIL open/re-save round-trip of an already
        # valid PNG buffer was redundant.)
        buf = io.BytesIO()
        fig.tight_layout()
        fig.savefig(buf, format='png', dpi=300, bbox_inches='tight')
        buf.seek(0)
        return base64.b64encode(buf.read()).decode('utf-8')
    finally:
        # Bug fix: close the figure; pyplot keeps figures alive until
        # closed, so each request leaked memory before.
        plt.close(fig)


@app.route('/api/upload', methods=['POST'])
def upload_files():
    """Accept the four MRI modality files, run segmentation, return results.

    Expects multipart form files 'flair', 't1', 't1ce', 't2' (NIfTI).
    Responds with tumor statistics, a base64 PNG visualization and text
    recommendations, or a JSON error with HTTP 400/500.
    """
    file_paths = {}
    try:
        # Validate that all four modalities were uploaded.
        required_files = ['flair', 't1', 't1ce', 't2']
        for file_type in required_files:
            if file_type not in request.files:
                return jsonify({"error": f"缺少{file_type}文件"}), 400

        # Persist uploads to temp files (nibabel needs real file paths).
        for file_type in required_files:
            file = request.files[file_type]
            if file.filename == '':
                return jsonify({"error": f"{file_type}文件名为空"}), 400

            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.nii.gz')
            file.save(temp_file.name)
            temp_file.close()  # bug fix: release the handle (required on Windows)
            file_paths[file_type] = temp_file.name

        # Bug fix: ensure the model exists even when the app was not
        # started via __main__ (e.g. under a WSGI server); load_model()
        # is a no-op if it is already loaded.
        load_model()

        # Preprocess the four modalities into a [1, 4, H, W, D] tensor.
        input_data = preprocess_data(
            file_paths['flair'],
            file_paths['t1'],
            file_paths['t1ce'],
            file_paths['t2']
        )

        # Inference without gradient tracking.
        with torch.no_grad():
            output = model(input_data)

        segmentation = postprocess_result(output)
        stats = calculate_tumor_stats(segmentation)

        # Bug fix: drop the batch dimension — create_visualization
        # indexes the volume as [C, H, W, D], not [1, C, H, W, D].
        visualization = create_visualization(input_data.cpu().numpy()[0], segmentation)

        recommendations = generate_treatment_recommendations(stats)

        return jsonify({
            "success": True,
            "stats": stats,
            "visualization": visualization,
            "recommendations": recommendations
        })

    except Exception as e:
        print(f"处理错误: {str(e)}")
        return jsonify({"error": str(e)}), 500
    finally:
        # Bug fix: remove temp files even when processing raised; the
        # old code leaked them on any exception.
        for path in file_paths.values():
            try:
                os.unlink(path)
            except OSError:
                pass


@app.route('/api/status', methods=['GET'])
def check_status():
    """Health-check endpoint: reports whether the model is loaded and the device."""
    loaded = model is not None  # read-only access; no `global` statement needed
    return jsonify({
        "status": "running",
        "model_loaded": loaded,
        "device": str(device),
    })


if __name__ == '__main__':
    # Load the model once at startup so the first request is not delayed
    load_model()
    # Start the development server
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader and
    # must not be used in production — serve via a WSGI server instead.
    app.run(host='0.0.0.0', port=5000, debug=True)
