# backend/main.py
from fastapi import FastAPI, Request, Response, UploadFile, File, Query, Form
from fastapi.responses import StreamingResponse, JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from typing import List, Dict, Any, Optional
import re
import json, os, time, asyncio, uuid
from datetime import datetime
import threading
import shutil
from medical_analysis import medical_analyzer
from video_analysis import video_analyzer
import requests
app = FastAPI()
# Global lock serializing all JSON file reads/writes in this module.
LOCK = threading.Lock()

# Import the optional RAG integration module; the API degrades gracefully
# (RAG_ENABLED = False) when it is missing or fails to load.
try:
    from rag_integration import get_rag_retriever, is_rag_available
    RAG_ENABLED = True
    print("✅ RAG 模块导入成功")
except Exception as e:
    print(f"⚠️  RAG 模块导入失败: {e}")
    RAG_ENABLED = False
# CORS for the Vite dev frontend.
# NOTE(review): "*" combined with allow_credentials=True is a known CORS
# misconfiguration — browsers reject wildcard origins on credentialed
# requests; confirm whether the explicit localhost origins are sufficient.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173", "http://127.0.0.1:5173", "*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Static file serving from ./static next to this file (created on startup).
STATIC_DIR = os.path.join(os.path.dirname(__file__), "static")
os.makedirs(STATIC_DIR, exist_ok=True)
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")

# ============================================================
# 💡 统一流式响应包装器
# ============================================================
def create_stream_generator(generator_function, include_metadata=None):
    """
    Unified SSE wrapper used by every streaming endpoint.

    Args:
        generator_function: zero-argument callable returning an iterable of
            plain text chunks.
        include_metadata: optional dict sent first as a
            ``{"type": "metadata", ...}`` event (e.g. ``{"file_path": ...}``).

    Returns:
        A zero-argument generator function producing SSE-framed lines
        (``data: <json>\\n\\n``); it always terminates with ``data: [DONE]``,
        even when the wrapped generator raises.
    """
    def stream_generator():
        # Uses the module-level `json` import; the previous inner
        # `import json` was redundant shadowing.
        try:
            # 1. Emit the metadata event first, if provided.
            if include_metadata:
                metadata_json = {"type": "metadata", **include_metadata}
                yield f"data: {json.dumps(metadata_json, ensure_ascii=False)}\n\n"
                print(f"📤 [元数据] 已发送: {metadata_json}")

            # 2. Stream the content chunks in a normalized envelope.
            chunk_count = 0
            for chunk in generator_function():
                chunk_count += 1
                content_json = {"type": "content", "content": chunk}
                yield f"data: {json.dumps(content_json, ensure_ascii=False)}\n\n"

                # Log the first few chunks and then every 50th one.
                if chunk_count <= 3 or chunk_count % 50 == 0:
                    print(f"   📝 [Chunk {chunk_count}] 发送: {chunk[:30]}...")

            print(f"✅ [流式包装器] 完成，共{chunk_count}个chunk")

        except Exception as e:
            # Normalized error event so clients can surface the failure.
            print(f"❌ [流式包装器] 错误: {str(e)}")
            import traceback
            traceback.print_exc()
            error_json = {"type": "error", "error": str(e)}
            yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"

        finally:
            # Always terminate the stream so clients can stop listening.
            print("🏁 [流式包装器] 发送 [DONE] 信号")
            yield "data: [DONE]\n\n"

    return stream_generator

# Root directory for all per-module session/message storage.
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
os.makedirs(DATA_DIR, exist_ok=True)
# (Legacy single-directory layout removed; each module now gets its own
# sessions.json and messages/ directory under DATA_DIR/<module>/.)

# Modules whose chat data is stored in separate sub-directories of DATA_DIR.
SUPPORTED_MODULES = ["blockchain", "supervision", "medical", "math", "video"]

# Module used whenever a request omits (or passes an unknown) module name.
default_module = "medical"
# Helpers that create and resolve each module's independent directory layout.
def get_module_dir(module: str) -> str:
    """Return the data directory for *module*, creating it if needed.

    Raises ValueError for module names outside SUPPORTED_MODULES.
    """
    if module not in SUPPORTED_MODULES:
        raise ValueError(f"不支持的模块: {module}，支持的模块列表: {SUPPORTED_MODULES}")
    path = os.path.join(DATA_DIR, module)
    os.makedirs(path, exist_ok=True)
    return path

def get_sessions_path(module: str) -> str:
    """Return the path of *module*'s sessions.json index file."""
    base = get_module_dir(module)
    return os.path.join(base, "sessions.json")

def get_messages_dir(module: str) -> str:
    """Return *module*'s messages directory, creating it if needed."""
    path = os.path.join(get_module_dir(module), "messages")
    os.makedirs(path, exist_ok=True)
    return path

# Eagerly create every supported module's data and messages directories at
# import time so request handlers never hit a missing-directory error.
for module in SUPPORTED_MODULES:
    get_module_dir(module)
    get_messages_dir(module)
    

def now_iso() -> str:
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    # datetime.utcnow() is deprecated since Python 3.12; build the same naive
    # UTC timestamp from an aware "now" to keep the output format identical.
    from datetime import timezone  # local import: avoids touching file-level imports
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"

def load_sessions(module: str = default_module) -> List[Dict[str, Any]]:
    """Load the session list for *module* (unknown modules fall back to the default)."""
    if module not in SUPPORTED_MODULES:
        module = default_module
    path = get_sessions_path(module)
    if os.path.exists(path):
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh).get("sessions", [])
    # First access for this module: create an empty index file.
    with open(path, "w", encoding="utf-8") as fh:
        json.dump({"sessions": []}, fh, ensure_ascii=False, indent=4)
    return []

def save_sessions(sessions: List[Dict[str, Any]], module: str = default_module):
    """Persist the session list for *module* (unknown modules fall back to the default)."""
    if module not in SUPPORTED_MODULES:
        module = default_module
    with open(get_sessions_path(module), "w", encoding="utf-8") as fh:
        json.dump({"sessions": sessions}, fh, ensure_ascii=False, indent=4)

def session_messages_path(sid: str, module: str = default_module):
    """Return the messages-file path for session *sid* within *module*."""
    if module not in SUPPORTED_MODULES:
        module = default_module
    filename = f"{sid}.json"
    return os.path.join(get_messages_dir(module), filename)

def load_messages(sid: str, module: str = default_module) -> List[Dict[str, Any]]:
    """Load the message history of session *sid*; [] when the file is absent."""
    if module not in SUPPORTED_MODULES:
        module = default_module
    path = session_messages_path(sid, module)
    if not os.path.exists(path):
        return []
    with open(path, "r", encoding="utf-8") as fh:
        payload = json.load(fh)
    return payload.get("messages", [])

def save_messages(sid: str, messages: List[Dict[str, Any]], module: str = default_module):
    """Persist the message history of session *sid* in *module*."""
    if module not in SUPPORTED_MODULES:
        module = default_module
    target = session_messages_path(sid, module)
    with open(target, "w", encoding="utf-8") as fh:
        json.dump({"messages": messages}, fh, ensure_ascii=False, indent=4)

def list_all_modules() -> List[str]:
    """Return the list of supported module names.

    NOTE: returns the shared SUPPORTED_MODULES list itself, not a copy —
    callers must not mutate it.
    """
    return SUPPORTED_MODULES

def load_all_sessions() -> List[Dict[str, Any]]:
    """Collect sessions from every module, tagging each dict with its module name."""
    combined: List[Dict[str, Any]] = []
    for mod in list_all_modules():
        for sess in load_sessions(mod):
            sess["module"] = mod
            combined.append(sess)
    return combined

@app.get("/api/sessions")
def list_sessions(module: Optional[str] = Query(default=None, description="模块名称，不传则获取所有模块的会话")):
    """List sessions for one module — or all modules — most recently updated first."""
    with LOCK:
        if module is None:
            sessions = load_all_sessions()
        else:
            sessions = load_sessions(module)
            # Tag each session with the module it came from.
            for sess in sessions:
                sess["module"] = module

    sessions.sort(key=lambda s: s.get("updated_at", ""), reverse=True)
    return {"sessions": sessions}
@app.post("/api/sessions")
async def create_session(payload: Dict[str, Any] | None = None):
    """Create a new session (optionally titled) inside the requested module."""
    payload = payload or {}
    sid = str(uuid.uuid4())
    ts = now_iso()
    # Fall back to the default module for missing/unknown module names.
    module = payload.get("module", default_module)
    if module not in SUPPORTED_MODULES:
        module = default_module
    sess = {
        "id": sid,
        "title": payload.get("title") or f"会话 {sid[:6]}",
        "module": module,
        "created_at": ts,
        "updated_at": ts,
    }
    with LOCK:
        sessions = load_sessions(module)
        sessions.append(sess)
        save_sessions(sessions, module)
        save_messages(sid, [], module)
    return sess

@app.get("/api/sessions/{sid}")
def get_session(sid: str, module: Optional[str] = Query(default=None, description="模块名称，如果不传会尝试在所有模块中查找")):
    """Return a session's message history, searching all modules when none is given."""
    with LOCK:
        if module is not None:
            msgs = load_messages(sid, module)
        else:
            msgs = []
            # Scan each module's index until the session id turns up.
            for mod in list_all_modules():
                if any(s.get("id") == sid for s in load_sessions(mod)):
                    msgs = load_messages(sid, mod)
                    break
    return {"messages": msgs}

@app.patch("/api/sessions/{sid}")
async def rename_session(sid: str, payload: Dict[str, Any], module: Optional[str] = Query(default=None, description="模块名称，如果不传会尝试在所有模块中查找")):
    """Rename a session; searches every module when none is specified."""
    title = (payload or {}).get("title")
    if not title:
        return JSONResponse({"error":"title required"}, status_code=400)

    with LOCK:
        # One code path: search either the single requested module or all of them.
        candidates = [module] if module is not None else list_all_modules()
        for mod in candidates:
            sessions = load_sessions(mod)
            for sess in sessions:
                if sess["id"] == sid:
                    sess["title"] = title
                    sess["updated_at"] = now_iso()
                    save_sessions(sessions, mod)
                    return {"ok": True}

    return JSONResponse({"error": "session not found"}, status_code=404)

@app.delete("/api/sessions/{sid}")
def delete_session(sid: str, module: Optional[str] = Query(default=None, description="模块名称，如果不传会尝试在所有模块中查找")):
    """Delete a session and its messages file; reports ok even if nothing matched."""
    with LOCK:
        if module is not None:
            # Rewrite the index without the session, then drop its messages file.
            remaining = [s for s in load_sessions(module) if s["id"] != sid]
            save_sessions(remaining, module)
            msg_file = session_messages_path(sid, module)
            if os.path.exists(msg_file):
                os.remove(msg_file)
        else:
            # Search modules until one actually contained the session.
            for mod in list_all_modules():
                sessions = load_sessions(mod)
                remaining = [s for s in sessions if s["id"] != sid]
                if len(remaining) == len(sessions):
                    continue
                save_sessions(remaining, mod)
                msg_file = session_messages_path(sid, mod)
                if os.path.exists(msg_file):
                    os.remove(msg_file)
                break

    return {"ok": True}

@app.post("/api/sessions/{session_id}/update-message")
async def update_message(session_id: str, payload: Dict[str, Any], module: Optional[str] = Query(default=None, description="模块名称，如果不传会尝试在所有模块中查找")):
    """Overwrite the content of one message in a session.

    Expects ``{"message_index": int, "content": str}``; when *module* is not
    given, the session's module is discovered by scanning every module index.
    """
    print(module)
    message_index = payload.get("message_index")
    content = payload.get("content")
    
    if message_index is None or content is None:
        return JSONResponse({"error": "message_index and content required"}, status_code=400)
    
    with LOCK:
        # Determine which module actually owns this session.
        target_module = module
        found = False
        
        if target_module is None:
            # No module given: scan all module indexes for the session id.
            for mod in list_all_modules():
                sessions = load_sessions(mod)
                if any(sess.get("id") == session_id for sess in sessions):
                    target_module = mod
                    found = True
                    break
        else:
            # Module given: just confirm the session exists there.
            sessions = load_sessions(target_module)
            found = any(sess.get("id") == session_id for sess in sessions)
        
        # Session nowhere to be found and no module given: fall back to default.
        # NOTE(review): when a module WAS given but the session is absent,
        # execution still proceeds against that module (load_messages returns
        # []) and the request fails with "message index out of range" —
        # confirm whether a 404 would be more appropriate.
        if not found and target_module is None:
            target_module = default_module
        
        msgs = load_messages(session_id, target_module)
        if 0 <= message_index < len(msgs):
            msgs[message_index]["content"] = content
            save_messages(session_id, msgs, target_module)
            
            # Bump the owning session's updated_at timestamp.
            sessions = load_sessions(target_module)
            for s in sessions:
                if s["id"] == session_id:
                    s["updated_at"] = now_iso()
                    break
            save_sessions(sessions, target_module)
            
            return {"ok": True}
        return JSONResponse({"error": "message index out of range"}, status_code=400)

@app.post("/api/chat/stream")
async def chat_stream(request: Request, session_id: str, model: str, module: Optional[str] = Query(default=default_module, description="模块名称，默认为default")):
    """
    Demo streaming chat endpoint (SSE).

    - Takes the last ``user`` entry of the posted ``messages`` as the prompt.
    - Proxies a streaming model service; falls back to a canned demo reply
      when the service returns no content.
    - SSE framing: ``data: <chunk>\\n\\n`` per piece, terminated by
      ``data: [DONE]``.
    - After the stream finishes, persists the conversation (history plus the
      full assistant reply) and bumps the session's ``updated_at``.

    Fixes over the previous version: removed a dead ``load_messages`` call
    whose result was never used, and a redundant intermediate
    ``save_messages`` that was immediately overwritten.
    """
    print(f"module是: {module}")
    body = await request.json()
    messages: List[Dict[str, Any]] = body.get("messages", [])
    # The prompt is the most recent user message in the posted history.
    user_input = ""
    for m in reversed(messages):
        if m.get("role") == "user":
            user_input = m.get("content", "")
            break

    async def sse_generator():
        full_response = ""
        try:
            # Try the external streaming model service first.
            with requests.post(
                "http://0.0.0.0:8010/generate_stream",  # streaming endpoint
                json={"prompt": user_input, "max_new_tokens": 512, "temperature": 0.2, "top_p": 0.95},
                headers={"Authorization": "Bearer mysecret"},
                stream=True  # essential: enables chunked streaming
            ) as r:
                r.raise_for_status()
                for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
                    # Strip chat-template markers that may leak from the model.
                    if chunk.startswith("[INST]"):
                        chunk = chunk[len("[INST]"):]
                    if "[/INST]" in chunk:
                        chunk = chunk.split("[/INST]")[-1]
                    if chunk:
                        full_response += chunk
                        yield f"data: {chunk}\n\n"
                        await asyncio.sleep(0.03)
        except requests.exceptions.RequestException as e:
            # Model service unreachable: report the error, close the stream,
            # and skip persistence entirely.
            error_msg = f"请求模型服务出错: {str(e)}"
            yield f"data: {error_msg}\n\n"
            yield "data: [DONE]\n\n"
            return

        # Fallback demo reply when the external service produced no content.
        if not full_response:
            demo_response = [
                f"这是针对「{model}」模型的流式响应演示，用于打通前后端。你说的是：{user_input}",
                "",
                "Markdown 支持：**加粗**、`code`、",
                "$$E=mc^2$$",
                "以及列表：",
                "- 项目A",
                "- 项目B",
                "",
                "(后续可替换为真实大模型推理流)。"
            ]
            full_response = "\n".join(demo_response)
            for chunk in split_chunks(full_response, 20):
                yield f"data: {chunk}\n\n"
                await asyncio.sleep(0.03)

        yield "data: [DONE]\n\n"

        # Persist the finished conversation.
        with LOCK:
            # Unknown module names fall back to the default module.
            target_module = module if module in SUPPORTED_MODULES else default_module

            # If the session is not in the requested module, search the others.
            sessions = load_sessions(target_module)
            if not any(sess.get("id") == session_id for sess in sessions):
                for mod in SUPPORTED_MODULES:
                    if any(sess.get("id") == session_id for sess in load_sessions(mod)):
                        target_module = mod
                        break

            # Append the full assistant reply, replacing a trailing assistant
            # placeholder if the client sent one, and save in a single write.
            msgs = messages[:-1] if messages and messages[-1].get("role") == "assistant" else messages
            msgs.append({"role": "assistant", "content": full_response})
            print(full_response)
            save_messages(session_id, msgs, target_module)

            # Bump the session's updated_at timestamp.
            sessions = load_sessions(target_module)
            for s in sessions:
                if s["id"] == session_id:
                    s["updated_at"] = now_iso()
                    break
            save_sessions(sessions, target_module)

    headers = {
        "Content-Type": "text/event-stream; charset=utf-8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "X-Accel-Buffering": "no",  # recommended when behind an Nginx reverse proxy
    }
    return StreamingResponse(sse_generator(), headers=headers)

def split_chunks(s: str, n: int):
    """Break *s* into consecutive substrings of length *n* (last may be shorter)."""
    offsets = range(0, len(s), n)
    return [s[off:off + n] for off in offsets]

# ===================== Medical imaging analysis API (last updated 2025-09-08 11:00) =====================
# Directory where uploaded medical images are stored.
MEDICAL_UPLOADS_DIR = os.path.join(DATA_DIR, "medical_uploads")
os.makedirs(MEDICAL_UPLOADS_DIR, exist_ok=True)

@app.post("/api/medical/upload-image")
async def upload_medical_image(file: UploadFile = File(...)):
    """Upload a medical image.

    Saves it under MEDICAL_UPLOADS_DIR with a timestamped, sanitized
    filename and returns the stored name and path.

    Fixes: client-supplied filenames are passed through ``os.path.basename``
    (prevents path traversal), and a missing ``content_type`` now yields a
    clean 400 instead of an AttributeError-driven 500.
    """
    try:
        # Reject non-image uploads; content_type may be absent entirely.
        if not (file.content_type or "").startswith('image/'):
            return JSONResponse(
                {"error": "只支持图像文件"}, 
                status_code=400
            )
        
        # Unique name: timestamp prefix plus the sanitized original name.
        timestamp = int(time.time())
        safe_name = os.path.basename(file.filename or "upload")
        filename = f"{timestamp}_{safe_name}"
        file_path = os.path.join(MEDICAL_UPLOADS_DIR, filename)
        
        # Stream the upload to disk.
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        
        return {
            "success": True,
            "filename": filename,
            "file_path": file_path,
            "message": "文件上传成功"
        }
        
    except Exception as e:
        return JSONResponse(
            {"error": f"文件上传失败: {str(e)}"}, 
            status_code=500
        )

@app.post("/api/medical/analyze")
async def analyze_medical_image(image: UploadFile = File(...), custom_prompt: str = None):
    """Save an uploaded medical image, run the analyzer on it, return its result."""
    try:
        if not image.filename:
            return JSONResponse(
                {"error": "未选择图像文件"}, 
                status_code=400
            )

        # Ensure the upload directory exists.
        upload_dir = os.path.join(DATA_DIR, "medical_uploads")
        os.makedirs(upload_dir, exist_ok=True)

        # Unique destination name based on the current timestamp.
        extension = os.path.splitext(image.filename)[1]
        destination = os.path.join(upload_dir, f"analysis_{int(time.time())}{extension}")

        # Persist the uploaded bytes.
        data = await image.read()
        with open(destination, "wb") as fh:
            fh.write(data)

        # Run the medical image analysis.
        result = medical_analyzer.analyze_medical_image(destination, custom_prompt)

        # Expose the stored path so follow-up conversation can reference it.
        if result.get("success"):
            result["image_path"] = destination
        return result

    except Exception as e:
        return JSONResponse(
            {"error": f"分析失败: {str(e)}"}, 
            status_code=500
        )


@app.post("/api/medical/analyze-stream")
async def analyze_medical_image_stream(
    image: UploadFile = File(...), 
    input_text: str = None,
    temperature: float = 0.8,
    top_p: float = 0.4
):
    """
    ⚡ Streaming medical image analysis (first diagnosis, via the Qwen API).

    Response format: Server-Sent Events (SSE)
    - first event: {"type": "metadata", "file_path": "/path/to/saved/image.jpg"}
    - content:     {"type": "content", "content": "text chunk"}
    - error:       {"type": "error", "error": "message"}
    - terminator:  data: [DONE]

    The client reads ``file_path`` from the metadata event and reuses it for
    follow-up conversation requests.
    """
    try:
        # Validate that a file was actually attached.
        if not image.filename:
            # Report the error through SSE so the client's stream reader handles it.
            async def error_generator():
                error_json = {"type": "error", "error": "未选择图像文件"}
                yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"
                yield "data: [DONE]\n\n"
            
            return StreamingResponse(
                error_generator(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )
        
        # Ensure the upload directory exists.
        upload_dir = os.path.join(DATA_DIR, "medical_uploads")
        os.makedirs(upload_dir, exist_ok=True)
        
        # Unique, timestamped destination filename.
        file_extension = os.path.splitext(image.filename)[1]
        filename = f"stream_analysis_{int(time.time())}{file_extension}"
        file_path = os.path.join(upload_dir, filename)
        
        # Persist the image first so its path can be announced as metadata.
        with open(file_path, "wb") as buffer:
            content = await image.read()
            buffer.write(content)
        
        print(f"🚀 [首次诊断] 图像已保存: {file_path}")
        
        # Generator of plain text chunks (SSE framing is added by the wrapper).
        def generator_function():
            return medical_analyzer.analyze_medical_image_complete_stream(
                file_path, 
                input_text, 
                temperature, 
                top_p
            )
        
        # Wrap with the shared SSE formatter, attaching file_path metadata.
        wrapped_generator = create_stream_generator(
            generator_function,
            include_metadata={"file_path": file_path}  # the frontend receives this path
        )
        
        # Stream the SSE response.
        return StreamingResponse(
            wrapped_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no",
                "Access-Control-Allow-Origin": "*"
            }
        )
        
    except Exception as e:
        # Non-stream failure (e.g. reading/saving the upload itself failed).
        return JSONResponse(
            {"error": f"请求失败: {str(e)}"}, 
            status_code=500
        )


@app.post("/api/medical/analyze-complete")
async def analyze_medical_image_complete(
    image: UploadFile = File(...), 
    input_text: str = None,
    temperature: float = 0.8,
    top_p: float = 0.4
):
    """Full (non-streaming) medical image analysis, based on demo4.py.

    Saves the upload to a temporary path, runs the analyzer, removes the
    temp file (best effort) and returns the analyzer's result.

    Fix: the cleanup previously used a bare ``except:`` which swallowed
    everything including KeyboardInterrupt/SystemExit; it now ignores only
    filesystem errors.
    """
    try:
        if not image.filename:
            return JSONResponse(
                {"error": "未选择图像文件"}, 
                status_code=400
            )
        
        # Ensure the upload directory exists.
        upload_dir = os.path.join(DATA_DIR, "medical_uploads")
        os.makedirs(upload_dir, exist_ok=True)
        
        # Unique, timestamped temp filename.
        file_extension = os.path.splitext(image.filename)[1]
        filename = f"complete_analysis_{int(time.time())}{file_extension}"
        file_path = os.path.join(upload_dir, filename)
        
        # Persist the uploaded bytes.
        with open(file_path, "wb") as buffer:
            content = await image.read()
            buffer.write(content)
        
        # Run the complete analysis.
        result = medical_analyzer.analyze_medical_image_complete(
            file_path, 
            input_text, 
            temperature, 
            top_p
        )
        
        # Best-effort cleanup of the temp file.
        try:
            os.remove(file_path)
        except OSError:
            pass
        
        return result
        
    except Exception as e:
        return JSONResponse(
            {"error": f"分析失败: {str(e)}"}, 
            status_code=500
        )



@app.post("/api/medical/continue-conversation")
async def continue_medical_conversation(request: Request):
    """Continue a medical-image Q&A turn (non-streaming; local model or API)."""
    try:
        body = await request.json()
        image_path = body.get("image_path")
        input_text = body.get("input_text")

        # Both the image reference and the question are mandatory.
        if not image_path or not input_text:
            return JSONResponse(
                {"error": "图像路径和输入文本不能为空"}, 
                status_code=400
            )

        return medical_analyzer.continue_medical_conversation(
            image_path,
            input_text,
            body.get("history", []),
            body.get("temperature", 0.8),
            body.get("top_p", 0.4),
        )

    except Exception as e:
        return JSONResponse(
            {"error": f"对话失败: {str(e)}"}, 
            status_code=500
        )
@app.post("/api/medical/continue-conversation-stream")
async def continue_conversation_stream(request: Request):
    """
    ⚡ Streaming medical-image conversation (follow-up Q&A via local XrayGLM or API).

    Response format: Server-Sent Events (SSE)
    - content:    {"type": "content", "content": "text chunk"}
    - error:      {"type": "error", "error": "message"}
    - terminator: data: [DONE]

    Benefits:
    - real-time "typing" effect
    - consistent UX with the first-diagnosis stream
    - avoids request timeouts on long generations
    """
    try:
        body = await request.json()
        image_path = body.get("image_path")
        input_text = body.get("input_text")
        history = body.get("history", [])
        temperature = body.get("temperature", 0.8)
        top_p = body.get("top_p", 0.4)
        
        # Both the image reference and the question are mandatory.
        if not image_path or not input_text:
            # Report the error through SSE so the client's stream reader handles it.
            async def error_generator():
                error_json = {"type": "error", "error": "图像路径和输入文本不能为空"}
                yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"
                yield "data: [DONE]\n\n"
            
            return StreamingResponse(
                error_generator(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )
        
        print(f"🚀 [后续对话] 图像路径: {image_path}")
        print(f"   输入文本: {input_text}")
        print(f"   对话历史: {len(history)}条")
        
        # Generator of plain text chunks (SSE framing is added by the wrapper).
        def generator_function():
            return medical_analyzer.generate_text_with_local_model_stream(
                input_text=input_text,
                image_path=image_path,
                history=history,
                temperature=temperature,
                top_p=top_p
            )
        
        # Wrap with the shared SSE formatter; no metadata needed because the
        # client already holds file_path from the first-diagnosis stream.
        wrapped_generator = create_stream_generator(generator_function)
        
        # Stream the SSE response.
        return StreamingResponse(
            wrapped_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no",
                "Access-Control-Allow-Origin": "*"
            }
        )
        
    except Exception as e:
        # Non-stream failure (e.g. JSON body could not be parsed).
        return JSONResponse(
            {"error": f"请求失败: {str(e)}"}, 
            status_code=500
        )

@app.get("/api/rag/status")
async def rag_status():
    """Report whether the RAG subsystem is importable, loaded, and which components are up."""
    if not RAG_ENABLED:
        return {
            "available": False,
            "message": "RAG 模块未导入"
        }
    try:
        if not is_rag_available():
            return {
                "available": False,
                "message": "RAG 系统组件未完全加载"
            }
        retriever = get_rag_retriever()
        index = retriever.faiss_index
        return {
            "available": True,
            "message": "RAG 系统运行正常",
            "details": {
                "biomedclip": retriever.biomedclip_model is not None,
                "faiss": index is not None,
                "neo4j": retriever.neo4j_driver is not None,
                "index_size": index.ntotal if index else 0,
            },
        }
    except Exception as e:
        return {
            "available": False,
            "message": f"RAG 状态检查失败: {str(e)}"
        }


@app.post("/api/medical/analyze-with-rag-stream")
async def analyze_medical_image_with_rag_stream(
    image: UploadFile = File(...), 
    input_text: str = None,
    temperature: float = 0.8,
    top_p: float = 0.4,
    top_k: int = 3
):
    """完整的视频分析 - 支持多轮对话"""
    """
    🧠 RAG 增强的流式医学影像分析
    
    工作流程：
    1. 保存上传的医学影像
    2. 使用 BiomedCLIP 检索相似病例 (top_k 个)
    3. 从 Neo4j 查询相关医学知识
    4. 构建增强的 Prompt
    5. 调用 Qwen-VL API 进行流式分析
    
    响应格式：Server-Sent Events (SSE)
    - {"type": "metadata", "file_path": "...", "knowledge": {...}}
    - {"type": "content", "content": "文本块"}
    - {"type": "error", "error": "错误信息"}
    - data: [DONE]
    """
    
    # 1. 检查 RAG 系统是否可用
    if not RAG_ENABLED or not is_rag_available():
        # 如果 RAG 不可用，降级为普通分析
        print("⚠️  RAG 系统不可用，降级为普通分析")
        return await analyze_medical_image_stream(image, input_text, temperature, top_p)
    
    try:
        if not image.filename:
            async def error_generator():
                error_json = {"type": "error", "error": "未选择图像文件"}
                yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"
                yield "data: [DONE]\n\n"
            
            return StreamingResponse(
                error_generator(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )
        
        # 创建上传目录
        upload_dir = os.path.join(DATA_DIR, "medical_uploads")
        os.makedirs(upload_dir, exist_ok=True)
        
        # 生成唯一文件名
        file_extension = os.path.splitext(image.filename)[1]
        filename = f"rag_analysis_{int(time.time())}{file_extension}"
        file_path = os.path.join(upload_dir, filename)
        
        # 保存文件
        with open(file_path, "wb") as buffer:
            content = await image.read()
            buffer.write(content)
        print(f"📁 图像已保存: {file_path}")
        
        # 3. RAG 检索和 Prompt 增强
        retriever = get_rag_retriever()
        original_question = input_text or "请详细分析这张医学影像的异常表现，并给出可能的诊断建议。"
        
        print(f"🔍 开始 RAG 检索 (top_k={top_k})...")
        enhanced_prompt, knowledge = retriever.retrieve_and_enhance(
            file_path,
                        original_question,
            top_k=top_k
        )
        print(f"✨ Prompt 已增强，检索到 {len(knowledge.get('cases', []))} 个相似病例")
          # 4. 使用增强的 Prompt 调用 Qwen API 进行流式分析
        def generate_analysis():
            """生成器函数：使用增强的 prompt 调用 Qwen API"""
            for chunk in medical_analyzer.analyze_medical_image_complete_stream(
                image_path=file_path,
                input_text=enhanced_prompt,  # 💡 使用 RAG 增强的 prompt
                temperature=temperature,
                top_p=top_p
            ):
                yield chunk
         # 5. 准备元数据（包含 file_path 和 RAG 知识）
        metadata = {
            "file_path": file_path,
            "rag_enabled": True,
            "similar_cases_count": len(knowledge.get("cases", [])),
            "knowledge_summary": {
                "findings": knowledge.get("findings", [])[:5],  # 最多显示5个
                "diseases": knowledge.get("diseases", [])[:5]
            },
            "knowledge_details": knowledge
        }
                # 6. 使用统一的流式响应包装器
        stream_generator = create_stream_generator(
            generate_analysis,
            include_metadata=metadata
        )
        return StreamingResponse(
            stream_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no"
            }
        )
    except Exception as e:
        print(f"❌ RAG 增强分析失败: {str(e)}")
        import traceback
        traceback.print_exc()
        async def error_generator():
            error_json = {"type": "error", "error": f"RAG 增强分析失败: {str(e)}"}
            yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"
            yield "data: [DONE]\n\n"
        return StreamingResponse(
            error_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no"
            }
        )

@app.post("/api/medical/update-patient")
async def update_patient_info(request: Request):
    """Update the current patient's profile (name, age, gender, history).

    Delegates to medical_analyzer.update_patient_info and returns its result
    unchanged; any failure is reported as a 500 JSON error.
    """
    try:
        payload = await request.json()
        return medical_analyzer.update_patient_info(
            payload.get("name"),
            payload.get("age"),
            payload.get("gender"),
            payload.get("history"),
        )
    except Exception as e:
        return JSONResponse(
            {"error": f"更新患者信息失败: {str(e)}"},
            status_code=500
        )



@app.get("/api/medical/patient-info")
async def get_patient_info():
    """Return a defensive copy of the analyzer's current patient profile."""
    info_snapshot = medical_analyzer.patient_info.copy()
    return {"success": True, "patient_info": info_snapshot}

@app.post("/api/medical/generate-report")
async def generate_medical_report(request: Request):
    """Generate a medical report.

    Prefers the chat history when present; otherwise falls back to the
    accumulated analysis history. Returns the report with a timestamp.
    """
    try:
        payload = await request.json()
        chat_log = payload.get("chat_history", [])

        if chat_log:
            # Chat transcript available: build the report from the dialogue.
            report = medical_analyzer.generate_medical_report_from_chat(
                chat_log, payload.get("patient_info", {})
            )
        else:
            # No chat: use the stored image-analysis history instead.
            report = medical_analyzer.generate_medical_report(
                payload.get("analysis_history", [])
            )

        return {
            "success": True,
            "report": report,
            "timestamp": datetime.now().isoformat()
        }
    except Exception as e:
        return JSONResponse(
            {"error": f"报告生成失败: {str(e)}"},
            status_code=500
        )

# ===================== Baidu Translate API (last updated 2025-10-18) =====================
import random
from hashlib import md5

# Baidu Translate credentials.
# NOTE(security): the literal fallbacks below are credentials committed to the
# repository; prefer supplying BAIDU_TRANSLATE_APPID / BAIDU_TRANSLATE_APPKEY
# via the environment and rotating the exposed pair.
appid = os.environ.get("BAIDU_TRANSLATE_APPID", '20251013002474387')
appkey = os.environ.get("BAIDU_TRANSLATE_APPKEY", 'sbdaZJn0eKEdwloyrhHe')

# For the list of language codes, see https://api.fanyi.baidu.com/doc/21
from_lang = 'en'
to_lang = 'zh'

endpoint = 'http://api.fanyi.baidu.com'
path = '/api/trans/vip/translate'
url = endpoint + path

def make_md5(s, encoding='utf-8'):
    """Return the hex MD5 digest of string *s* encoded with *encoding*."""
    digest = md5(s.encode(encoding))
    return digest.hexdigest()

def _baidu_translate(text: str) -> dict:
    """POST *text* to the Baidu translate API and return the parsed JSON reply.

    Builds the salt/sign pair required by the API from the module-level
    appid/appkey credentials. Network errors propagate to the caller.
    """
    salt = random.randint(32768, 65536)
    sign = make_md5(appid + text + str(salt) + appkey)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    payload = {'appid': appid, 'q': text, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
    r = requests.post(url, params=payload, headers=headers)
    return r.json()


@app.post("/api/translate")
async def translate_text(request: Request):
    """Translate the request's "text" field EN->ZH via Baidu, skipping
    fenced code blocks (``` sections are passed through untranslated).

    NOTE(review): a later `from utils import translate_text` in this file
    rebinds this module-level name. The route stays registered, but one of
    the two should be renamed to avoid confusion.
    """
    try:
        body = await request.json()
        query = body.get("text", "")

        if not query:
            return JSONResponse(
                {"error": "文本内容不能为空"},
                status_code=400
            )

        # Split on ``` fences: even indices are prose, odd indices are code.
        query_parts = query.split("```")
        print(f"分割后的部分数量: {len(query_parts)}")
        has_code_block = len(query_parts) > 1

        if has_code_block:
            translated_parts = []
            for i, part in enumerate(query_parts):
                if i % 2 == 1:
                    # Code block content: never translated.
                    translated_parts.append(part)
                elif part.strip():
                    print(f"翻译第{i}部分: {part[:30]}...")
                    result = _baidu_translate(part)
                    print(f"翻译结果: {result}")
                    if 'error_code' in result:
                        print(f"翻译错误: {result['error_code']}")
                        # Keep the original text when translation fails.
                        translated_parts.append(part)
                    elif result.get('trans_result'):
                        # Join all translated segments into one chunk.
                        translated_parts.append(
                            '\n\n'.join(item['dst'] for item in result['trans_result'])
                        )
                    else:
                        translated_parts.append(part)
                else:
                    translated_parts.append(part)

            # Reassemble around the fences.
            # NOTE(review): joining with '\n```\n' inserts newlines the source
            # text may not have had — confirm the frontend expects this.
            translated_text = '\n```\n'.join(translated_parts)
            print(f"最终翻译结果: {translated_text[:100]}...")
            return {
                "success": True,
                "result": translated_text,
                "timestamp": datetime.now().isoformat()
            }

        # No code fences: translate the whole text in one call.
        result = _baidu_translate(query)
        print(result)

        if 'error_code' in result:
            return JSONResponse(
                {"error": f"翻译失败: {result.get('error_msg', '未知错误')}"},
                status_code=500
            )

        if result.get('trans_result'):
            return {
                "success": True,
                "result": '\n\n'.join(item['dst'] for item in result['trans_result']),
                "timestamp": datetime.now().isoformat()
            }
        return JSONResponse(
            {"error": "翻译结果格式异常"},
            status_code=500
        )

    except Exception as e:
        return JSONResponse(
            {"error": f"翻译请求失败: {str(e)}"},
            status_code=500
        )
    
@app.post("/api/math/stream")
async def math_stream(
    request: Request,
    session_id: str,
    model: str = Query(default="math-model", description="数学模型标识"),
    max_input_length: int = Query(default=1024, description="最大输入长度"),
    max_new_tokens: int = Query(default=8192, description="最大生成长度"),
    temperature: float = Query(default=0.6, description="温度参数"),
    top_p: float = Query(default=0.95, description="top_p参数"),
    do_sample: bool = Query(default=True, description="是否采样")
):
    """
    Standalone math streaming endpoint: proxies math_deploy.py's /solve_stream
    via the requests library, reusing this module's session storage so the SSE
    framing matches chat_stream.
    """
    # 1. Pull the latest user message out of the request body.
    body = await request.json()
    messages: List[Dict[str, Any]] = body.get("messages", [])
    user_input = ""
    for m in reversed(messages):
        if m.get("role") == "user":
            user_input = m.get("content", "")
            break
    if not user_input:
        return JSONResponse({"error": "用户输入不能为空"}, status_code=400)

    # 2. Locate the math_deploy service (plain HTTP, no TLS).
    math_api_url = os.environ.get("MATH_DEPLOY_URL", "http://10.143.12.79:8077/solve_stream")
    math_api_token = os.environ.get("MATH_API_TOKEN", None)
    module = "math"

    # 3. Request headers (bearer auth only when a token is configured).
    headers = {"Content-Type": "application/json"}
    if math_api_token:
        headers["Authorization"] = f"Bearer {math_api_token}"

    # 4. Payload expected by math_deploy's /solve_stream.
    math_request_data = {
        "problem": user_input,
        "max_input_length": max_input_length,
        "max_new_tokens": max_new_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "do_sample": do_sample
    }

    # 5. Streaming generator (mirrors chat_stream's implementation).
    async def sse_generator():
        full_response = ""
        try:
            # Synchronous request executed off the event loop (connect phase only).
            def sync_request():
                # Call math_deploy's streaming endpoint (synchronous, same
                # approach as chat_stream).
                return requests.post(
                    math_api_url,
                    json=math_request_data,
                    headers=headers,
                    stream=True  # key: enable a streaming response
                )

            # asyncio.to_thread keeps the blocking connect off the event loop.
            with await asyncio.to_thread(sync_request) as r:
                r.raise_for_status()  # raise on HTTP errors (e.g. 401/500)

                # NOTE(review): iter_content here runs synchronously inside an
                # async generator, so the event loop blocks while waiting for
                # each upstream chunk — only the initial connect was off-loaded.
                for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
                    # Strip possible instruction markers (adjust to whatever
                    # format math_deploy actually emits).
                    if chunk.startswith("[INST]"):
                        chunk = chunk[len("[INST]"):]
                    if "[/INST]" in chunk:
                        chunk = chunk.split("[/INST]")[-1]
                    if chunk:
                        full_response += chunk
                        # NOTE(review): a chunk containing a raw "\n" would break
                        # SSE framing — confirm math_deploy never emits newlines
                        # inside a chunk.
                        yield f"data: {chunk}\n\n"
                        await asyncio.sleep(0.03)  # throttle the stream

        except requests.exceptions.RequestException as e:
            # Upstream call failed (connect timeout, auth failure, ...).
            error_msg = f"数学服务请求失败: {str(e)}（检查math_deploy是否启动）"
            yield f"data: {error_msg}\n\n"
        except Exception as e:
            # Any other system error.
            error_msg = f"系统错误: {str(e)}"
            yield f"data: {error_msg}\n\n"
        finally:
            # End-of-stream marker (same convention as chat_stream).
            yield "data: [DONE]\n\n"

        # 6. Persist the exchange after the stream ends (reuses this module's
        # session storage helpers: load_sessions/save_sessions/load_messages/
        # save_messages/now_iso, defined earlier in the file).
        with LOCK:
            # Create the session on first use.
            sessions = load_sessions(module)
            session_exists = any(s["id"] == session_id for s in sessions)
            if not session_exists:
                ts = now_iso()
                sessions.append({
                    "id": session_id,
                    "title": f"数学会话 {session_id[:6]}",
                    "module": module,
                    "created_at": ts,
                    "updated_at": ts
                })
                save_sessions(sessions, module)
                save_messages(session_id, [], module)

            # Append messages (same storage logic as chat_stream).
            msgs = load_messages(session_id, module)
            # Drop any unfinished (empty) assistant message to avoid duplicates.
            msgs = [msg for msg in msgs if not (msg.get("role") == "assistant" and msg.get("content") == "")]
            # Append the user message.
            # NOTE(review): this condition inspects the request payload, so when
            # the client's final message IS the user turn (the normal case) it is
            # NOT stored here — confirm this matches chat_stream's behavior.
            if not messages or messages[-1].get("role") != "user":
                msgs.append({"role": "user", "content": user_input, "created_at": now_iso()})
            # Append the assistant response.
            msgs.append({"role": "assistant", "content": full_response, "created_at": now_iso()})
            save_messages(session_id, msgs, module)

            # Touch the session's updated_at timestamp.
            for s in sessions:
                if s["id"] == session_id:
                    s["updated_at"] = now_iso()
                    break
            save_sessions(sessions, module)

    # 7. Return the SSE streaming response (headers match chat_stream).
    response_headers = {
        "Content-Type": "text/event-stream; charset=utf-8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "X-Accel-Buffering": "no",
    }
    return StreamingResponse(sse_generator(), headers=response_headers)

# ===================== Video analysis API (last updated 2025-11-04) =====================
# Directory for uploaded videos, under DATA_DIR (defined earlier in this file).
VIDEO_UPLOADS_DIR = os.path.join(DATA_DIR, "video_uploads")
os.makedirs(VIDEO_UPLOADS_DIR, exist_ok=True)

@app.post("/api/video/upload-video")
async def upload_video_file(file: UploadFile = File(...)):
    """Persist an uploaded video into VIDEO_UPLOADS_DIR.

    Rejects non-video uploads with 400; returns the stored filename and path
    on success, or a 500 JSON error on failure.
    """
    try:
        # content_type can be None for some clients; previously that raised
        # AttributeError (returned as a 500). Treat it as "not a video" (400).
        if not file.content_type or not file.content_type.startswith('video/'):
            return JSONResponse({"error": "只支持视频文件"}, status_code=400)
        # basename() strips any path components a hostile client could embed
        # (e.g. "../../x.mp4"), keeping the write inside VIDEO_UPLOADS_DIR.
        safe_name = os.path.basename(file.filename or "upload")
        timestamp = int(time.time())
        filename = f"{timestamp}_{safe_name}"
        file_path = os.path.join(VIDEO_UPLOADS_DIR, filename)
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        return {"success": True, "filename": filename, "file_path": file_path, "message": "视频文件上传成功"}
    except Exception as e:
        return JSONResponse({"error": f"文件上传失败: {str(e)}"}, status_code=500)

@app.post("/api/video/analyze-complete")
async def analyze_video_complete(
    video: UploadFile = File(...), 
    input_text: str = Form(None),
    chat_history: str = Form(None),
    temperature: float = Form(0.8),
    top_p: float = Form(0.4),
    max_tokens: int = Form(1024),
):
    """Complete video analysis with multi-turn chat support.

    Saves the upload to a temp path, runs video_analyzer on it, best-effort
    translates the resulting text to Chinese via the Baidu API, then removes
    the temp file and returns the analyzer's result dict.
    """
    try:
        if not video.filename:
            return JSONResponse({"error": "未选择视频文件"}, status_code=400)

        # Persist the upload so the analyzer can read it from disk.
        file_extension = os.path.splitext(video.filename)[1]
        filename = f"complete_analysis_{int(time.time())}{file_extension}"
        file_path = os.path.join(VIDEO_UPLOADS_DIR, filename)

        with open(file_path, "wb") as buffer:
            content = await video.read()
            buffer.write(content)

        # chat_history arrives as a JSON string; invalid JSON degrades to None.
        history_list = None
        if chat_history:
            try:
                history_list = json.loads(chat_history)
            except json.JSONDecodeError:
                history_list = None

        result = video_analyzer.analyze_video_complete(
            file_path,
            input_text,
            history_list,
            temperature,
            top_p,
            max_tokens,
        )

        # Best-effort EN->ZH translation of the analysis text via Baidu
        # (uses the module-level appid/appkey/url/from_lang/to_lang config).
        try:
            if isinstance(result, dict) and result.get("success") and isinstance(result.get("analysis_result"), str):
                text = result.get("analysis_result", "")
                if text.strip():
                    salt = random.randint(32768, 65536)
                    sign = make_md5(appid + text + str(salt) + appkey)
                    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
                    payload = {'appid': appid, 'q': text, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
                    r = requests.post(url, params=payload, headers=headers, timeout=30)
                    data = r.json()
                    if 'trans_result' in data and len(data['trans_result']) > 0:
                        translated = '\n\n'.join([item['dst'] for item in data['trans_result']])
                        if translated:
                            result['analysis_result'] = translated
        except Exception:
            # Translation failures are deliberately ignored; the original
            # (untranslated) text is returned instead.
            pass

        # Remove the temp file regardless of the analysis outcome.
        try:
            os.remove(file_path)
        except Exception:
            pass

        return result
    except Exception as e:
        return JSONResponse({"error": f"分析失败: {str(e)}"}, status_code=500)


import json, os, asyncio, uuid
from clean_text import extract_security_md
from utils import translate_text
from model_client import analyze_contract as analyze_contract_via_service, check_model_service_health

# ===========================================
#  Model-service connectivity check
# ===========================================
# The model inference service is deployed separately; this app calls it over HTTP.
# Its address is configured via the MODEL_SERVICE_URL environment variable.
# Default address: http://localhost:8010

@app.on_event("startup")
async def startup_event():
    """On app startup, check and log model-inference-service reachability."""
    print("🚀 [应用启动] FastAPI 应用正在启动...")
    print("🔗 [模型服务] 检查模型推理服务连接...")

    service_ok = check_model_service_health()
    if service_ok:
        banner = [
            "✅ [模型服务] 模型推理服务连接正常",
            "🚀 [应用启动] 后端服务已就绪",
        ]
    else:
        banner = [
            "⚠️ [模型服务] 模型推理服务未就绪或无法连接",
            "⚠️ [应用启动] 后端服务已启动，但模型服务不可用",
            "💡 [提示] 请确保模型推理服务已启动（运行 model_service.py）",
        ]
    for line in banner:
        print(line)

# ===================== Smart-contract security audit API =====================

def _extract_summary(analysis_text: str, has_vulnerability: bool, status_text: str) -> str:
    """Derive a short summary for the audit response.

    Prefers the first ~200 characters of the function-analysis section when
    present; otherwise falls back to a one-line verdict built from the status.
    """
    if '## 合约函数分析' in analysis_text or '## Contract Function Analysis' in analysis_text:
        func_analysis_match = re.search(
            r'##\s*(?:合约函数分析|Contract Function Analysis)\s*\n(.*?)(?=\n##|$)',
            analysis_text,
            re.DOTALL | re.IGNORECASE
        )
        if func_analysis_match:
            func_text = func_analysis_match.group(1).strip()
            if func_text:
                return func_text[:200] + '...' if len(func_text) > 200 else func_text
    if has_vulnerability:
        return f"检测到安全漏洞: {status_text}"
    return f"未检测到安全漏洞: {status_text}"


def _extract_findings(analysis_text: str) -> list:
    """Extract up to five finding dicts from the vulnerability section.

    Looks for bullet lines containing a severity keyword (High/Medium/Low or
    the Chinese equivalents) inside the vulnerability-identification section
    and converts them into the frontend's finding format.
    """
    findings = []
    if not ('## Vulnerability Identification' in analysis_text
            or '## 漏洞识别' in analysis_text
            or '漏洞' in analysis_text):
        return findings

    vuln_section_match = re.search(
        r'##\s*(?:Vulnerability Identification|漏洞识别|漏洞发现)\s*\n(.*?)(?=\n##|$)',
        analysis_text,
        re.DOTALL | re.IGNORECASE
    )
    if not vuln_section_match:
        return findings

    vuln_section = vuln_section_match.group(1).strip()
    # Bullet lines that mention a severity keyword.
    vuln_lines = re.findall(
        r'[-*]\s*(?:.*?)(?:High|Medium|Low|高|中|低|严重|中等|轻微).*?(?:\n|$)',
        vuln_section,
        re.MULTILINE | re.IGNORECASE
    )
    for i, vuln_line in enumerate(vuln_lines[:5]):  # at most 5 findings
        severity = 'Medium'
        if any(keyword in vuln_line for keyword in ['High', '高', '严重', 'Critical']):
            severity = 'High'
        elif any(keyword in vuln_line for keyword in ['Low', '低', '轻微', 'Info']):
            severity = 'Low'

        description = vuln_line.strip('- *').strip()
        if len(description) > 200:
            description = description[:200] + '...'

        findings.append({
            "swcId": f"FINDING-{i+1}",
            "title": f"安全问题 {i+1}",
            "severity": severity,
            "description": description,
            "recommendation": "请查看详细分析报告中的安全建议部分",
            "confidence": 0.85
        })
    return findings


def _build_report_markdown(analysis_text: str, vuln_type: str, status_text: str, has_vulnerability: bool) -> str:
    """Render the final Markdown audit report.

    If the analysis text already starts with Markdown headings it is embedded
    as-is; otherwise it is wrapped in standard sections. All lines are emitted
    at column 0 — the previous indented template caused Markdown headings to
    render as code blocks.
    """
    audit_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    verdict = '是' if has_vulnerability else '否'
    if analysis_text.strip().startswith('##'):
        return f"""# 智能合约安全审计报告

审计时间: {audit_time}

漏洞类型: {vuln_type}

检测状态: {status_text}

发现漏洞: {verdict}

---

{analysis_text}

---
*本报告由大语言模型自动生成*
"""
    return f"""# 智能合约安全审计报告

**审计时间**: {audit_time}

**漏洞类型**: {vuln_type}

## 检测结果

**状态**: {status_text}

**发现漏洞**: {verdict}

## 详细分析

{analysis_text}

---
*本报告由大语言模型自动生成*
"""


@app.post("/api/contract/analyze")
async def analyze_contract(request: Request):
    """Smart-contract security audit using the model inference service.

    Flow: validate input -> run the model (or a canned mock result when the
    model service is down) -> translate output to Chinese -> extract a summary
    and findings -> render a Markdown report. Returns 400 on invalid input and
    500 on analysis failure.
    """
    print("\n" + "="*80)
    print(f"🔵 [合约审计请求]")

    try:
        body = await request.json()
        source_code = body.get("source_code", "")

        print(f"📥 [前端输入] 合约代码长度: {len(source_code)} 字符")
        print(f"📥 [前端输入] 合约代码 (前500字符):\n{source_code[:500]}{'...' if len(source_code) > 500 else ''}")

        if not source_code or not isinstance(source_code, str) or len(source_code.strip()) < 10:
            print(f"❌ [错误] 合约代码无效: 长度={len(source_code)}")
            print("="*80 + "\n")
            return JSONResponse(
                {"error": "source_code 不能为空且需≥10字符"},
                status_code=400
            )

        # Vulnerability type to screen for (defaults to Reentrancy).
        vuln_type = body.get("vuln_type", "Reentrancy")
        print(f"📊 [分析参数] 漏洞类型: {vuln_type}")

        print("🤖 [模型调用] 开始调用模型推理服务进行分析...")
        try:
            if not check_model_service_health():
                # Model service unreachable: return a canned English result so
                # the endpoint still produces a well-formed response.
                print("⚠️ [模型服务] 模型服务不可用，使用模拟分析结果")
                analysis_text_en = f"""
## Contract Security Analysis - {vuln_type} Vulnerability Check

### Analysis Summary
This contract has been analyzed for {vuln_type} vulnerabilities. The analysis shows that this is a simple demonstration contract.

### Key Findings
- **Contract Structure**: The contract follows basic Solidity patterns
- **Function Analysis**: Contains standard contract functions
- **Security Checks**: Basic input validation is present

### Recommendations
- Consider implementing additional security measures
- Add comprehensive input validation
- Use established security libraries like OpenZeppelin

### Risk Assessment
- **Overall Risk**: Low
- **Confidence**: High
- **Status**: Analysis Completed Successfully
                """.strip()

                has_vulnerability = False
                status_text_en = "Analysis Completed - No Critical Vulnerabilities Found"
            else:
                # Call the model service over HTTP.
                llm_result = analyze_contract_via_service(
                    source_code=source_code,
                    vuln_type=vuln_type
                )

                print(f"🤖 [模型输出] 检测结果: {llm_result.get('status', 'Unknown')}")
                print(f"🤖 [模型输出] 发现漏洞: {llm_result.get('has_vulnerability', False)}")
                print(f"🤖 [模型输出] 分析内容长度: {len(llm_result.get('analysis', ''))} 字符")

                # Adapt the LLM result to the format the frontend expects.
                analysis_text_en = llm_result.get('analysis', '')
                has_vulnerability = llm_result.get('has_vulnerability', False) or llm_result.get('detected', False)
                status_text_en = llm_result.get('status', '审计完成')

            # Translate the model output (including the Result section) to Chinese.
            print("🌐 [翻译] 开始翻译模型输出为中文...")
            status_text = translate_text(status_text_en)
            print(f"🌐 [翻译] 状态翻译完成: {status_text_en} -> {status_text}")

            analysis_text = translate_text(analysis_text_en)
            print(f"🌐 [翻译] 分析内容翻译完成 ({len(analysis_text)} 字符)")

            summary = _extract_summary(analysis_text, has_vulnerability, status_text)
            findings = _extract_findings(analysis_text)

            # extract_security_md presumably normalizes/strips wrapper noise
            # from the security markdown — behavior defined in clean_text.py.
            analysis_text = extract_security_md(analysis_text)
            report_markdown = _build_report_markdown(analysis_text, vuln_type, status_text, has_vulnerability)

            result = {
                "success": True,
                "summary": summary,
                "status": status_text,
                "findings": findings,  # findings extracted from the model analysis
                "report_markdown": report_markdown,
                "report": report_markdown,  # compatibility with frontends expecting "report"
                "has_vulnerability": has_vulnerability,
                "analysis": analysis_text,
                "timestamp": datetime.now().isoformat()
            }

            print(f"📤 [后端输出] 审计完成")
            print(f"📤 [后端输出] 发现漏洞: {has_vulnerability}")
            print(f"📤 [后端输出] 摘要: {summary[:200]}{'...' if len(summary) > 200 else ''}")
            print(f"📤 [后端输出] 报告长度: {len(report_markdown)} 字符")
            print("="*80 + "\n")

            return {
                "success": True,
                "data": result
            }

        except Exception as e:
            error_msg = str(e)
            print(f"❌ [错误] 模型分析失败: {error_msg}")
            import traceback
            print(f"堆栈跟踪:\n{traceback.format_exc()}")
            print("="*80 + "\n")
            return JSONResponse(
                {"success": False, "error": f"模型分析失败: {error_msg}"},
                status_code=500
            )

    except Exception as e:
        return JSONResponse(
            {"error": f"分析失败: {str(e)}"},
            status_code=500
        )
        
@app.get("/api/contract/examples")
async def get_contract_examples():
    """Return the built-in vulnerable and safe Solidity example contracts."""
    vulnerable_code = """pragma solidity ^0.8.0;
contract Test {
    function withdraw() external {
        msg.sender.call{value: 1 ether}("");
    }
}"""

    safe_code = """// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/security/ReentrancyGuard.sol";
import "@openzeppelin/contracts/access/Ownable.sol";

contract SafeContract is ReentrancyGuard, Ownable {
    mapping(address => uint256) public balances;
    
    event Withdrawal(address indexed user, uint256 amount);
    
    constructor() Ownable(msg.sender) {}
    
    function withdraw(uint256 amount) external nonReentrant {
        require(balances[msg.sender] >= amount, "Insufficient balance");
        
        // CEI 模式: Checks-Effects-Interactions
        balances[msg.sender] -= amount;  // 先更新状态
        
        (bool success, ) = msg.sender.call{value: amount}("");
        require(success, "Transfer failed");
        
        emit Withdrawal(msg.sender, amount);
    }
    
    function deposit() external payable {
        balances[msg.sender] += msg.value;
    }
}"""

    return {
        "success": True,
        "examples": {
            "vulnerable": {"name": "存在漏洞的示例合约", "code": vulnerable_code},
            "safe": {"name": "安全的示例合约", "code": safe_code},
        },
    }