import asyncio
import traceback
import os
import sys
import hashlib
import shutil
import time
import json
from datetime import datetime
from typing import Optional, List, Dict, Any

from fastapi import FastAPI, Depends, BackgroundTasks, HTTPException, Query, Response
from langchain_core.messages import SystemMessage
from starlette.responses import StreamingResponse
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func, text, desc
import sqlalchemy as sa
from pydantic import BaseModel

from config.crypto_config import SYSTEM_MESSAGE_TEMPLATES
# Updated import paths
from db import get_db, engine, AsyncSessionLocal
from db.models.base import Base
from db.models.scan import ScanTask, ScanResult, AIAnalysisResult, ScanRecord
from schemas.base import ScanRequest, TaskWithResults, ScanResultSchema, ScanTaskListSchema, ScanTaskListResponse, ScanResultDetailSchema, ScanResultDetailListResponse, ScanResultDetailRequest, KnowledgeBaseChatRequest
from services.scanner.scanner import Scanner
from config.settings import settings
from utils.repo import clone_repo

# Core service imports
from core import ScanService, TaskService, ExportService
from core.utils.file_utils import cleanup_old_dirs
from services.chat import ChatService
from services.llm import LLMService

# File-upload router import
from api.file_routes import router as file_router

# 创建FastAPI应用
app = FastAPI(title="代码扫描服务")

# 临时目录配置
TEMP_DIR = "/tmp/yxyweb"
TEMP_FILE_RETENTION_MINUTES = 30

# 确保临时目录存在
os.makedirs(TEMP_DIR, exist_ok=True)


@app.on_event("startup")
async def startup_event():
    """应用启动时执行的操作"""
    print("\n应用启动...")
    print("使用手动管理的数据库表，跳过自动表检查")

    # 清理旧的临时目录
    cleanup_old_dirs(TEMP_DIR, TEMP_FILE_RETENTION_MINUTES)


@app.get("/")
async def read_root():
    """根路径返回服务状态"""
    return {"msg": f"{settings.APP_NAME} 后端服务已启动"}


@app.post("/scan")
async def start_scan(request: ScanRequest, background_tasks: BackgroundTasks, db: AsyncSession = Depends(get_db)):
    """启动扫描任务"""
    try:
        # 使用扫描服务
        scan_service = ScanService(db)
        result = await scan_service.start_scan(request)
        
        # 启动异步扫描
        print("\n启动异步扫描...")
        background_tasks.add_task(run_scan_task, result["task_id"], request.repo_url, request.branch, request.scan_type)
        print("扫描任务已添加到后台队列")
        
        return result
        
    except Exception as e:
        print(f"启动扫描任务时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/scan/list", response_model=ScanTaskListResponse)
async def get_scan_list(
        db: AsyncSession = Depends(get_db),
        page: int = Query(1, ge=1, description="页码，从1开始"),
        page_size: int = Query(10, ge=1, le=100, description="每页大小，最大100"),
        repo_url: Optional[str] = Query(None, description="仓库URL筛选"),
        branch: Optional[str] = Query(None, description="分支筛选"),
        scan_type: Optional[str] = Query(None, description="扫描类型筛选"),
        status: Optional[str] = Query(None, description="任务状态筛选"),
        # 支持多种时间参数格式
        start_time: Optional[str] = Query(None, description="开始时间，支持ISO格式或YYYY-MM-DD HH:MM:SS格式"),
        end_time: Optional[str] = Query(None, description="结束时间，支持ISO格式或YYYY-MM-DD HH:MM:SS格式"),
        created_at_start: Optional[str] = Query(None, description="创建时间开始，支持ISO格式或YYYY-MM-DD HH:MM:SS格式"),
        created_at_end: Optional[str] = Query(None, description="创建时间结束，支持ISO格式或YYYY-MM-DD HH:MM:SS格式"),
        updated_at_start: Optional[str] = Query(None, description="更新时间开始，支持ISO格式或YYYY-MM-DD HH:MM:SS格式"),
        updated_at_end: Optional[str] = Query(None, description="更新时间结束，支持ISO格式或YYYY-MM-DD HH:MM:SS格式"),
        # 支持多种格式的时间范围参数
        created_at_range: Optional[str] = Query(None, description="创建时间范围，支持格式：1) start_time,end_time 2) [start_time,end_time]"),
        updated_at_range: Optional[str] = Query(None, description="更新时间范围，支持格式：1) start_time,end_time 2) [start_time,end_time]")
):
    """获取扫描任务列表"""
    try:
        # 构建筛选条件
        filters = {
            'page': page,
            'page_size': page_size,
            'repo_url': repo_url,
            'branch': branch,
            'scan_type': scan_type,
            'status': status,
            'start_time': start_time,
            'end_time': end_time,
            'created_at_start': created_at_start,
            'created_at_end': created_at_end,
            'updated_at_start': updated_at_start,
            'updated_at_end': updated_at_end,
            'created_at_range': created_at_range,
            'updated_at_range': updated_at_range
        }
        
        # 使用扫描服务
        scan_service = ScanService(db)
        result = await scan_service.get_scan_list(filters)
        
        return ScanTaskListResponse(**result)
        
    except Exception as e:
        print(f"获取扫描任务列表时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/scan/{task_id}", response_model=TaskWithResults)
async def get_scan_result(task_id: int, db: AsyncSession = Depends(get_db)):
    """获取扫描结果"""
    try:
        # 使用扫描服务
        scan_service = ScanService(db)
        result = await scan_service.get_scan_result(task_id)
        
        return TaskWithResults(**result)
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"获取扫描结果时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/scan/{task_id}/analysis")
async def get_scan_analysis(
        task_id: int,
        db: AsyncSession = Depends(get_db)
):
    """获取扫描分析结果"""
    try:
        # 获取AI分析结果
        analysis_results = await db.execute(
            select(AIAnalysisResult).where(AIAnalysisResult.task_id == task_id)
        )
        analysis_results = analysis_results.scalars().all()
        
        return {
            "task_id": task_id,
            "analysis_results": [
                {
                    "id": result.id,
                    "task_id": result.task_id,
                    "file_path": result.file_path,
                    "function_name": result.function_name,
                    "start_line": result.start_line,
                    "end_line": result.end_line,
                    "analysis_content": result.analysis_content,
                    "created_at": result.created_at
                }
                for result in analysis_results
            ]
        }
        
    except Exception as e:
        print(f"获取扫描分析结果时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


async def run_scan_task(task_id: int, repo_url: str, branch: str, scan_type: str):
    """运行扫描任务的后台函数"""
    print(f"\n开始执行扫描任务 {task_id}")
    print(f"仓库地址: {repo_url}")
    print(f"分支: {branch}")
    print(f"扫描类型: {scan_type}")
    
    async with AsyncSessionLocal() as db:
        try:
            # 更新任务状态为运行中
            await db.execute(
                text("UPDATE scan_tasks SET status = 'running', updated_at = :updated_at WHERE id = :task_id"),
                {"task_id": task_id, "updated_at": datetime.now()}
            )
            await db.commit()
            
            # 生成临时目录
            temp_dir = os.path.join(TEMP_DIR, hashlib.md5(f"{repo_url}:{branch}".encode()).hexdigest())
            
            # 检查目录是否存在
            if os.path.exists(temp_dir):
                # 检查目录是否过期（超过24小时）
                dir_age = time.time() - os.path.getctime(temp_dir)
                if dir_age > 24 * 3600:  # 24小时
                    print(f"目录已过期，重新克隆: {temp_dir}")
                    shutil.rmtree(temp_dir)
                else:
                    print(f"使用已存在的目录: {temp_dir}")
            else:
                print(f"创建新目录: {temp_dir}")
            
            # 克隆仓库
            print(f"克隆仓库到: {temp_dir}")
            if not clone_repo(repo_url, temp_dir, branch):
                raise Exception(f"克隆仓库失败: {repo_url}")
            
            # 执行扫描 - 确保传递正确的参数
            scanner = Scanner(task_id)
            scan_results = await scanner.scan_project(temp_dir, scan_type)
            
            # 转换扫描结果为统一格式
            unified_results = []
            
            if scan_type == "all":
                # 处理所有扫描器的结果
                for scanner_name, results in scan_results.items():
                    if scanner_name == "ai" and isinstance(results, list):
                        # AI扫描结果已经是列表格式
                        unified_results.extend(results)
                    elif scanner_name in ["clang", "cppcheck"] and isinstance(results, dict):
                        # 处理clang和cppcheck的结果
                        if results.get("status") == "success" and "issues" in results:
                            for issue in results["issues"]:
                                unified_results.append({
                                    "file_path": "",
                                    "function_name": "",
                                    "start_line": 0,
                                    "end_line": 0,
                                    "line_number": "0",
                                    "code_snippet": "",
                                    "issue_type": issue.get("type", ""),
                                    "severity": issue.get("severity", "medium").lower(),
                                    "message": issue.get("message", ""),
                                    "suggestion": ""
                                })
            elif scan_type == "ai":
                # AI扫描结果
                if isinstance(scan_results, list):
                    unified_results = scan_results
                else:
                    # 如果返回的是字典格式，尝试提取AI结果
                    unified_results = scan_results.get("ai", []) if isinstance(scan_results, dict) else []
            elif scan_type in ["clang", "cppcheck"] and isinstance(scan_results, dict):
                # 单个扫描器结果
                if scan_results.get("status") == "success" and "issues" in scan_results:
                    for issue in scan_results["issues"]:
                        unified_results.append({
                            "file_path": "",
                            "function_name": "",
                            "start_line": 0,
                            "end_line": 0,
                            "line_number": "0",
                            "code_snippet": "",
                            "issue_type": issue.get("type", ""),
                            "severity": issue.get("severity", "medium").lower(),
                            "message": issue.get("message", ""),
                            "suggestion": ""
                        })
            
            # 保存扫描结果
            for result in unified_results:
                await db.execute(
                    text("""
                        INSERT INTO scan_results 
                        (task_id, file_path, function_name, start_line, end_line, line_number, code_snippet, issue_type, severity, message, suggestion, created_at) 
                        VALUES (:task_id, :file_path, :function_name, :start_line, :end_line, :line_number, :code_snippet, :issue_type, :severity, :message, :suggestion, :created_at)
                    """),
                    {
                        "task_id": task_id,
                        "file_path": result.get("file_path", ""),
                        "function_name": result.get("function_name", ""),
                        "start_line": result.get("start_line", 0),
                        "end_line": result.get("end_line", 0),
                        "line_number": result.get("line_number", "0"),
                        "code_snippet": result.get("code_snippet", ""),
                        "issue_type": result.get("issue_type", result.get("category", "")),
                        "severity": result.get("severity", "medium"),
                        "message": result.get("message", result.get("description", "")),
                        "suggestion": result.get("suggestion", result.get("repair_suggestions", "")),
                        "created_at": datetime.now()
                    }
                )
            
            # 更新任务状态为完成
            await db.execute(
                text("UPDATE scan_tasks SET status = 'completed', updated_at = :updated_at WHERE id = :task_id"),
                {"task_id": task_id, "updated_at": datetime.now()}
            )
            await db.commit()
            
            print(f"扫描任务 {task_id} 完成，共发现 {len(unified_results)} 个问题")
            
        except Exception as e:
            print(f"扫描任务 {task_id} 执行失败: {str(e)}")
            print(f"错误堆栈: {traceback.format_exc()}")
            
            # 更新任务状态为失败
            try:
                await db.execute(
                    text("UPDATE scan_tasks SET status = 'failed', updated_at = :updated_at WHERE id = :task_id"),
                    {"task_id": task_id, "updated_at": datetime.now()}
                )
                await db.commit()
            except Exception as commit_error:
                print(f"更新任务状态失败: {str(commit_error)}")

@app.post("/scan/{task_id}/details", response_model=ScanResultDetailListResponse)
async def get_scan_result_details_post(
        task_id: int,
        request: ScanResultDetailRequest,
        db: AsyncSession = Depends(get_db)
):
    """获取扫描结果详情（POST方式）"""
    try:
        # 使用任务服务
        task_service = TaskService(db)
        result = await task_service.get_task_details(task_id, request.dict())
        
        return ScanResultDetailListResponse(**result)
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"获取扫描结果详情时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.delete("/scan/delete/{task_id}")
async def delete_scan_task(task_id: int, db: AsyncSession = Depends(get_db)):
    """删除扫描任务"""
    try:
        # 使用任务服务
        task_service = TaskService(db)
        result = await task_service.delete_task(task_id)
        
        return result
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"删除扫描任务时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/scan/{task_id}/retry")
async def retry_scan_task(task_id: int, db: AsyncSession = Depends(get_db)):
    """重试扫描任务"""
    try:
        # 使用任务服务
        task_service = TaskService(db)
        result = await task_service.retry_task(task_id)
        
        return result
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"重试扫描任务时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/scan/{task_id}/status")
async def get_task_status(task_id: int, db: AsyncSession = Depends(get_db)):
    """获取任务状态"""
    try:
        # 使用任务服务
        task_service = TaskService(db)
        result = await task_service.get_task_status(task_id)
        
        return result
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"获取任务状态时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/scan/{task_id}/export")
async def export_scan_result_details(
        task_id: int,
        db: AsyncSession = Depends(get_db),
        file_path: Optional[str] = Query(None, description="文件路径筛选"),
        function_name: Optional[str] = Query(None, description="函数名称筛选"),
        issue_type: Optional[str] = Query(None, description="问题类型筛选"),
        severity: Optional[str] = Query(None, description="严重级别筛选"),
        format: str = Query("csv", description="导出格式，支持csv和excel")
):
    """导出扫描结果详情"""
    try:
        # 使用导出服务
        export_service = ExportService(db)
        result = await export_service.export_scan_data(task_id, format)
        
        # 设置响应头
        content_type = "text/csv" if format.lower() == "csv" else "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        
        return Response(
            content=result["content"],
            media_type=content_type,
            headers={"Content-Disposition": f"attachment; filename={result['filename']}"}
        )
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"导出扫描结果详情时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/export/all")
async def export_all_data(
        db: AsyncSession = Depends(get_db),
        format: str = Query("csv", description="导出格式，支持csv和excel"),
        repo_url: Optional[str] = Query(None, description="仓库URL筛选"),
        branch: Optional[str] = Query(None, description="分支筛选"),
        scan_type: Optional[str] = Query(None, description="扫描类型筛选"),
        status: Optional[str] = Query(None, description="任务状态筛选"),
        start_time: Optional[str] = Query(None, description="开始时间"),
        end_time: Optional[str] = Query(None, description="结束时间")
):
    """导出所有扫描数据"""
    try:
        # 构建筛选条件
        filters = {
            'repo_url': repo_url,
            'branch': branch,
            'scan_type': scan_type,
            'status': status,
            'start_time': start_time,
            'end_time': end_time
        }
        
        # 使用导出服务
        export_service = ExportService(db)
        result = await export_service.export_all_data(filters, format)
        
        # 设置响应头
        content_type = "text/csv" if format.lower() == "csv" else "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        
        return Response(
            content=result["content"],
            media_type=content_type,
            headers={"Content-Disposition": f"attachment; filename={result['filename']}"}
        )
        
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        print(f"导出所有数据时发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/health")
async def health_check():
    """健康检查端点"""
    return {"status": "healthy", "timestamp": datetime.now().isoformat()}


@app.post("/chat/chat")
async def direct_chat(request: KnowledgeBaseChatRequest, db: AsyncSession = Depends(get_db)):
    """直接聊天接口 - 直接请求智谱LLM，返回流式响应"""

    async def generate_stream():
        """生成流式响应"""
        try:
            yield f"data: {json.dumps({'type': 'status', 'message': '开始处理您的请求...', 'step': 'start'}, ensure_ascii=False)}\n\n"
            yield f"data: {json.dumps({'type': 'status', 'message': '初始化AI模型服务...', 'step': 'llm_init'}, ensure_ascii=False)}\n\n"
            async with LLMService() as llm_service:
                yield f"data: {json.dumps({'type': 'status', 'message': '✅ AI模型服务初始化完成', 'step': 'llm_init_complete'}, ensure_ascii=False)}\n\n"
                yield f"data: {json.dumps({'type': 'status', 'message': '🤖 AI正在生成回答...', 'step': 'llm_generation'}, ensure_ascii=False)}\n\n"
                
                # 构建包含历史记录的messages
                llm_messages = []
                
                # 添加系统提示词
                llm_messages.append({"role": "system", "content": "你是名叫元小猿的专业智能助手，请根据用户提问内容回答用户问题。"})
                
                # 添加历史对话记录
                if request.include_history and request.history:
                    # 限制历史记录长度，防止token超限
                    history_to_include = request.history[-request.max_history_length:] if len(request.history) > request.max_history_length else request.history
                    for msg in history_to_include:
                        if msg.role in ["user", "assistant"]:
                            llm_messages.append({"role": msg.role, "content": msg.content})
                
                # 添加当前用户查询
                llm_messages.append({"role": "user", "content": "你是吉大正元公司开发的名为元小猿的专业智能助手,你现在要回答用户的问题" + request.query})
                
                web_search_enabled = request.web_search_enabled
                final_response = ""
                web_search_results = []
                async for chunk in llm_service.stream_chat_completion(llm_messages, web_search_enabled):
                    if chunk["type"] == "text":
                        final_response += chunk["data"]
                        yield f"data: {json.dumps({'type': 'llm_stream', 'content': chunk['data'], 'step': 'direct_stream'}, ensure_ascii=False)}\n\n"
                    elif chunk["type"] == "web_search":
                        web_search_results = chunk["data"]
                yield f"data: {json.dumps({'type': 'answer', 'content': final_response, 'step': 'answer_complete'}, ensure_ascii=False)}\n\n"
                if web_search_results:
                    yield f"data: {json.dumps({'type': 'web_search', 'data': web_search_results, 'step': 'web_search_results'}, ensure_ascii=False)}\n\n"
                response_data = {
                    "status": "success",
                    "query": request.query,
                    "response": final_response,
                    "conversation_id": request.conversation_id,
                    "model": request.model_name,
                    "usage": {},
                    "response_source": "direct_llm",
                    "timestamp": datetime.now().isoformat(),
                    "web_search_results": web_search_results
                }
                yield f"data: {json.dumps({'type': 'complete', 'data': response_data, 'step': 'complete'}, ensure_ascii=False)}\n\n"
                yield f"data: {json.dumps({'type': 'status', 'message': '🎉 处理完成！', 'step': 'finished'}, ensure_ascii=False)}\n\n"
        except Exception as e:
            error_message = f"处理过程中发生错误: {str(e)}"
            yield f"data: {json.dumps({'type': 'error', 'message': error_message, 'step': 'error'}, ensure_ascii=False)}\n\n"
            print(f"直接聊天处理失败: {str(e)}")
    return StreamingResponse(
        generate_stream(),
        media_type="text/plain",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "text/event-stream",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*",
        }
    )


@app.post("/chat/chat/stream")
async def knowledge_base_chat_stream(request: KnowledgeBaseChatRequest, db: AsyncSession = Depends(get_db)):
    """流式对话接口 - 实时显示处理过程"""
    
    async def generate_stream():
        """生成流式响应"""
        try:
            # 使用聊天服务
            chat_service = ChatService(db=db)
            
            # 1. 发送开始处理的消息
            yield f"data: {json.dumps({'type': 'status', 'message': '开始处理您的请求...', 'step': 'start'}, ensure_ascii=False)}\n\n"
            
            # 2. 验证请求参数
            yield f"data: {json.dumps({'type': 'status', 'message': '验证请求参数...', 'step': 'validation'}, ensure_ascii=False)}\n\n"
            await chat_service._validate_request(request)
            yield f"data: {json.dumps({'type': 'status', 'message': '✅ 请求参数验证通过', 'step': 'validation_complete'}, ensure_ascii=False)}\n\n"
            
            # 3. 初始化LLM服务
            yield f"data: {json.dumps({'type': 'status', 'message': '初始化AI模型服务...', 'step': 'llm_init'}, ensure_ascii=False)}\n\n"
            async with LLMService() as llm_service:
                yield f"data: {json.dumps({'type': 'status', 'message': '✅ AI模型服务初始化完成', 'step': 'llm_init_complete'}, ensure_ascii=False)}\n\n"
                
                # 4. 查询扩展
                yield f"data: {json.dumps({'type': 'status', 'message': '扩展查询关键词...', 'step': 'query_expansion'}, ensure_ascii=False)}\n\n"
                from services.chat.chat import _generate_simple_expanded_queries
                expanded_queries = _generate_simple_expanded_queries(request.query)
                yield f"data: {json.dumps({'type': 'status', 'message': f'✅ 查询扩展完成，生成 {len(expanded_queries)} 个扩展查询', 'step': 'query_expansion_complete', 'data': expanded_queries}, ensure_ascii=False)}\n\n"

                # 5. 知识库搜索
                yield f"data: {json.dumps({'type': 'status', 'message': f'搜索知识库 {request.knowledge_base_name}...', 'step': 'knowledge_search'}, ensure_ascii=False)}\n\n"
                from services.knowledge import KnowledgeService
                knowledge_service = KnowledgeService()

                search_results = []
                for i, expanded_query in enumerate(expanded_queries[:5], 1):
                    yield f"data: {json.dumps({'type': 'status', 'message': f'搜索扩展查询 {i}/{len(expanded_queries[:5])}: {expanded_query}', 'step': 'knowledge_search_progress'}, ensure_ascii=False)}\n\n"

                    results = await knowledge_service.search_knowledge_base(
                        query=expanded_query,
                        knowledge_base_name=request.knowledge_base_name,
                        top_k=2,
                        score_threshold=0.8,
                        use_rerank=True
                    )
                    search_results.extend(results)

                # 去重和排序
                search_results = knowledge_service.deduplicate_results(search_results)
                search_results = search_results[:10]
                yield f"data: {json.dumps({'type': 'status', 'message': f'✅ 知识库搜索完成，找到 {len(search_results)} 个相关文档', 'step': 'knowledge_search_complete', 'data': {'total_results': len(search_results), 'avg_score': sum(r.get('score', 0) for r in search_results) / len(search_results) if search_results else 0}}, ensure_ascii=False)}\n\n"
                
                # 6. 处理知识库文档
                yield f"data: {json.dumps({'type': 'status', 'message': '处理知识库文档内容...', 'step': 'documents_processing'}, ensure_ascii=False)}\n\n"

                # 获取知识库文件列表
                file_list = await chat_service._get_knowledge_base_files(request.knowledge_base_name)
                if file_list:
                    yield f"data: {json.dumps({'type': 'status', 'message': f'找到 {len(file_list)} 个知识库文件', 'step': 'documents_processing_files'}, ensure_ascii=False)}\n\n"

                    # 下载所有文件内容
                    all_documents_content = await chat_service._download_all_documents(
                        request.knowledge_base_name,
                        file_list
                    )

                    if all_documents_content:
                        yield f"data: {json.dumps({'type': 'status', 'message': f'成功下载 {len(file_list)} 个文件，开始AI分析...', 'step': 'documents_processing_analysis'}, ensure_ascii=False)}\n\n"

                        # 构建LLM请求消息
                        llm_messages = []
                        
                        # 添加系统提示词
                        llm_messages.append({"role": "system", "content": f"你是{request.knowledge_base_name}知识库的专业智能助手，请根据提供的完整文档内容回答用户问题。"})
                        
                        # 添加历史对话记录
                        if request.include_history and request.history:
                            # 限制历史记录长度，防止token超限
                            history_to_include = request.history[-request.max_history_length:] if len(request.history) > request.max_history_length else request.history
                            for msg in history_to_include:
                                if msg.role in ["user", "assistant"]:
                                    llm_messages.append({"role": msg.role, "content": msg.content})
                        
                        # 添加当前用户查询（包含文档内容）
                        llm_messages.append({"role": "user", "content": f"基于以下文档内容回答问题：\n\n文档内容：{all_documents_content}\n\n问题：{request.query}"})

                        # 开始流式LLM响应
                        yield f"data: {json.dumps({'type': 'status', 'message': '🤖 AI正在分析文档并生成答案...', 'step': 'documents_processing_llm'}, ensure_ascii=False)}\n\n"

                        initial_answer = ""
                        async for chunk in llm_service.stream_chat_completion(llm_messages, False):
                            if isinstance(chunk, dict) and chunk.get("type") == "text":
                                initial_answer += chunk["data"]
                                # yield f"data: {json.dumps({'type': 'llm_stream', 'content': chunk['data'], 'step': 'documents_processing_stream'}, ensure_ascii=False)}\n\n"
                            elif isinstance(chunk, str):
                                initial_answer += chunk
                                # yield f"data: {json.dumps({'type': 'llm_stream', 'content': chunk, 'step': 'documents_processing_stream'}, ensure_ascii=False)}\n\n"

                        yield f"data: {json.dumps({'type': 'status', 'message': f'✅ 文档分析完成，生成初步答案 ({len(initial_answer)} 字符)', 'step': 'documents_processing_complete'}, ensure_ascii=False)}\n\n"

                        documents_processing_result = {
                            "documents_content": all_documents_content,
                            "initial_answer": initial_answer,
                            "file_count": len(file_list),
                            "file_list": file_list,
                            "processing_success": True
                        }
                    else:
                        yield f"data: {json.dumps({'type': 'status', 'message': '⚠️ 文档内容为空，将使用知识卡片信息', 'step': 'documents_processing_failed'}, ensure_ascii=False)}\n\n"
                        documents_processing_result = {
                            "documents_content": "",
                            "initial_answer": "",
                            "file_count": len(file_list),
                            "processing_success": False
                        }
                else:
                    yield f"data: {json.dumps({'type': 'status', 'message': '⚠️ 知识库没有找到文件，将使用知识卡片信息', 'step': 'documents_processing_failed'}, ensure_ascii=False)}\n\n"
                    documents_processing_result = {
                        "documents_content": "",
                        "initial_answer": "",
                        "file_count": 0,
                        "processing_success": False
                    }
                
                # 7. 整合信息并生成最终答案
                yield f"data: {json.dumps({'type': 'status', 'message': '整合信息并生成最终答案...', 'step': 'integration'}, ensure_ascii=False)}\n\n"
                
                try:
                    # 构建知识卡片上下文
                    knowledge_cards_context = chat_service._build_context_from_search_results(search_results)
                    # 获取文档处理结果
                    documents_answer = documents_processing_result.get("initial_answer", "")
                    documents_content = documents_processing_result.get("documents_content", "")
                    # 构建整合提示词
                    integration_prompt = chat_service._build_integration_prompt(
                        query=request.query,
                        knowledge_cards_context=knowledge_cards_context,
                        documents_answer=documents_answer,
                        documents_content=documents_content,
                        knowledge_base_name=request.knowledge_base_name,
                        prompt_name="default"
                    )
                    yield f"data: {json.dumps({'type': 'status', 'message': '调用AI模型生成最终答案...', 'step': 'llm_generation'}, ensure_ascii=False)}\n\n"
                    
                    # 构建包含历史记录的messages
                    llm_messages = []
                    
                    # 添加系统提示词
                    llm_messages.append({"role": "system", "content": f"你是{request.knowledge_base_name}知识库的专业智能助手，请根据提供的知识卡片和文档内容回答用户问题。"})
                    
                    # 添加历史对话记录
                    if request.include_history and request.history:
                        # 限制历史记录长度，防止token超限
                        history_to_include = request.history[-request.max_history_length:] if len(request.history) > request.max_history_length else request.history
                        for msg in history_to_include:
                            if msg.role in ["user", "assistant"]:
                                llm_messages.append({"role": msg.role, "content": msg.content})
                    
                    # 添加当前用户查询（包含整合提示词）
                    llm_messages.append({"role": "user", "content": integration_prompt})
                    
                    final_response = ""
                    web_search_results = []
                    web_search_enabled = getattr(request, 'web_search_enabled', False)
                    async for chunk in llm_service.stream_chat_completion(llm_messages, web_search_enabled):
                        if isinstance(chunk, dict) and chunk.get("type") == "text":
                            final_response += chunk["data"]
                            yield f"data: {json.dumps({'type': 'llm_stream', 'content': chunk['data'], 'step': 'integration_stream'}, ensure_ascii=False)}\n\n"
                        elif isinstance(chunk, dict) and chunk.get("type") == "web_search":
                            web_search_results = chunk["data"]
                    response_source = "knowledge_base_integrated"
                    yield f"data: {json.dumps({'type': 'status', 'message': '✅ 成功使用知识库整合处理', 'step': 'integration_complete'}, ensure_ascii=False)}\n\n"
                    # 发送最终答案
                    yield f"data: {json.dumps({'type': 'answer', 'content': final_response, 'step': 'answer_complete'}, ensure_ascii=False)}\n\n"
                    # 发送web_search结构化结果
                    if web_search_results:
                        yield f"data: {json.dumps({'type': 'web_search', 'data': web_search_results, 'step': 'web_search_results'}, ensure_ascii=False)}\n\n"
                except Exception as e:
                    yield f"data: {json.dumps({'type': 'status', 'message': f'⚠️ 知识库整合处理失败，回退到基础LLM回答: {str(e)}', 'step': 'integration_fallback'}, ensure_ascii=False)}\n\n"
                    # 回退到基础LLM回答 - 流式输出
                    context = chat_service._build_context_from_search_results(search_results)
                    
                    # 构建包含历史记录的messages
                    llm_messages = []
                    
                    # 添加系统提示词
                    llm_messages.append({"role": "system", "content": "你是一个专业知识库的助手，请根据提供的上下文回答用户问题。"})
                    
                    # 添加历史对话记录
                    if request.include_history and request.history:
                        # 限制历史记录长度，防止token超限
                        history_to_include = request.history[-request.max_history_length:] if len(request.history) > request.max_history_length else request.history
                        for msg in history_to_include:
                            if msg.role in ["user", "assistant"]:
                                llm_messages.append({"role": msg.role, "content": msg.content})
                    
                    # 添加当前用户查询（包含上下文）
                    llm_messages.append({"role": "user", "content": f"基于以下上下文回答问题：\n\n上下文：{context}\n\n问题：{request.query}"})
                    
                    final_response = ""
                    web_search_results = []
                    web_search_enabled = getattr(request, 'web_search_enabled', False)
                    async for chunk in llm_service.stream_chat_completion(llm_messages, web_search_enabled):
                        if isinstance(chunk, dict) and chunk.get("type") == "text":
                            final_response += chunk["data"]
                            yield f"data: {json.dumps({'type': 'llm_stream', 'content': chunk['data'], 'step': 'fallback_stream'}, ensure_ascii=False)}\n\n"
                        elif isinstance(chunk, dict) and chunk.get("type") == "web_search":
                            web_search_results = chunk["data"]
                    response_source = "llm_fallback"
                    yield f"data: {json.dumps({'type': 'answer', 'content': final_response, 'step': 'answer_complete'}, ensure_ascii=False)}\n\n"
                    if web_search_results:
                        yield f"data: {json.dumps({'type': 'web_search', 'data': web_search_results, 'step': 'web_search_results'}, ensure_ascii=False)}\n\n"
                # 9. 发送完整响应数据
                response_data = {
                    "status": "success",
                    "query": request.query,
                    "response": final_response,
                    "context": search_results,
                    "conversation_id": request.conversation_id,
                    "model": request.model_name,
                    "usage": {},  # 流式响应暂不统计usage
                    "sources": search_results,
                    "expanded_queries": expanded_queries,
                    "search_results": search_results,
                    "response_source": response_source,
                    "documents_processing": {
                        "processing_success": documents_processing_result.get("processing_success", False),
                        "file_count": documents_processing_result.get("file_count", 0),
                        "file_list": documents_processing_result.get("file_list", []),
                        "documents_content_length": len(documents_processing_result.get("documents_content", "")),
                        "initial_answer_length": len(documents_processing_result.get("initial_answer", "")),
                        "error": documents_processing_result.get("error", None)
                    },
                    "search_metadata": {
                        "total_results": len(search_results),
                        "avg_score": sum(r.get("score", 0) for r in search_results) / len(search_results) if search_results else 0,
                        "search_time": datetime.now().isoformat()
                    },
                    "timestamp": datetime.now().isoformat(),
                    "web_search_results": web_search_results
                }
                yield f"data: {json.dumps({'type': 'complete', 'data': response_data, 'step': 'complete'}, ensure_ascii=False)}\n\n"
                
                # 10. 保存对话记录（如果需要）
                if request.conversation_id:
                    yield f"data: {json.dumps({'type': 'status', 'message': '保存对话记录...', 'step': 'save_conversation'}, ensure_ascii=False)}\n\n"
                    await chat_service._save_conversation(
                        conversation_id=request.conversation_id,
                        query=request.query,
                        response=final_response,
                        context=search_results,
                        quality_assessment={
                            "type": "documents_processing",
                            "processing_success": documents_processing_result.get("processing_success", False),
                            "file_count": documents_processing_result.get("file_count", 0),
                            "response_source": response_source
                        }
                    )
                    yield f"data: {json.dumps({'type': 'status', 'message': '✅ 对话记录保存完成', 'step': 'save_conversation_complete'}, ensure_ascii=False)}\n\n"
                
                yield f"data: {json.dumps({'type': 'status', 'message': '🎉 处理完成！', 'step': 'finished'}, ensure_ascii=False)}\n\n"
                
        except Exception as e:
            error_message = f"处理过程中发生错误: {str(e)}"
            yield f"data: {json.dumps({'type': 'error', 'message': error_message, 'step': 'error'}, ensure_ascii=False)}\n\n"
            print(f"流式聊天处理失败: {str(e)}")
    
    return StreamingResponse(
        generate_stream(),
        media_type="text/plain",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "text/event-stream",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*",
        }
    )


# Register the file-upload router so its endpoints are served by this app.
app.include_router(file_router)

# Request schema for the code-generation endpoint (/code/generate).
class CodeGenerationRequest(BaseModel):
    """Payload accepted by ``/code/generate``."""

    # Latest key-information string supplied by the frontend
    key: str
    # Title of the requested example; mapped to a prompt-template key
    title: str
    # Extracted key information (crypto evaluation details)
    extract: str
    # Target programming language for the generated code
    language_type: str


@app.post("/code/generate")
async def generate_code_example(request: CodeGenerationRequest):
    """Generate a code example for the given title/language, streamed as SSE.

    The response is a ``text/event-stream`` of JSON events: ``status`` progress
    markers, ``code_stream`` chunks of generated code, a final ``complete``
    event with summary data, or an ``error`` event on failure.

    Args:
        request: Validated payload carrying key information, a title (mapped
            to a prompt-template key), extracted details, and the target
            language.

    Returns:
        StreamingResponse emitting SSE lines (``data: {...}\\n\\n``).
    """
    from services.llm import LLMService
    from config.crypto_config import CODE_GEN_PROMPT_TEMPLATES, SYSTEM_MESSAGE_TEMPLATES
    from langchain_core.messages import SystemMessage, HumanMessage

    async def generate_stream():
        """Async generator producing the SSE event stream."""
        try:
            yield f"data: {json.dumps({'type': 'status', 'message': '开始生成代码示例...', 'step': 'start'}, ensure_ascii=False)}\n\n"
            yield f"data: {json.dumps({'type': 'status', 'message': '验证请求参数...', 'step': 'validation'}, ensure_ascii=False)}\n\n"
            # Reject empty fields early; each failure ends the stream with an error event.
            if not request.key:
                yield f"data: {json.dumps({'type': 'error', 'message': '关键信息不能为空', 'step': 'validation_error'}, ensure_ascii=False)}\n\n"
                return
            if not request.title:
                yield f"data: {json.dumps({'type': 'error', 'message': '标题不能为空', 'step': 'validation_error'}, ensure_ascii=False)}\n\n"
                return
            if not request.extract:
                yield f"data: {json.dumps({'type': 'error', 'message': '提取信息不能为空', 'step': 'validation_error'}, ensure_ascii=False)}\n\n"
                return
            if not request.language_type:
                yield f"data: {json.dumps({'type': 'error', 'message': '语言类型不能为空', 'step': 'validation_error'}, ensure_ascii=False)}\n\n"
                return
            yield f"data: {json.dumps({'type': 'status', 'message': '✅ 请求参数验证通过', 'step': 'validation_complete'}, ensure_ascii=False)}\n\n"
            yield f"data: {json.dumps({'type': 'status', 'message': '初始化AI模型服务...', 'step': 'llm_init'}, ensure_ascii=False)}\n\n"

            async with LLMService() as llm_service:
                yield f"data: {json.dumps({'type': 'status', 'message': '✅ AI模型服务初始化完成', 'step': 'llm_init_complete'}, ensure_ascii=False)}\n\n"
                yield f"data: {json.dumps({'type': 'status', 'message': f'正在生成{request.language_type}代码示例...', 'step': 'code_generation'}, ensure_ascii=False)}\n\n"

                # Chinese category titles -> prompt-template key prefixes.
                # NOTE(review): this mapping is duplicated in the /code-example
                # endpoint below — consider extracting a shared constant.
                title_key_map = {
                    "信息种类及关键数据类型": "KEY",
                    "身份鉴别": "AUTH",
                    "访问控制信息完整性保护": "ACCESS_CONTROL",
                    "重要数据传输机密性和完整性": "KEY_DATA_TRANSPORT_CONFIDENTIALITY_AND_INTEGRITY",
                    "重要数据存储机密性": "KEY_DATA_STORE_CONFIDENTIALITY",
                    "重要数据存储完整性": "KEY_DATA_STORE_INTEGRITY",
                    "重要信息资源安全标记完整性": "KEY_DATA_SAFE_MARK_INTEGRITY",
                    "不可否认性": "NON-REPUDIATION"
                }
                # Unknown titles fall back to being used as the key verbatim.
                code_example_key = title_key_map.get(request.title, request.title)
                gen_type = code_example_key
                gen_language = request.language_type

                from services.file.api_info_service import APIInfoService
                api_info = APIInfoService().get_api_info(gen_type)
                prompt_template = CODE_GEN_PROMPT_TEMPLATES.get(f"{gen_type}_CODE_GENERATOR")
                if not prompt_template:
                    yield f"data: {json.dumps({'type': 'error', 'message': '未找到对应的代码生成模板', 'step': 'template_error'}, ensure_ascii=False)}\n\n"
                    return
                prompt_message = prompt_template.format(
                    key_information=request.key,
                    crypto_eval_information=request.extract,
                    api_information=api_info,
                    language=gen_language
                )

                # Build the LLM message list.
                # NOTE(review): SYSTEM_MESSAGE_TEMPLATES is passed directly as the
                # system message content — presumably a string; confirm it is not
                # a dict of templates needing a key lookup.
                llm_messages = [
                    {"role": "system", "content": SYSTEM_MESSAGE_TEMPLATES},
                    {"role": "user", "content": prompt_message}
                ]

                # Stream the LLM output chunk by chunk.
                try:
                    code_content = ""
                    step_count = 0
                    start_time = time.time()
                    async for chunk in llm_service.stream_chat_completion(llm_messages, False):
                        step_count += 1
                        # BUGFIX: the original indexed chunk["type"] before checking
                        # the chunk's type, which raised TypeError for plain str
                        # chunks and made the str branch unreachable. Type-check
                        # before key access; non-text dict chunks are skipped.
                        if isinstance(chunk, dict) and chunk.get("type") == "text":
                            piece = chunk["data"]
                        elif isinstance(chunk, str):
                            piece = chunk
                        else:
                            continue
                        code_content += piece
                        yield f"data: {json.dumps({'type': 'code_stream', 'content': piece, 'step': 'code_generation'}, ensure_ascii=False)}\n\n"
                    process_time = round(time.time() - start_time, 2)
                    match_count = 0  # reserved field, currently always 0
                    answer_length = len(code_content)
                    yield f"data: {json.dumps({'type': 'status', 'message': '✅ 代码示例生成完成', 'step': 'code_generation_complete'}, ensure_ascii=False)}\n\n"
                    # Final summary event with the full generated content.
                    response_data = {
                        "status": "success",
                        "language_type": request.language_type,
                        "timestamp": datetime.now().isoformat(),
                        "process_time": process_time,
                        "step_count": step_count,
                        "match_count": match_count,
                        "answer_length": answer_length,
                        "code_content": code_content
                    }
                    yield f"data: {json.dumps({'type': 'complete', 'data': response_data, 'step': 'complete'}, ensure_ascii=False)}\n\n"
                    yield f"data: {json.dumps({'type': 'status', 'message': '🎉 处理完成！', 'step': 'finished'}, ensure_ascii=False)}\n\n"
                except Exception as e:
                    yield f"data: {json.dumps({'type': 'error', 'message': f'LLM生成失败: {str(e)}', 'step': 'llm_error'}, ensure_ascii=False)}\n\n"
                    return
        except Exception as e:
            # Top-level safety net: report the failure as a final SSE error event.
            error_message = f"处理过程中发生错误: {str(e)}"
            yield f"data: {json.dumps({'type': 'error', 'message': error_message, 'step': 'error'}, ensure_ascii=False)}\n\n"
            print(f"代码生成处理失败: {str(e)}")

    return StreamingResponse(
        generate_stream(),
        media_type="text/plain",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "text/event-stream",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*",
        }
    )

class CodeExampleRequest(BaseModel):
    """Payload accepted by ``/code-example``."""

    # Category title used to look up the matching API-info template
    title: str

@app.post("/code-example")
def code_example(request: CodeExampleRequest):
    """Return the canned API-info text matching the requested title.

    Unknown titles fall back to being used as the lookup key verbatim, and a
    missing template yields an empty string.
    """
    from config.crypto_config import API_PROMPT_TEMPLATES

    # Chinese category titles -> template key prefixes.
    title_key_map = {
        "信息种类及关键数据类型": "KEY",
        "身份鉴别": "AUTH",
        "访问控制信息完整性保护": "ACCESS_CONTROL",
        "重要数据传输机密性和完整性": "KEY_DATA_TRANSPORT_CONFIDENTIALITY_AND_INTEGRITY",
        "重要数据存储机密性": "KEY_DATA_STORE_CONFIDENTIALITY",
        "重要数据存储完整性": "KEY_DATA_STORE_INTEGRITY",
        "重要信息资源安全标记完整性": "KEY_DATA_SAFE_MARK_INTEGRITY",
        "不可否认性": "NON-REPUDIATION"
    }
    lookup_key = title_key_map.get(request.title, request.title)
    return {"content": API_PROMPT_TEMPLATES.get(f"{lookup_key}_API_INFO", "")}

