"""
统一的IMA风格AI服务
整合原有功能并基于腾讯IMA设计理念优化
单端口、全功能、高性能
"""

from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import Optional, List, Dict, Any
import os
import tempfile
import json
import logging
from datetime import datetime
from pathlib import Path

# Disable ChromaDB anonymous telemetry (cuts per-call overhead)
os.environ["ANONYMIZED_TELEMETRY"] = "False"

# Project-local retrieval-augmented-generation service (vector store + chunking)
from app.rag_simple import RagService

# Module-wide logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="Learning Platform AI - IMA Style", 
    version="3.0.0",
    description="基于腾讯IMA设计的统一智能学习助手"
)

# === Runtime configuration (environment variables) ===
AI_CORS_ORIGINS = os.getenv("AI_CORS_ORIGINS", "*")
AI_ASK_RPM = int(os.getenv("AI_ASK_RPM", "30"))  # allowed /ai/ask calls per minute (per user/course/IP key)
AI_MAX_CONCURRENCY = int(os.getenv("AI_MAX_CONCURRENCY", "4"))  # max concurrent /ai/ask handlers
AI_HTTP_TIMEOUT = int(os.getenv("AI_HTTP_TIMEOUT", "30"))  # timeout (seconds) for calls to external AI services

# CORS configuration ("*" allows everything; otherwise a comma-separated list)
_origins = [o.strip() for o in AI_CORS_ORIGINS.split(",") if o.strip()]
allow_origins = ["*"] if AI_CORS_ORIGINS.strip() == "*" else _origins

app.add_middleware(
    CORSMiddleware,
    allow_origins=allow_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Shared RAG service instance backed by the on-disk Chroma store
rag = RagService(persist_dir="app/.chroma")

# === Concurrency and rate-limiting primitives ===
import asyncio
from collections import defaultdict, deque
import time

# Global cap on simultaneous /ai/ask requests
ask_semaphore = asyncio.Semaphore(AI_MAX_CONCURRENCY)
_rate_buckets: Dict[str, deque] = defaultdict(deque)  # rate-limit key -> deque of request timestamps (epoch seconds)

def _rate_key(user_id: Optional[int], course_id: Optional[int], ip: str) -> str:
    return f"u:{user_id or 'anon'}|c:{course_id or '-'}|ip:{ip}"

def check_rate_limit(key: str, rpm: int) -> tuple[bool, int]:
    """Sliding-window rate limiter.

    Returns (allowed, seconds_until_retry); records the request timestamp
    in the key's bucket when allowed.
    """
    current = time.time()
    bucket = _rate_buckets[key]
    # Evict timestamps that fell out of the 60-second window.
    while bucket and current - bucket[0] > 60:
        bucket.popleft()
    if len(bucket) < rpm:
        bucket.append(current)
        return True, 0
    # Window full: compute how long until the oldest entry expires.
    wait_seconds = max(1, int(60 - (current - bucket[0])))
    return False, wait_seconds

# === API模型定义 ===

class AskRequest(BaseModel):
    """Q&A request payload - backward compatible with the legacy API."""
    question: str = Field(..., min_length=1, max_length=1000)
    userId: Optional[int] = None  # scopes retrieval to this user's documents
    courseId: Optional[int] = None  # scopes retrieval to this course's documents
    topK: int = Field(default=4, ge=1, le=20)  # number of chunks to retrieve
    docIds: Optional[List[str]] = Field(default=None, description="限定参与检索的文档doc_id列表")
    useAI: Optional[bool] = True  # False -> rule-based answer only (no LLM call when docs exist)

class IntelligentQueryRequest(BaseModel):
    """Q&A request payload - IMA-enhanced variant with mode and model selection."""
    question: str = Field(..., min_length=1, max_length=1000)
    userId: Optional[int] = None  # scopes retrieval to this user's documents
    courseId: Optional[int] = None  # scopes retrieval to this course's documents
    queryMode: str = Field(default="hybrid", pattern="^(precise|creative|hybrid)$")  # controls prompt phrasing
    topK: int = Field(default=4, ge=1, le=20)
    useAI: bool = True
    aiModel: str = Field(default="auto", pattern="^(qwen|openai|deepseek|auto)$")  # "auto" picks the first configured backend

class WritingAssistRequest(BaseModel):
    """Writing-assistant request payload.

    NOTE(review): no route in this chunk consumes this model - confirm the
    corresponding endpoint exists elsewhere in the module.
    """
    content: str = Field(..., min_length=1)
    task: str = Field(..., pattern="^(summarize|expand|translate|polish|outline)$")
    language: str = Field(default="zh", pattern="^(zh|en|auto)$")
    style: str = Field(default="formal", pattern="^(formal|casual|academic|creative)$")
    maxLength: Optional[int] = Field(default=None, ge=100, le=5000)

# === AI模型管理 ===

class UnifiedAI:
    """Unified AI dispatcher: picks an available backend and generates answers.

    Backend availability is determined once at construction time from
    environment variables. A rule-based fallback answer is produced when no
    backend is configured, the requested backend is unknown/unimplemented,
    or the backend call raises.
    """

    def __init__(self):
        # A backend is "available" when its API key is present in the environment.
        self.models = {
            "qwen": {
                "api_key": os.getenv("QWEN_API_KEY") or os.getenv("DASHSCOPE_API_KEY"),
                "available": bool(os.getenv("QWEN_API_KEY") or os.getenv("DASHSCOPE_API_KEY"))
            },
            "deepseek": {
                "api_key": os.getenv("DEEPSEEK_API_KEY"),
                "available": bool(os.getenv("DEEPSEEK_API_KEY"))
            },
            "openai": {
                "api_key": os.getenv("OPENAI_API_KEY"),
                "available": bool(os.getenv("OPENAI_API_KEY"))
            }
        }

    def get_available_models(self) -> List[str]:
        """Names of backends whose API key is configured."""
        return [name for name, config in self.models.items() if config["available"]]

    def get_best_model(self) -> str:
        """First available backend in preference order; defaults to 'qwen'."""
        preference = ["qwen", "deepseek", "openai"]
        for model in preference:
            if self.models[model]["available"]:
                return model
        return "qwen"  # nothing configured; callers will land in the rule-based fallback

    async def generate_response(self, prompt: str, context: str = "", model: str = "auto") -> Dict[str, Any]:
        """Generate an answer dict: {"success", "content", "model_used"}.

        Falls back to the rule-based answer when the requested model is
        unknown, unavailable, not implemented, or raises during the call.
        """
        if model == "auto":
            model = self.get_best_model()

        # BUG FIX: guard unknown model names instead of raising KeyError.
        config = self.models.get(model)
        if not config or not config["available"]:
            return self._fallback_response(prompt, context)

        # BUG FIX: only qwen has a concrete implementation in this class;
        # "deepseek"/"openai" previously hit a missing-method AttributeError
        # that was rescued by the except block. Dispatch explicitly and fall
        # back cleanly for backends without an implementation.
        handlers = {"qwen": self._call_qwen}
        handler = handlers.get(model)
        if handler is None:
            return self._fallback_response(prompt, context)

        try:
            return await handler(prompt, context)
        except Exception as e:
            logger.error(f"AI模型 {model} 调用失败: {e}")
            return self._fallback_response(prompt, context)

    async def _call_qwen(self, prompt: str, context: str) -> Dict[str, Any]:
        """Call Tongyi Qianwen (DashScope) and wrap the answer with attribution.

        Raises on HTTP/transport failure; generate_response() handles fallback.
        NOTE: uses blocking `requests` inside an async method - TODO consider
        an async HTTP client or a thread executor.
        """
        import requests

        api_key = self.models["qwen"]["api_key"]
        url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"

        # Ground the prompt in retrieved document context when available.
        if context:
            full_prompt = f"""你是一名专业的AI学习助手，擅长数学、科学和学术内容。基于以下文档内容回答问题：

📚 **参考文档**：
{context[:3000]}

❓ **用户问题**：{prompt}

**回答要求：**
1. 📖 基于文档内容提供准确、详细的回答
2. 📊 如涉及数学公式，请使用LaTeX格式（用 $...$ 包围行内公式，用 $$...$$ 包围块级公式）
3. 🔬 对于学术内容，请用通俗易懂的语言解释专业概念
4. 📝 回答要结构清晰，使用合适的Markdown格式
5. 💡 如果文档内容复杂，请提供清晰的总结和关键要点

请提供专业而易懂的回答："""
        else:
            full_prompt = prompt

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        data = {
            "model": "qwen-turbo",
            "input": {
                "messages": [
                    {"role": "system", "content": "你是一个专业的学习助手，擅长基于文档分析和知识问答。"},
                    {"role": "user", "content": full_prompt}
                ]
            },
            "parameters": {
                "result_format": "message",
                "max_tokens": 1200,
                "temperature": 0.7
            }
        }

        # CONSISTENCY FIX: honor the configurable AI_HTTP_TIMEOUT (default 30s)
        # instead of a hardcoded 30.
        response = requests.post(url, headers=headers, json=data, timeout=AI_HTTP_TIMEOUT)

        if response.status_code == 200:
            result = response.json()
            if "output" in result and "choices" in result["output"]:
                content = result["output"]["choices"][0]["message"]["content"]
                return {
                    "success": True,
                    "content": content + "\n\n---\n🤖 *由通义千问提供AI智能分析*",
                    "model_used": "qwen"
                }

        raise Exception(f"通义千问API调用失败: {response.status_code}")

    def _fallback_response(self, prompt: str, context: str) -> Dict[str, Any]:
        """Rule-based answer used when no AI backend is usable."""
        if context:
            # Document-grounded answer: echo the most relevant context.
            answer = f"📚 **基于文档的智能分析**\n\n"
            answer += f"针对您的问题「{prompt}」，从上传的文档中找到以下相关信息：\n\n"
            answer += context[:500] + "...\n\n"
            answer += "💡 *这是基于文档内容的智能分析，配置AI模型可获得更深入的解答*"
        else:
            # Generic keyword-driven advice.
            answer = f"🤔 **关于「{prompt}」的学习建议**\n\n"

            if any(word in prompt.lower() for word in ['学习', '怎么学', '如何学']):
                answer += "1. 制定明确的学习目标和计划\n2. 从基础概念开始，循序渐进\n3. 理论与实践相结合\n4. 定期复习和总结\n5. 寻求帮助和讨论"
            elif any(word in prompt.lower() for word in ['是什么', '什么是', '定义']):
                answer += "这是一个很好的概念性问题。建议：\n1. 查阅权威资料\n2. 理解核心定义\n3. 了解应用场景\n4. 通过例子加深理解"
            else:
                answer += "1. 分解问题，逐步分析\n2. 收集相关资料\n3. 理论联系实际\n4. 多角度思考\n\n建议上传相关文档，我可以提供更精准的回答。"

            answer += "\n\n💡 *配置AI模型可获得更智能的回答*"

        return {
            "success": True,
            "content": answer,
            "model_used": "fallback"
        }

# Module-level singleton used by the route handlers below.
unified_ai = UnifiedAI()

class AskResponse(BaseModel):
    """Response shape for /ai/ask: answer text plus cited source snippets."""
    # NOTE(review): /ai/ask actually returns dicts in source_docs and does not
    # declare this as response_model - confirm this model is still in use.
    answer: str
    source_docs: List[str] = []

def generate_general_answer(question: str) -> str:
    """Produce a rule-based generic answer keyed off keywords in the question.

    Categories, in priority order: study advice, technical questions,
    concept explanations, exam prep; anything else gets the default reply.
    """
    lowered = question.lower()

    # (trigger keywords, canned answer) pairs checked in order.
    keyword_answers = [
        (('学习', '怎么学', '如何学', '学会', '掌握'),
         f"关于「{question}」的学习建议：\n\n1. 制定明确的学习目标和计划\n2. 从基础概念开始，循序渐进\n3. 理论与实践相结合，多做练习\n4. 定期复习和总结知识点\n5. 遇到问题及时寻求帮助或查阅资料\n\n建议您上传相关课程资料，我可以提供更具体的学习指导。"),
        (('编程', '代码', '算法', '开发', 'python', 'java', 'javascript'),
         f"关于「{question}」的技术解答：\n\n1. 先理解问题的核心需求\n2. 查阅官方文档和最佳实践\n3. 从简单示例开始，逐步扩展\n4. 注重代码质量和可维护性\n5. 多练习和参考优秀项目\n\n如需更详细的技术指导，请上传相关技术文档或代码示例。"),
        (('是什么', '什么是', '定义', '概念', '含义'),
         f"关于「{question}」的概念解释：\n\n这是一个很好的概念性问题。为了给您更准确的定义和解释，建议：\n\n1. 查阅权威教材或官方文档\n2. 理解概念的背景和应用场景\n3. 结合具体例子加深理解\n4. 对比相关概念的异同\n\n请上传相关学习材料，我可以基于具体内容为您详细解释。"),
        (('考试', '复习', '备考', '重点', '难点'),
         f"关于「{question}」的复习建议：\n\n1. 梳理知识框架，构建思维导图\n2. 识别重点难点，重点突破\n3. 多做练习题，巩固理解\n4. 模拟考试环境，提高应试能力\n5. 查漏补缺，全面复习\n\n请上传复习资料或考试大纲，我可以帮您制定更具体的复习计划。"),
    ]
    for keywords, canned in keyword_answers:
        if any(word in lowered for word in keywords):
            return canned

    # Default: encourage the user to upload material for a grounded answer.
    return f"您的问题「{question}」很有意思！\n\n作为AI学习助手，我建议：\n\n1. 将问题分解为更具体的小问题\n2. 收集相关的学习资料和文档\n3. 通过实践和练习加深理解\n4. 与同学或老师讨论交流\n\n如果您能上传相关的课程资料、文档或笔记，我可以基于这些内容为您提供更精准和详细的回答。"

def call_qwen_directly(question: str, context: str, api_key: str) -> str:
    """Call Alibaba Cloud Tongyi Qianwen (DashScope) over HTTP with retries.

    Args:
        question: user question.
        context: optional retrieved document text to ground the answer in.
        api_key: DashScope bearer token.

    Returns:
        The model's answer (with a provider footer) on success, or a
        human-readable "❌ ..." degradation message on any failure - this
        function never raises.

    BUG FIX: the function used `requests.Session()` and
    `requests.exceptions.Timeout` without importing `requests` itself (only
    `requests.adapters` was imported), so every call raised NameError.
    """
    try:
        import requests  # was missing: only submodules were imported before
        from requests.adapters import HTTPAdapter
        from urllib3.util.retry import Retry
        import time

        # Session with adapter-level retries on transient HTTP errors.
        session = requests.Session()
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)

        url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "X-DashScope-SSE": "disable"
        }

        # Build the prompt - document-grounded when context is provided.
        if context:
            prompt = f"""你是阿里云通义千问AI助手，专业的学习辅导专家，特别擅长数学、科学和学术内容。请基于以下文档内容回答用户问题。

📚 **参考文档**：
{context[:3000]}

❓ **用户问题**：{question}

🎯 **回答要求**：
1. 📖 基于文档内容准确回答，深入分析关键概念
2. 📊 数学公式使用LaTeX格式（$...$包围行内公式，$$...$$包围块级公式）
3. 🔬 用通俗易懂的语言解释专业概念和学术内容
4. 📝 结构清晰，使用Markdown格式，重点突出
5. 💡 提供实用的学习建议和相关知识拓展
6. 🎓 如果是学术论文内容，请总结核心观点和方法

请提供详细、专业而易懂的回答："""
        else:
            prompt = f"""你是阿里云通义千问AI助手，专业的学习辅导专家，特别擅长数学、科学和学术内容。请回答以下学习相关问题。

❓ **用户问题**：{question}

🎯 **回答要求**：
1. 📖 提供准确专业的解答，深入分析概念
2. 📊 涉及数学公式时使用LaTeX格式（$...$包围行内公式，$$...$$包围块级公式）
3. 📝 结构化展示知识要点，使用Markdown格式
4. 💡 给出实用的学习建议和方法
5. 🎓 使用友好易懂的中文表达
6. 🔗 引导用户深入学习相关知识

请提供详细、专业而易懂的回答："""

        # DashScope text-generation request body ("message" result format).
        data = {
            "model": "qwen-turbo",
            "input": {
                "messages": [
                    {
                        "role": "system", 
                        "content": "你是一个专业的AI学习助手，擅长基于文档内容进行智能分析和知识解答。"
                    },
                    {
                        "role": "user", 
                        "content": prompt
                    }
                ]
            },
            "parameters": {
                "result_format": "message",
                "max_tokens": 1200,
                "temperature": 0.7,
                "top_p": 0.9,
                "repetition_penalty": 1.1
            }
        }

        print(f"🤖 调用通义千问API...")

        # Manual retry loop (exponential backoff on timeouts) on top of the
        # adapter-level retries above.
        max_retries = 3
        last_error = None

        for attempt in range(max_retries):
            try:
                print(f"📡 第{attempt + 1}次尝试连接通义千问...")
                response = session.post(
                    url, 
                    headers=headers, 
                    json=data, 
                    timeout=(15, 45)  # (connect, read) seconds
                )
                break
            except requests.exceptions.Timeout as e:
                last_error = f"连接超时: {e}"
                print(f"⏰ 第{attempt + 1}次超时: {e}")
                if attempt < max_retries - 1:
                    wait_time = 2 ** attempt
                    print(f"等待 {wait_time} 秒后重试...")
                    time.sleep(wait_time)
            except Exception as e:
                last_error = f"连接失败: {e}"
                print(f"❌ 第{attempt + 1}次连接失败: {e}")
                if attempt < max_retries - 1:
                    time.sleep(1)
        else:
            # All attempts failed; degrade gracefully to the rule-based path.
            return f"❌ 通义千问连接失败 ({last_error})，已自动切换到智能规则回答。"

        print(f"📊 通义千问响应状态: {response.status_code}")

        if response.status_code == 200:
            try:
                result = response.json()
                print(f"✅ 通义千问响应成功")

                # Parse either the "message" or the plain "text" result format.
                if "output" in result:
                    output = result["output"]
                    ai_response = ""

                    if "choices" in output and len(output["choices"]) > 0:
                        # "message" format
                        message = output["choices"][0].get("message", {})
                        ai_response = message.get("content", "")
                    elif "text" in output:
                        # "text" format
                        ai_response = output["text"]

                    if ai_response and ai_response.strip():
                        return ai_response + "\n\n---\n🤖 *由 阿里云通义千问 提供AI智能分析*"
                    else:
                        return "❌ 通义千问返回空内容，已自动切换到智能规则回答。"
                else:
                    print(f"❌ 通义千问响应格式异常: {result}")
                    return "❌ 通义千问响应格式异常，已自动切换到智能规则回答。"

            except json.JSONDecodeError as e:
                print(f"❌ 通义千问响应JSON解析失败: {e}")
                return "❌ 通义千问响应解析失败，已自动切换到智能规则回答。"
        else:
            error_msg = f"状态码: {response.status_code}"
            try:
                error_detail = response.json()
                if "message" in error_detail:
                    error_msg += f", 错误: {error_detail['message']}"
            except Exception:  # was a bare except; keep the best-effort intent
                error_msg += f", 响应: {response.text[:200]}"

            print(f"❌ 通义千问API调用失败: {error_msg}")
            return f"❌ 通义千问API调用失败 ({error_msg})，已自动切换到智能规则回答。"

    except Exception as e:
        error_type = type(e).__name__
        error_msg = str(e)
        print(f"❌ 通义千问调用异常: {error_type}: {error_msg}")
        return f"❌ 通义千问服务异常 ({error_type})，已自动切换到智能规则回答。"

def generate_smart_answer(question: str, results) -> str:
    """Rule-based answer assembled from retrieved chunks, styled by question type.

    Args:
        question: the user's question.
        results: list of (chunk_text, metadata) pairs from retrieval.
    """
    lowered = question.lower()
    all_content = " ".join(text for text, _ in results)  # kept for parity with original; unused below

    def _clip(raw: str, limit: int, ellipsis: bool) -> str:
        # Flatten newlines, trim, and cap the length (optionally with "...").
        flat = raw.replace('\n', ' ').strip()
        if ellipsis:
            return flat[:limit] + "..." if len(flat) > limit else flat
        return flat[:limit]

    summary_words = ('讲了什么', '内容是什么', '主要内容', '讲的是', '说了什么')
    how_words = ('怎么', '如何', '方法', '步骤')
    why_words = ('为什么', '原因', '为啥', '原理')

    if any(w in lowered for w in summary_words):
        # Content-overview style: numbered key points plus a canned summary.
        points = [
            f"**要点{idx}：** {_clip(text, 150, True)}"
            for idx, (text, _meta) in enumerate(results[:3], 1)
        ]
        return (
            "📄 **文档内容概述**\n\n"
            + "基于您上传的文档，我为您总结主要内容：\n\n"
            + "\n\n".join(points)
            + "\n\n💡 **总结：** 这份文档主要涉及数学科学期刊中关于矩阵分类和群成员关系的研究内容。"
        )

    if any(w in lowered for w in how_words):
        # How-to style.
        answer = "🔧 **方法指导**\n\n基于文档内容，为您提供相关方法：\n\n"
        for idx, (text, _meta) in enumerate(results[:2], 1):
            answer += f"{idx}. {_clip(text, 200, False)}\n\n"
        return answer

    if any(w in lowered for w in why_words):
        # Why/principle style.
        answer = "🤔 **原理解释**\n\n基于文档分析，相关原理如下：\n\n"
        for idx, (text, _meta) in enumerate(results[:2], 1):
            answer += f"**原理{idx}：** {_clip(text, 200, False)}\n\n"
        return answer

    # Generic fallback: list the top chunks verbatim.
    answer = "📚 **基于文档的回答**\n\n根据您上传的资料，为您整理相关信息：\n\n"
    for idx, (text, _meta) in enumerate(results[:3], 1):
        answer += f"**片段{idx}：** {_clip(text, 200, True)}\n\n"
    return answer

def generate_ai_answer(question: str, context: str = "") -> str:
    """Generate an answer via DeepSeek (preferred) or OpenAI.

    Returns the AI answer text with a provider footer, the sentinel string
    "FALLBACK_TO_ENHANCED" when no usable client/key exists, or an error
    report string when the call fails.

    NOTE(review): relies on `OPENAI_AVAILABLE`, `openai`, and
    `call_deepseek_directly`, none of which are defined in this chunk -
    confirm they are defined elsewhere in the module, otherwise this
    raises NameError at call time.
    """
    if not OPENAI_AVAILABLE:
        print("OpenAI库不可用，降级到增强规则回答")
        return "FALLBACK_TO_ENHANCED"
    
    # Prefer DeepSeek; fall back to OpenAI.
    deepseek_key = os.getenv("DEEPSEEK_API_KEY")
    openai_key = os.getenv("OPENAI_API_KEY")
    
    if not deepseek_key and not openai_key:
        print("未设置API密钥，降级到增强规则回答")
        return "FALLBACK_TO_ENHANCED"
    
    try:
        if deepseek_key:
            # DeepSeek via the OpenAI-compatible client; guard against
            # client-library version incompatibilities.
            try:
                # New-style (openai>=1.x) client
                client = openai.OpenAI(
                    api_key=deepseek_key,
                    base_url="https://api.deepseek.com"
                )
            except Exception as init_error:
                print(f"新版API初始化失败: {init_error}")
                # Fall back to a raw HTTP call.
                result = call_deepseek_directly(question, context, deepseek_key)
                if "DeepSeek调用异常" in result or "DeepSeek API调用失败" in result:
                    # DeepSeek failed entirely; use the enhanced rule-based answer.
                    print("DeepSeek完全失败，降级到增强规则回答")
                    if context:
                        return generate_enhanced_answer(question, [(context, {})])
                    else:
                        return generate_general_answer(question) + "\n\n---\n⚠️ *AI服务暂时不可用，使用基础智能回答*"
                return result
            
            model = "deepseek-chat"
            provider = "DeepSeek"
        else:
            # Fall back to OpenAI proper.
            client = openai.OpenAI(api_key=openai_key)
            model = "gpt-4o-mini"
            provider = "OpenAI"
        
        # Build the prompt - document-grounded when context is provided.
        if context:
            prompt = f"""作为一名专业的AI学习助手，请基于以下文档内容回答用户问题。

📄 **文档内容：**
{context[:4000]}

❓ **用户问题：** {question}

请用中文回答，要求：
1. 📖 准确理解用户问题的意图
2. 🎯 基于提供的文档内容进行回答
3. 💡 如果文档内容不足以完整回答，请说明并提供相关建议
4. 📝 回答要结构清晰、语言友好
5. ✨ 适当使用表情符号和Markdown格式增强可读性
6. 🔍 如果文档中有具体的数据、公式或专业术语，请重点解释"""
        else:
            prompt = f"""作为一名专业的AI学习助手，请回答用户的学习相关问题。

❓ **用户问题：** {question}

请用中文回答，要求：
1. 🎓 提供有用的学习建议或知识解答
2. ✅ 回答要准确、友好、富有启发性
3. 📚 结合实际学习场景给出建议
4. 💭 如果是复杂问题，请分步骤解答
5. 📤 如果需要更多信息，请引导用户上传相关资料
6. ✨ 使用合适的表情符号和格式"""

        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "你是一个专业的AI学习助手，擅长解答各种学习问题。你的回答风格友好专业，善于用结构化的方式解释复杂概念，并能够基于用户上传的文档提供精准的解答。"},
                {"role": "user", "content": prompt}
            ],
            max_tokens=1500,
            temperature=0.7,
            top_p=0.9
        )
        
        ai_response = response.choices[0].message.content
        
        # Append provider attribution.
        footer = f"\n\n---\n🤖 *由 {provider} 提供AI支持*"
        return ai_response + footer
        
    except Exception as e:
        error_msg = str(e)
        print(f"AI API调用失败详细信息: {error_msg}")
        print(f"错误类型: {type(e)}")
        
        # Return a detailed error report (used for debugging in the UI).
        return f"🚫 **AI服务调用失败**\n\n**错误信息:** {error_msg}\n\n**可能原因:**\n1. API Key无效或过期\n2. 网络连接问题\n3. OpenAI库版本不兼容\n4. DeepSeek服务暂时不可用\n\n**临时解决方案:** 系统将使用基础规则回答。"

def generate_enhanced_answer(question: str, results) -> str:
    """Enhanced rule-based answer with document understanding (no LLM needed).

    Args:
        question: the user's question.
        results: list of (chunk_text, metadata) pairs; metadata may carry
                 a "filename" key used for source attribution.

    Returns:
        A Markdown answer assembled from the top-ranked chunks, styled by
        detected question type (overview / method / conclusion / generic).

    BUG FIX: source attributions previously printed the literal string
    "(unknown)" even though the filename was already being looked up into an
    (unused) local; the computed `filename` is now interpolated.
    """
    question_lower = question.lower()

    # Concatenated chunk text, used for the keyword-driven summary line.
    all_texts = [text for text, _ in results]
    combined_text = " ".join(all_texts)

    if any(word in question_lower for word in ['讲了什么', '内容', '主要', '介绍', '说了什么', '关于什么']):
        answer = "📄 **文档内容分析**\n\n"
        answer += "基于您上传的文档，我为您分析主要内容：\n\n"

        # Top 3 most relevant chunks, flattened and truncated.
        for i, (text, meta) in enumerate(results[:3], 1):
            clean_text = text.replace('\n', ' ').replace('  ', ' ').strip()
            if len(clean_text) > 200:
                clean_text = clean_text[:200] + "..."

            filename = meta.get("filename", "文档")
            answer += f"**📋 要点 {i}** (来源: {filename})\n"
            answer += f"{clean_text}\n\n"

        # Keyword-driven one-line summary.
        if "数学" in combined_text or "mathematics" in combined_text.lower():
            answer += "💡 **智能总结:** 这份文档涉及数学相关内容，包含理论分析和研究方法。"
        elif "algorithm" in combined_text.lower() or "算法" in combined_text:
            answer += "💡 **智能总结:** 这份文档讨论算法相关内容，包含技术实现和应用场景。"
        elif "journal" in combined_text.lower() or "期刊" in combined_text:
            answer += "💡 **智能总结:** 这是一篇学术期刊文章，包含研究背景、方法和结论。"
        else:
            answer += "💡 **智能总结:** 文档包含专业内容，建议仔细阅读各要点以获得完整理解。"

    elif any(word in question_lower for word in ['方法', '如何', '怎么', '步骤']):
        answer = "🔧 **方法分析**\n\n"
        answer += "基于文档内容，为您提取相关方法：\n\n"
        for i, (text, _) in enumerate(results[:2], 1):
            clean_text = text.replace('\n', ' ').strip()[:300]
            answer += f"**方法 {i}:** {clean_text}\n\n"

    elif any(word in question_lower for word in ['结论', '结果', '总结', '发现']):
        answer = "🎯 **结论要点**\n\n"
        answer += "从文档中提取的关键结论：\n\n"
        for i, (text, _) in enumerate(results[:3], 1):
            clean_text = text.replace('\n', ' ').strip()[:250]
            answer += f"• **结论 {i}:** {clean_text}\n\n"

    else:
        answer = "🔍 **文档检索结果**\n\n"
        answer += f"针对您的问题「{question}」，从文档中找到以下相关信息：\n\n"

        for i, (text, meta) in enumerate(results[:4], 1):
            clean_text = text.replace('\n', ' ').strip()
            if len(clean_text) > 180:
                clean_text = clean_text[:180] + "..."

            filename = meta.get("filename", "文档")
            answer += f"**🔖 相关内容 {i}** (来源: {filename})\n"
            answer += f"{clean_text}\n\n"

    answer += "\n---\n🤖 *基于文档智能分析提供*"
    return answer

@app.get("/health")
def health():
    """Liveness probe: unconditionally reports the service as up."""
    return {"ok": True}

@app.get("/ai/health")
def ai_health():
    """AI health check endpoint (kept compatible with the frontend proxy).

    Probes the vector store; reports component status in the payload rather
    than via HTTP error codes.
    """
    try:
        rag._vectorstore("test")  # probe the vector store connection
        print("向量数据库连接正常")
    except Exception as e:
        print(f"健康检查失败: {e}")
        return {
            "ok": False,
            "message": f"AI服务异常: {str(e)}",
            "vector_db": "异常",
            "embeddings": "异常"
        }
    return {
        "ok": True, 
        "message": "AI服务运行正常",
        "vector_db": "正常",
        "embeddings": "正常"
    }

@app.post("/ai/docs/upload")
async def upload_doc(
    file: UploadFile = File(...),
    userId: Optional[int] = Form(None),
    courseId: Optional[int] = Form(None)
):
    """Upload a document and ingest it into the vector store.

    Returns the chunk count on success. Raises 400 for unsupported file
    extensions and 500 for ingestion failures.
    """
    tmp_path = None
    try:
        suffix = os.path.splitext(file.filename)[1].lower()
        supported_formats = [".pdf", ".txt", ".docx", ".doc", ".md", ".json", ".csv"]
        if suffix not in supported_formats:
            raise HTTPException(status_code=400, detail=f"支持的格式: PDF, TXT, DOCX, DOC, MD, JSON, CSV，您上传的是: {suffix}")

        # Persist the upload to a temp file so the ingestor can read it from disk.
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            content = await file.read()
            tmp.write(content)
            tmp_path = tmp.name

        # Ingest and report the resulting chunk count.
        file_size = len(content)
        print(f"开始处理文件: {file.filename}, 大小: {file_size} bytes")
        count = rag.ingest_file(tmp_path, file.filename, userId, courseId, file_size)
        os.unlink(tmp_path)
        tmp_path = None  # consumed successfully; nothing left to clean up

        return {
            "ok": True,
            "chunks": count,
            "filename": file.filename,
            "message": f"文档 {file.filename} 上传成功，已切分为 {count} 个片段"
        }
    except HTTPException:
        # BUG FIX: the 400 raised above was previously caught by the generic
        # handler below and re-surfaced as a 500; propagate client errors as-is.
        raise
    except Exception as e:
        # Best-effort removal of the temp file on failure.
        if tmp_path:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass

        error_msg = str(e)
        print(f"文件上传失败: {file.filename}, 错误: {error_msg}")

        # Classify the failure for a friendlier error message.
        if "No module named" in error_msg:
            detail = f"缺少必要的依赖库: {error_msg}"
        elif "file_path" in error_msg or "FileNotFoundError" in error_msg:
            detail = f"文件读取失败: {error_msg}"
        elif "encoding" in error_msg:
            detail = f"文件编码问题: {error_msg}"
        else:
            detail = f"文件处理失败: {error_msg}"

        raise HTTPException(status_code=500, detail=detail)

@app.post("/ai/docs/clear")
async def clear_docs(
    userId: Optional[int] = Form(None),
    courseId: Optional[int] = Form(None)
):
    """Clear the document store for the given user/course scope."""
    try:
        print(f"收到清空请求: userId={userId}, courseId={courseId}")
        rag.clear_collection(userId, courseId)
    except Exception as e:
        print(f"清空文档库失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"清空失败: {str(e)}")
    return {
        "ok": True,
        "message": "文档库已清空"
    }

@app.delete("/ai/docs/{doc_id}")
async def delete_doc(
    doc_id: str,
    userId: Optional[int] = None,
    courseId: Optional[int] = None
):
    """Delete a single document (all of its chunks) from the store.

    Raises 404 when the document is not found, 500 on store errors.
    """
    try:
        print(f"🗑️ 删除文档: {doc_id}, userId={userId}, courseId={courseId}")
        success = rag.delete_document(doc_id, userId, courseId)
        
        if success:
            return {
                "ok": True,
                "message": f"文档删除成功",
                "doc_id": doc_id
            }
        else:
            raise HTTPException(status_code=404, detail="文档未找到")
            
    except HTTPException:
        # BUG FIX: the 404 raised above was previously re-wrapped by the
        # generic handler below into a 500; propagate client errors unchanged.
        raise
    except Exception as e:
        print(f"❌ 删除文档失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"删除文档失败: {str(e)}")

@app.post("/ai/docs/force-clear")
async def force_clear_all():
    """Wipe the entire Chroma persistence directory and recreate it empty."""
    try:
        import shutil
        from pathlib import Path

        store_root = Path("app/.chroma")
        if store_root.exists():
            shutil.rmtree(store_root)
            store_root.mkdir(parents=True, exist_ok=True)
            print("所有文档库已强制清空")

        return {
            "ok": True,
            "message": "所有文档库已强制清空"
        }
    except Exception as e:
        print(f"强制清空失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"强制清空失败: {str(e)}")

@app.get("/ai/docs/list")
async def list_docs(
    userId: Optional[int] = None,
    courseId: Optional[int] = None
):
    """List all documents in the store for the given user/course scope."""
    try:
        print(f"📋 请求文档列表: userId={userId}, courseId={courseId}")
        documents = rag.list_documents(userId, courseId)
        print(f"📋 返回文档数量: {len(documents)}")
        for entry in documents:
            print(f"  - {entry['filename']} (id: {entry['id']}, chunks: {entry['chunk_count']})")

        return {
            "ok": True,
            "documents": documents,
            "count": len(documents)
        }
    except Exception as e:
        print(f"❌ 列出文档失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"列出文档失败: {str(e)}")

@app.get("/ai/debug/collections")
async def debug_collections():
    """Debug endpoint: enumerate every collection in the Chroma vector store."""
    try:
        import chromadb
        from pathlib import Path

        persist_dir = "app/.chroma"
        collection_info = []

        if Path(persist_dir).exists():
            print(f"🔍 检查向量数据库目录: {persist_dir}")

            chroma_client = chromadb.PersistentClient(path=persist_dir)
            for coll in chroma_client.list_collections():
                doc_count = coll.count()
                collection_info.append({
                    "name": coll.name,
                    "count": doc_count,
                    "metadata": coll.metadata
                })
                print(f"📊 集合: {coll.name}, 文档数量: {doc_count}")

        return {
            "ok": True,
            "persist_dir": persist_dir,
            "collections": collection_info,
            "total_collections": len(collection_info)
        }

    except Exception as e:
        print(f"❌ 调试失败: {str(e)}")
        return {
            "ok": False,
            "error": str(e),
            # persist_dir stays unset if the chromadb import itself failed
            "persist_dir": persist_dir if 'persist_dir' in locals() else "unknown"
        }

@app.post("/ai/ask")
async def ask_question(request: AskRequest, http_request: Request):
    """Q&A endpoint - backward compatible with the legacy API, AI-enhanced.

    Rate-limits per (user, course, client IP), caps concurrency globally,
    retrieves matching chunks from the vector store, and answers with the
    best available AI backend (degrading to rule-based answers).
    Returns {"answer": str, "source_docs": list[dict]}.
    """
    try:
        # Rate limiting and concurrency control.
        client_ip = http_request.client.host if http_request.client else "0.0.0.0"
        key = _rate_key(request.userId, request.courseId, client_ip)
        allowed, retry_after = check_rate_limit(key, AI_ASK_RPM)
        if not allowed:
            raise HTTPException(status_code=429, detail=f"请求过于频繁，请 {retry_after} 秒后再试")
        async with ask_semaphore:
            print(f"📥 收到问答请求: userId={request.userId}, courseId={request.courseId}")
            print(f"❓ 问题: {request.question}")
            logger.info(f"🧠 智能问答: {request.question[:50]}...")
            
            # Retrieve relevant document chunks (optionally restricted to docIds).
            results = rag.query(
                request.question,
                request.userId,
                request.courseId,
                top_k=request.topK,
                allowed_doc_ids=request.docIds
            )
            print(f"🔍 检索结果数量: {len(results)}")
            
            if results:
                for i, (text, meta, _score) in enumerate(results[:2]):
                    print(f"📄 片段{i+1}: {text[:100]}... (来源: {meta.get('filename', '未知')})")
            
            logger.info(f"🔍 检索到 {len(results)} 个相关文档片段")
            
            if results and (request.useAI is None or request.useAI is True):
                # Build the retrieval context for the model.
                context = "\n\n".join([
                    f"📄 {meta.get('filename', '未知文档')}: {text[:500]}"
                    for text, meta, _score in results[:3]
                ])
                
                # Answer with the best available backend.
                ai_response = await unified_ai.generate_response(
                    prompt=request.question,
                    context=context,
                    model="auto"
                )
                
                answer = ai_response["content"]
            else:
                # No documents, or AI explicitly disabled: rule-based answer.
                try:
                    simple_results = [(text, meta) for (text, meta, _score) in results] if results else []
                except Exception:
                    simple_results = []
                if simple_results:
                    answer = generate_enhanced_answer(request.question, simple_results)
                else:
                    # No documents at all: generic advice (still via the AI
                    # dispatcher, which degrades to rules when unconfigured).
                    ai_response = await unified_ai.generate_response(
                        prompt=request.question,
                        context="",
                        model="auto"
                    )
                    answer = ai_response["content"] + "\n\n💡 *如需基于特定资料的回答，请上传相关文档*"
            
            # Build the citation list for the response payload.
            source_docs = []
            for i, (text, meta, score) in enumerate(results, 1):
                filename = meta.get("filename", "未知文档")
                preview = text[:120].replace("\n", " ").strip()
                if len(text) > 120:
                    preview += "..."
                item = {
                    "index": i,
                    "filename": filename,
                    "preview": preview,
                    "doc_id": meta.get("doc_id"),
                    "score": score
                }
                source_docs.append(item)
            
            return {"answer": answer, "source_docs": source_docs}
        
    except HTTPException:
        # BUG FIX: the 429 raised above was previously swallowed by the
        # generic handler below and surfaced as a 500; propagate unchanged.
        raise
    except Exception as e:
        logger.error(f"❌ 问答失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"问答处理失败: {str(e)}")

@app.post("/ai/query/intelligent")
async def intelligent_query(request: IntelligentQueryRequest):
    """Intelligent Q&A endpoint (IMA-enhanced).

    Retrieves relevant chunks from the user's knowledge base, then either
    asks the configured AI model (prompt wording adjusted by ``queryMode``)
    or falls back to a rule-based summary when AI is disabled or retrieval
    returns nothing.

    Returns a ``{"success", "data"}`` envelope whose ``data`` carries the
    answer, citation strings, query mode, result count and AI metadata.
    Raises HTTP 500 on any internal failure.
    """
    try:
        logger.info(f"🧠 智能查询: {request.question[:50]}... (模式: {request.queryMode})")

        # Retrieve candidate chunks scoped to this user/course.
        results = rag.query(
            request.question,
            request.userId,
            request.courseId,
            top_k=request.topK
        )

        if request.useAI and results:
            # Build the AI context from the top-3 chunks, truncated to 500 chars each.
            context = "\n\n".join([
                f"📄 {meta.get('filename', '未知文档')}: {text[:500]}"
                for text, meta, _score in results[:3]
            ])

            # Adjust prompt wording according to the requested query mode.
            if request.queryMode == "precise":
                prompt = f"请基于提供的文档内容，准确回答问题：{request.question}"
            elif request.queryMode == "creative":
                prompt = f"基于文档内容，创造性地回答问题：{request.question}"
            else:  # hybrid
                prompt = f"综合文档内容和相关知识，全面回答问题：{request.question}"

            # Call the unified AI backend with the caller-selected model.
            ai_response = await unified_ai.generate_response(
                prompt=prompt,
                context=context,
                model=request.aiModel
            )

            answer = ai_response["content"]
            ai_info = {
                "model_used": ai_response["model_used"],
                "success": ai_response["success"]
            }
        else:
            # Fallback path: rule-based answer assembled from raw chunks.
            if results:
                answer_parts = ["📚 **基于文档的智能分析**\n\n"]
                for i, (text, meta, _score) in enumerate(results[:3], 1):
                    filename = meta.get("filename", "未知文档")
                    clean_text = text.replace('\n', ' ').strip()[:300]
                    # BUG FIX: the source name was hard-coded as "(unknown)" even
                    # though `filename` is computed above — interpolate it.
                    answer_parts.append(f"**📄 片段 {i}** (来源: {filename})\n{clean_text}\n")
                answer_parts.append("\n💡 *启用AI模型可获得更智能的分析*")
                answer = "\n".join(answer_parts)
            else:
                answer = f"抱歉，在您的知识库中没有找到与「{request.question}」相关的内容。\n\n建议：\n1. 尝试使用不同的关键词\n2. 上传相关文档到知识库"

            ai_info = {"fallback_used": True}

        # Build one citation string per retrieved chunk.
        source_docs = []
        for i, (text, meta, _score) in enumerate(results, 1):
            filename = meta.get("filename", "未知文档")
            preview = text[:120].replace("\n", " ").strip()
            if len(text) > 120:
                preview += "..."
            # BUG FIX: interpolate the actual filename instead of the literal "(unknown)".
            source_docs.append(f"📄 [{i}] {filename}: {preview}")

        return {
            "success": True,
            "data": {
                "answer": answer,
                "source_docs": source_docs,
                "query_mode": request.queryMode,
                "result_count": len(results),
                "ai_info": ai_info
            }
        }

    except Exception as e:
        logger.error(f"❌ 智能查询失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"查询失败: {str(e)}")

@app.post("/ai/writing/assist")
async def writing_assistant(request: WritingAssistRequest):
    """AI writing assistant: summarize / expand / translate / polish / outline.

    Builds a task-specific prompt from the request, forwards it to the
    unified AI backend, and returns the generated text in a
    ``{"success", "data"}`` envelope. Raises HTTP 500 on failure.
    """
    try:
        logger.info(f"✍️ 写作助手: {request.task}")

        # Resolve the translation target language once, up front.
        target_language = '中文' if request.language == 'zh' else '英文'

        # One prompt template per supported writing task.
        prompt_by_task = {
            "summarize": f"请为以下内容生成简洁的摘要：\n\n{request.content}",
            "expand": f"请将以下内容进行扩展和详细阐述：\n\n{request.content}",
            "translate": f"请将以下内容翻译为{target_language}：\n\n{request.content}",
            "polish": f"请优化以下内容的表达，使其更加{get_style_desc(request.style)}：\n\n{request.content}",
            "outline": f"请为以下内容生成详细的大纲结构：\n\n{request.content}"
        }

        # Unknown task names fall back to the "polish" template.
        chosen_prompt = prompt_by_task.get(request.task, prompt_by_task["polish"])

        ai_response = await unified_ai.generate_response(
            prompt=chosen_prompt,
            model="auto"
        )

        return {
            "success": True,
            "data": {
                "result": ai_response["content"],
                "task": request.task,
                "style": request.style,
                "language": request.language,
                "model_used": ai_response["model_used"]
            }
        }

    except Exception as e:
        logger.error(f"❌ 写作助手失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"写作助手失败: {str(e)}")

# === Helper functions ===

def get_style_desc(style: str) -> str:
    """Return the Chinese description for a writing style keyword.

    Unrecognized styles fall back to the generic description "清晰和易懂".
    """
    descriptions = {
        "formal": "正式和专业",
        "casual": "轻松和自然",
        "academic": "学术和严谨",
        "creative": "创意和生动",
    }
    if style in descriptions:
        return descriptions[style]
    return "清晰和易懂"

@app.get("/ai/models/available")
async def get_available_models():
    """Report which AI models are available, plus the current best choice."""
    # Per-model availability flags, one entry per configured backend.
    model_flags = {}
    for model_name, model_config in unified_ai.models.items():
        model_flags[model_name] = {"available": model_config["available"]}

    payload = {
        "available_models": unified_ai.get_available_models(),
        "best_model": unified_ai.get_best_model(),
        "models": model_flags,
    }
    return {"success": True, "data": payload}

if __name__ == "__main__":
    import uvicorn

    # Startup banner: service description followed by the endpoint list.
    startup_lines = [
        "🚀 启动统一IMA风格AI服务...",
        "🎯 单端口 8000，全功能集成",
        "📡 兼容原有API，支持IMA增强功能",
        "🤖 多模型支持：通义千问、DeepSeek、OpenAI",
        "🔧 智能降级：AI失败时自动切换到规则回答",
        "\n📋 API接口：",
        "  /ai/health - 健康检查",
        "  /ai/docs/upload - 文档上传",
        "  /ai/ask - 问答接口（兼容原版）",
        "  /ai/query/intelligent - 智能问答（IMA增强）",
        "  /ai/writing/assist - 写作助手",
        "  /ai/models/available - 可用模型",
        "\n⏳ 启动中...",
    ]
    for banner_line in startup_lines:
        print(banner_line)

    # Serve on all interfaces, single port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
