"""
RAG服务
负责实现检索增强生成(Retrieval-Augmented Generation)
"""
import logging
from typing import List

from django.conf import settings
from django.db.models import Q
from openai import OpenAI

from apps.knowledge.models import Document
from apps.knowledge.vector_store import VectorStoreService

logger = logging.getLogger('pioneer_xiaoke')


class RAGService:
    """Retrieval-Augmented Generation (RAG) service.

    Retrieves relevant documents for a question (vector search with a
    keyword-search fallback) and asks the DeepSeek chat API to answer
    using the retrieved text as context.
    """

    def __init__(self):
        # DeepSeek exposes an OpenAI-compatible API, so the OpenAI client
        # is reused with a custom base URL.
        self.api_key = settings.DEEPSEEK_API_KEY
        self.api_base = settings.DEEPSEEK_API_BASE
        self.client = OpenAI(api_key=self.api_key, base_url=self.api_base)
        # Created lazily on first use; see _get_vector_service().
        self._vector_service = None

    def generate_answer(self, question, user, knowledge_base_id=None, use_rag=True, top_k=3):
        """Generate an answer for *question*, optionally grounded in documents.

        Args:
            question: The user's question text.
            user: Django user object; scopes document retrieval.
            knowledge_base_id: Knowledge base to search; retrieval is
                skipped when this is falsy.
            use_rag: Whether to retrieve documents as context.
            top_k: Maximum number of documents to retrieve.

        Returns:
            dict: Keys ``answer``, ``retrieved_docs``, ``model`` and ``tokens``.

        Raises:
            Exception: When the DeepSeek API call fails.
        """
        retrieved_docs = []

        # Retrieval only makes sense when RAG is requested and a
        # knowledge base is specified.
        if use_rag and knowledge_base_id:
            retrieved_docs = self._retrieve_documents(
                question=question,
                user=user,
                knowledge_base_id=knowledge_base_id,
                top_k=top_k
            )

        # Build the textual context, then ask the model.
        context = self._build_context(retrieved_docs)
        answer_data = self._call_deepseek_api(question, context)

        return {
            'answer': answer_data['content'],
            'retrieved_docs': retrieved_docs,
            'model': answer_data.get('model', 'deepseek-chat'),
            'tokens': answer_data.get('tokens')
        }

    def _retrieve_documents(self, question, user, knowledge_base_id=None, top_k=3):
        """Retrieve documents, preferring vector search over keyword search.

        Falls back to keyword search when vector search errors out or
        returns no hits.
        """
        if not knowledge_base_id:
            return []

        vector_docs = self._vector_search(question, user, knowledge_base_id, top_k)
        if vector_docs:
            return vector_docs

        return self._keyword_search(question, user, knowledge_base_id, top_k)

    def _get_vector_service(self):
        """Create the vector store service on first use and cache it."""
        if self._vector_service is None:
            self._vector_service = VectorStoreService()
        return self._vector_service

    def _vector_search(self, question, user, knowledge_base_id, top_k) -> List[dict]:
        """Query the vector store and map hits back to Document rows.

        Returns an empty list on any vector-store error so the caller
        can fall back to keyword search.
        """
        try:
            service = self._get_vector_service()
            results = service.query(knowledge_base_id, question, top_k=top_k)
        except Exception as exc:
            logger.error("向量检索失败: %s", exc)
            return []

        if not results:
            return []

        # Deduplicated set of referenced document ids for a single DB query.
        document_ids = {
            item.get('metadata', {}).get('document_id')
            for item in results
            if item.get('metadata', {}).get('document_id')
        }
        if not document_ids:
            return []

        # Filtering by user prevents leaking other users' documents even
        # if the vector index contains stale entries.
        docs = Document.objects.filter(id__in=document_ids, user=user, status='completed')
        doc_map = {doc.id: doc for doc in docs}

        retrieved_docs = []
        for item in results:
            metadata = item.get('metadata', {})
            doc = doc_map.get(metadata.get('document_id'))
            if not doc:
                # Stale vector entry, or document not visible to this user.
                continue

            retrieved_docs.append({
                'document_id': doc.id,
                'title': doc.title,
                'content': item.get('content', '')[:800],  # cap context size
                'score': self._distance_to_score(item.get('distance')),
                'chunk_index': metadata.get('chunk_index'),
                'vector_id': item.get('vector_id'),
            })

        logger.info("向量检索命中 %s 条结果", len(retrieved_docs))
        return retrieved_docs

    def _keyword_search(self, question, user, knowledge_base_id, top_k):
        """Naive keyword search over Document title/content.

        Best-effort fallback: any error is logged and an empty list is
        returned rather than raised.
        """
        try:
            base_query = Q(user=user, status='completed', knowledge_base_id=knowledge_base_id)

            # Use at most the first 5 whitespace-separated tokens as keywords.
            keywords = question.split()[:5]
            if not keywords:
                return []

            content_query = Q()
            for keyword in keywords:
                content_query |= Q(content__icontains=keyword) | Q(title__icontains=keyword)

            documents = Document.objects.filter(base_query & content_query)[:top_k]
            retrieved_docs = []
            for doc in documents:
                retrieved_docs.append({
                    'document_id': doc.id,
                    'title': doc.title,
                    'content': doc.content[:500] if doc.content else "",
                    # Fixed score: keyword matching provides no ranking signal.
                    'score': 0.5,
                })

            logger.info("关键词检索命中 %s 条结果", len(retrieved_docs))
            return retrieved_docs

        except Exception as exc:
            logger.error("关键词检索失败: %s", exc)
            return []

    @staticmethod
    def _distance_to_score(distance):
        """Convert a Chroma distance (smaller is better) to a [0, 1] score.

        Returns 0.0 when no distance is available.
        """
        if distance is None:
            return 0.0
        # Clamp both ends so the documented [0, 1] contract always holds,
        # even for out-of-range distances.
        score = min(1.0, max(0.0, 1 - float(distance)))
        return round(score, 4)

    def _build_context(self, retrieved_docs):
        """Join retrieved document contents into a numbered context block.

        Args:
            retrieved_docs: List of retrieval result dicts (``content`` key).

        Returns:
            str: Context text, or "" when nothing was retrieved.
        """
        if not retrieved_docs:
            return ""

        context_parts = []
        for i, doc in enumerate(retrieved_docs, 1):
            context_parts.append(f"[文档{i}]\n{doc.get('content', '')}\n")

        return "\n".join(context_parts)

    def _call_deepseek_api(self, question, context):
        """Call the DeepSeek chat API and return the generated answer.

        Returns:
            dict: Keys ``content``, ``model`` and ``tokens``.

        Raises:
            Exception: Wraps any API failure with a user-facing message.
        """
        try:
            # System prompt fixing the assistant's persona and duties.
            system_prompt = """你是"先锋小科"智能学习助手，专门帮助大学生解答学习问题。

你的职责：
1. 优先基于提供的参考资料回答问题
2. 如果参考资料不足，可以结合你的知识补充回答
3. 回答要准确、简洁、易懂
4. 使用友好的语气，像学长/学姐一样帮助学生
5. 对于复杂问题，可以分步骤讲解"""

            # User prompt: include retrieved context only when present.
            if context:
                user_prompt = f"""参考资料：
{context}

---
学生的问题：{question}

请基于上述参考资料回答问题。如果参考资料中没有相关信息，请基于你的知识回答，并明确说明这部分内容来自你的知识库而非参考资料。"""
            else:
                user_prompt = f"""学生的问题：{question}

请帮助回答这个问题。"""

            logger.info("调用DeepSeek API，问题: %s...", question[:50])

            response = self.client.chat.completions.create(
                model="deepseek-chat",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                temperature=0.7,
                max_tokens=2000,
                stream=False  # could be switched to True later for streaming
            )

            answer_content = response.choices[0].message.content
            # `usage` is Optional in the OpenAI SDK; guard against both a
            # missing attribute and an explicit None.
            usage = getattr(response, 'usage', None)
            total_tokens = usage.total_tokens if usage else None

            logger.info("DeepSeek API 调用成功，使用 tokens: %s", total_tokens)

            return {
                'content': answer_content,
                'model': response.model,
                'tokens': total_tokens
            }

        except Exception as e:
            logger.error("调用DeepSeek API失败: %s", str(e))
            # Chain the original cause for debuggability.
            raise Exception(f"AI服务暂时不可用，请稍后再试。错误信息: {str(e)}") from e
