import torch
from typing import List, Tuple, AsyncGenerator
from sqlalchemy.ext.asyncio import AsyncSession

from .ollama_service import OllamaService
from ..schemas.schemas import (
    ThinkingMessage, TextChunkMessage, ImageInfoMessage, ErrorMessage, EndMessage
)
from ..utils.rag_util import retrieve_image_and_text_vectors, get_embedding
from core.config import settings
from src.modules.ai_models.model_loaders.model_loader import get_reranker_model

class RAGService:
    """Core RAG (retrieval-augmented generation) business service.

    Pipeline: vector retrieval -> cross-encoder reranking (BGE) ->
    prompt construction -> streamed LLM answer emitted as SSE frames.
    """

    def __init__(self):
        self.ollama_service = OllamaService()
        # Reranker components stay None if loading fails, so the pipeline
        # degrades gracefully (see _rerank_texts fallback).
        self.tokenizer = None
        self.reranker = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._load_reranker_model()

    def _load_reranker_model(self):
        """Load the BGE reranker; on failure leave tokenizer/model as None."""
        try:
            reranker_model = get_reranker_model(settings.BGE_RERANKER_PATH)
            self.tokenizer = reranker_model.tokenizer
            # FIX: move the model to the selected device and switch to eval
            # mode. _rerank_texts sends input tensors to self.device, so a
            # CPU-resident model would raise a device-mismatch error on CUDA
            # hosts; .to() is a no-op if the model is already there, and
            # eval() disables dropout for deterministic scoring.
            self.reranker = reranker_model.model.to(self.device).eval()
            print(f"✅ BGE Reranker 模型已加载: {settings.BGE_RERANKER_PATH} (device: {self.device})")
        except Exception as e:
            print(f"❌ 加载 BGE Reranker 模型失败: {e}")
            self.tokenizer = self.reranker = None

    @staticmethod
    def _sse(message) -> str:
        """Serialize a pydantic message object as one SSE `data:` frame."""
        return f"data: {message.model_dump_json()}\n\n"

    async def process_rag_query(
            self,
            know_base_ids: List[str],
            question: str,
            session: "AsyncSession"
    ) -> AsyncGenerator[str, None]:
        """Run the RAG pipeline for one question, yielding SSE frames.

        Args:
            know_base_ids: Knowledge-base ids to search.
            question: The user's question; must be non-blank.
            session: Async DB session used by the retrieval layer.

        Yields:
            SSE-formatted strings: thinking marker, text chunks, image
            metadata and an end marker — or a single error frame.
        """
        # Validate input before touching the database.
        if not question or not question.strip():
            yield self._sse(ErrorMessage(message="问题不能为空"))
            return
        try:
            # 1. Retrieve candidate text and image vectors.
            texts, images = await retrieve_image_and_text_vectors(
                know_base_ids, question, session
            )
            # 2. Rerank retrieved texts (keeps the top 3).
            reranked_texts = self._rerank_texts(question, texts)
            # 3. Build the final prompt from context + question.
            prompt = self._build_rag_prompt(question, reranked_texts, images)
            # 4. Stream the generated answer to the client.
            async for chunk in self._generate_stream_response(prompt, images):
                yield chunk

        except Exception as e:
            # Surface pipeline failures to the client as an error frame
            # rather than silently closing the stream.
            yield self._sse(ErrorMessage(message=str(e)))

    def _rerank_texts(self, question: str, texts: List[str]) -> List[str]:
        """Score (question, text) pairs with the cross-encoder and return
        the top-3 texts by relevance.

        Falls back to the first three candidates when the reranker is not
        loaded, the candidate list is empty, or scoring raises.
        """
        if not texts or self.tokenizer is None or self.reranker is None:
            return texts[:3]

        try:
            with torch.no_grad():
                sentence_pairs = [[question, text] for text in texts]
                features = self.tokenizer(
                    sentence_pairs, padding=True, truncation=True,
                    return_tensors="pt", max_length=512
                )
                features = {k: v.to(self.device) for k, v in features.items()}
                # FIX: view(-1) instead of squeeze(dim=-1). For the usual
                # (N, 1) logits both give (N,), but if the model ever emits
                # 1-D logits for a single pair, squeeze would produce a
                # 0-dim tensor whose .tolist() is a bare float and break
                # the zip below; view(-1) always yields a 1-D tensor.
                scores = self.reranker(**features).logits.view(-1)
                scores = scores.float().cpu().tolist()

                # Sort candidates by descending score, keep the top 3.
                scored_texts = sorted(zip(texts, scores), key=lambda x: x[1], reverse=True)
                return [text for text, _ in scored_texts[:3]]
        except Exception as e:
            print(f"⚠️ Reranking 过程出错: {e}")
            return texts[:3]

    def _build_rag_prompt(self, question: str, texts: List[str], images: List[Tuple[str, str]]) -> str:
        """Assemble the LLM prompt: reranked texts plus numbered image
        descriptions as context, followed by the question."""
        context_parts = list(texts)

        # Append image descriptions so the model can reference them by index.
        for i, (desc, _img_path) in enumerate(images):
            context_parts.append(f"图片{i + 1}: {desc}")

        context = "\n".join(context_parts)

        return f"""你是一个知识助手，请根据以下内容回答问题。请注意：
                    1. 请以图文结合的形式组织回答
                    2. 使用"下面的图片展示了..."这样的句式引入图片描述
                    3. 使用Markdown格式，确保图片引用格式正确
                    4. 专注于文本内容的分析和总结

                    上下文：
                    {context}

                    问题：
                    {question}

                    请提供图文结合的回答，按照"描述 + 图片引用"的格式组织内容："""

    async def _generate_stream_response(
            self,
            prompt: str,
            images: List[Tuple[str, str]]
    ) -> AsyncGenerator[str, None]:
        """Stream the answer as SSE frames: thinking marker, text chunks,
        image metadata (if any), then the end marker."""
        # Tell the client generation has started.
        yield self._sse(ThinkingMessage())

        # Relay the model's output chunk by chunk.
        async for chunk in self.ollama_service.generate_stream(prompt):
            yield self._sse(TextChunkMessage(text=chunk))

        # Attach metadata for every retrieved image. `url` mirrors `path`
        # — presumably the frontend resolves it; TODO confirm with caller.
        if images:
            images_meta = [
                {
                    "index": i + 1,
                    "description": desc,
                    "path": img_path,
                    "url": img_path,
                }
                for i, (desc, img_path) in enumerate(images)
            ]
            yield self._sse(ImageInfoMessage(images=images_meta))

        # Terminal frame so the client can close the stream.
        yield self._sse(EndMessage())