﻿from __future__ import annotations

from typing import Dict, List, Tuple

# Temporarily fall back to the original import path to avoid environment issues.
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

from ingest import build_embeddings
from settings import load_settings
from adapter.diting_optimization import (
    DitingOptimizationClient,
    OptimizationClientError,
)


class RAGService:
    """Answer questions with retrieval-augmented generation over a persisted store."""

    def __init__(self) -> None:
        """Wire up embeddings, the Chroma store, the prompt template, and the chat model."""
        self.settings = load_settings()
        self.embeddings = build_embeddings(
            api_key=self.settings.api_key or "",
            api_base=self.settings.api_base,
            model=self.settings.embed_model,
        )
        self.vectorstore = Chroma(
            persist_directory=str(self.settings.persist_dir),
            embedding_function=self.embeddings,
        )
        self.prompt = PromptTemplate(
            input_variables=["context", "question"],
            template=(
                "你是一名专业助手，请基于下列检索结果回答用户问题。\n\n"
                "【检索内容】\n{context}\n\n"
                "【问题】\n{question}\n\n"
                "请用中文给出准确、简洁的回答。如无法从文档中得出结论，请明确说明。"
            ),
        )

        # Credentials and endpoint are optional; forward them only when configured.
        chat_kwargs: Dict[str, object] = {
            "model": self.settings.llm_model,
            "temperature": self.settings.temperature,
        }
        if self.settings.api_key:
            chat_kwargs["api_key"] = self.settings.api_key
        if self.settings.api_base:
            chat_kwargs["base_url"] = self.settings.api_base
        self.llm = ChatOpenAI(**chat_kwargs)

        self._optimization_client = DitingOptimizationClient(self.settings)

    def query(self, question: str, *, with_metadata: bool = True) -> Tuple[str, List[Dict[str, object]]]:
        """Answer *question* in a single RAG turn.

        Returns the model's answer text plus, when *with_metadata* is true,
        one record per retrieved hit (source path, score, full metadata).
        Raises ValueError for a blank question or an empty retrieval result.
        """
        text = question.strip()
        if not text:
            raise ValueError("Question must not be empty.")

        hits = self.vectorstore.similarity_search_with_score(
            text, k=self.settings.search_k
        )
        if not hits:
            raise ValueError("No matching context found. Run ingest.py to build the store.")

        retrieved = "\n\n".join(doc.page_content for doc, _ in hits)
        rendered = self.prompt.format(context=retrieved, question=text)

        response = self.llm.invoke(rendered)
        if hasattr(response, "content"):
            answer = response.content
        else:
            answer = str(response)

        # Source metadata is opt-out via the keyword-only flag.
        sources: List[Dict[str, object]] = (
            [
                {
                    "source": doc.metadata.get("source"),
                    "score": score,
                    "metadata": dict(doc.metadata),
                }
                for doc, score in hits
            ]
            if with_metadata
            else []
        )

        return answer, sources

    def optimize_pipeline(self, payload: Dict[str, object]) -> Tuple[Dict[str, object], Dict[str, object]]:
        """Request best parameters from diting optimization service and return them."""
        try:
            outcome = self._optimization_client.optimize_rag(payload)
        except OptimizationClientError as exc:
            raise RuntimeError(f"Optimization service error: {exc}") from exc

        best_parameters, full_response = outcome
        return best_parameters, full_response
