"""
Query service for RAG functionality.
"""
import os
from typing import Dict, List, Optional
from openai import OpenAI

from app.reranker import Reranker


class QueryService:
    """Handles query processing and answer generation for RAG.

    Configuration is read from environment variables:
    API_KEY, BASE_URL, MODEL, and RETRIEVAL_CANDIDATES.
    """

    def __init__(self, vector_store):
        """
        Initialize query service.

        Args:
            vector_store: VectorStore instance used for similarity search
        """
        self.vector_store = vector_store
        self.openai_api_key = os.getenv("API_KEY")
        self.base_url = os.getenv("BASE_URL")
        self.chat_model = os.getenv("MODEL")
        # Fall back to the default when the env var is unset or malformed,
        # instead of crashing on a bad value.
        try:
            self.retrieval_candidates = int(os.getenv("RETRIEVAL_CANDIDATES", "12"))
        except ValueError:
            self.retrieval_candidates = 12
        self.openai_client = None
        # NOTE(review): the previous debug prints here leaked the raw API key
        # to stdout; they have been removed deliberately.

        # Cross-encoder reranker used to re-order retrieved chunks.
        self.reranker = Reranker()

        if self.openai_api_key:
            try:
                self.openai_client = OpenAI(
                    api_key=self.openai_api_key,
                    base_url=self.base_url,
                )
            except Exception:
                # Best-effort: queries still return evidence without a client.
                self.openai_client = None

    async def query(self, question: str, top_k: int = 4) -> Dict:
        """
        Process a query and return answer with evidence.

        Args:
            question: User question
            top_k: Number of evidence chunks to return after reranking

        Returns:
            Dictionary with 'answer' and 'evidence' keys
        """
        # Get embedding for the question
        query_embedding = await self._get_embedding(question)

        if not query_embedding:
            return {
                "answer": "Unable to generate embeddings. Please set API_KEY.",
                "evidence": []
            }

        # Retrieve more candidates than requested so the reranker has
        # something to choose from.
        candidate_k = max(top_k, self.retrieval_candidates)
        search_results = self.vector_store.search(
            query_embeddings=[query_embedding],
            top_k=candidate_k
        )

        # Format and rerank evidence down to top_k
        evidence = self._format_evidence(search_results)
        evidence = self._rerank_evidence(question, evidence, top_k)

        # Generate answer if OpenAI is available
        if self.openai_client and evidence:
            answer = await self._generate_answer(question, evidence)
        else:
            answer = (
                "I found relevant information, but cannot generate an answer "
                "without API_KEY. Please see the evidence below."
            )

        return {
            "answer": answer,
            "evidence": evidence
        }

    async def _get_embedding(self, text: str) -> Optional[List[float]]:
        """
        Get embedding for text.
        Uses the same embedding service as document upload.

        Args:
            text: Text to embed

        Returns:
            Embedding vector or None if the service is unavailable or fails
        """
        # Import here to avoid circular dependency
        from app.embedding_service import EmbeddingService

        embedding_service = EmbeddingService()

        if not embedding_service.is_available():
            return None

        try:
            embeddings = embedding_service.get_embeddings([text])
            # An empty list means the service produced nothing usable.
            return embeddings[0] if embeddings else None
        except Exception as e:
            print(f"Error getting embedding: {e}")
            return None

    async def _generate_answer(self, question: str, evidence: List[Dict]) -> str:
        """
        Generate answer using OpenAI ChatCompletion.

        Args:
            question: User question
            evidence: List of evidence chunks (dicts with a 'chunk' key)

        Returns:
            Generated answer, or an error description on failure
        """
        if not self.openai_client:
            return "OpenAI API not available."

        # Build context from evidence, labelling each chunk so the model
        # can reference them.
        context = "\n\n".join([
            f"[Chunk {i+1}]: {chunk['chunk']}"
            for i, chunk in enumerate(evidence)
        ])

        # Create prompt
        prompt = f"""Based on the following context, please answer the question.
If the context doesn't contain enough information, say so.

Context:
{context}

Question: {question}

Answer:"""

        try:
            response = self.openai_client.chat.completions.create(
                model=self.chat_model,
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that answers questions based on provided context."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=500
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # Surface the error in the answer rather than crashing the request.
            return f"Error generating answer: {str(e)}"

    def _format_evidence(self, search_results: Dict) -> List[Dict]:
        """
        Format search results into evidence format.

        Args:
            search_results: ChromaDB-style search results (lists nested one
                level per query; only the first query's results are used)

        Returns:
            List of dicts with 'chunk', 'meta', and 'distance' keys
        """
        evidence: List[Dict] = []

        if not search_results or "documents" not in search_results:
            return evidence

        documents = search_results.get("documents", [[]])[0]
        metadatas = search_results.get("metadatas", [[]])[0]
        distances = search_results.get("distances", [[]])[0]

        for i, doc in enumerate(documents):
            evidence.append({
                "chunk": doc,
                # Metadata/distance lists may be shorter than documents;
                # pad with neutral defaults rather than raising IndexError.
                "meta": metadatas[i] if i < len(metadatas) else {},
                "distance": distances[i] if i < len(distances) else None
            })

        return evidence

    def _rerank_evidence(self, question: str, evidence: List[Dict], top_k: int) -> List[Dict]:
        """
        Optionally rerank evidence using a cross-encoder model to improve
        precision; falls back to simple truncation when no reranker is
        available.
        """
        if not evidence:
            return evidence

        if self.reranker and self.reranker.is_available():
            return self.reranker.rerank(question, evidence, top_k)

        return evidence[:top_k]

