"""
Knowledge-Augmented Question Answering Pipeline (OOP version, LLM-based NER, local BGE/Rerank retrieval)
"""

from __future__ import annotations

import json
import logging
from dataclasses import dataclass
from typing import List, Optional, Tuple

import numpy as np

# ──────────────────────────────────────────────────────────────────────────────
#  Data containers
# ──────────────────────────────────────────────────────────────────────────────
@dataclass
class Entity:
    """A surface mention extracted from the question.

    Hashing is based only on the mention's identity (text + span), so the
    mutable bookkeeping fields (wiki_id, score) can be updated in place by
    later pipeline stages without changing the hash.
    """

    text: str                      # surface form exactly as it appears in the question
    span: Tuple[int, int]          # (start, end) character offsets within the question
    wiki_id: Optional[str] = None  # knowledge-base id filled in by the linker, if any
    score: float = 0.0             # relevance score assigned by the topic pruner

    def __hash__(self):
        identity = (self.text, self.span)
        return hash(identity)

@dataclass
class RetrievalResult:
    """A single retrieved passage together with its retrieval/rerank score."""
    doc_id: str  # stringified index id; key into the retriever's doc_map
    text: str  # passage text looked up from doc_map
    score: float  # similarity score from the index search; higher is more relevant

# ──────────────────────────────────────────────────────────────────────────────
#  Stage 1 – LLM-based Entity Recognition
# ──────────────────────────────────────────────────────────────────────────────
class LLMNER:
    """Entity recognition by prompting a local LLM to emit a JSON array.

    The client is duck-typed: it must expose ``generate(prompt: str) -> str``.
    """
    def __init__(self, llm_client):
        self.llm = llm_client  # assume llm_client has generate(prompt: str) -> str

    def extract(self, question: str) -> List[Entity]:
        """Extract entity mentions from *question*.

        Best-effort: an unparsable LLM reply yields an empty list. Items
        that are not strings, duplicates, or strings the LLM hallucinated
        (i.e. not found verbatim in the question, where str.find would
        return -1 and produce a bogus span) are silently dropped.
        """
        prompt = (
            "请识别出问题中所有具有具体名称的对象，按JSON数组返回：\n"
            f"问题：{question}"
        )
        raw = self.llm.generate(prompt)
        try:
            payload = json.loads(raw)
        except (ValueError, TypeError):
            payload = []
        if not isinstance(payload, list):
            payload = []
        entities: List[Entity] = []
        seen = set()
        for mention in payload:
            if not isinstance(mention, str) or mention in seen:
                continue
            seen.add(mention)
            start = question.find(mention)
            if start < 0:
                continue  # hallucinated mention, not present in the question
            entities.append(Entity(text=mention, span=(start, start + len(mention))))
        return entities

# ──────────────────────────────────────────────────────────────────────────────
#  Stage 2 – Entity Linking (stub)
# ──────────────────────────────────────────────────────────────────────────────
class EntityLinker:
    """Link extracted entities to knowledge-base ids (stub implementation)."""

    def link(self, entities: List[Entity]) -> List[Entity]:
        """Annotate each entity in place and return the same list.

        Placeholder: real KB linking is not implemented yet, so every
        wiki_id is reset to None.
        """
        position = 0
        while position < len(entities):
            entities[position].wiki_id = None  # TODO: real KB lookup
            position += 1
        return entities

# ──────────────────────────────────────────────────────────────────────────────
#  Stage 3 – Topic Entity Pruning
# ──────────────────────────────────────────────────────────────────────────────
class TopicPruner:
    """Score candidate entities with the LLM and keep the top-m topic entities."""

    def __init__(self, llm_client):
        self.llm = llm_client  # exposes generate(prompt: str) -> str

    def prune(self, question: str, entities: List[Entity], m: int = 3) -> List[Entity]:
        """Return the *m* entities the LLM judged useful for answering.

        Each entity's ``score`` is set in place to 1.0 when the reply
        contains "yes" (case-insensitive), else 0.0. Ties keep their
        original order because sorted() is stable.
        """
        for candidate in entities:
            prompt = f"问题：{question}\n实体：{candidate.text}\n该实体是否能构建知识图谱帮助解答该问题？回答Yes或No。"
            verdict = self.llm.generate(prompt).lower()
            candidate.score = 1.0 if 'yes' in verdict else 0.0
        ranked = sorted(entities, key=lambda ent: ent.score, reverse=True)
        return ranked[:m]

# ──────────────────────────────────────────────────────────────────────────────
#  Stage 4 – Dense Retrieval + Rerank
# ──────────────────────────────────────────────────────────────────────────────
class DenseRetriever:
    """Dense retrieval (embedding + FAISS search) followed by reranking."""

    def __init__(self, embed_model, rerank_model, faiss_index, doc_map):
        self.embed_model = embed_model    # sentence encoder, e.g. BGE
        self.rerank_model = rerank_model  # exposes rerank(query, candidates)
        self.index = faiss_index          # FAISS index over document embeddings
        self.doc_map = doc_map            # str(vector id) -> document text

    def search(self, query: str, k: int = 10) -> List[RetrievalResult]:
        """Return up to *k* reranked passages for *query*.

        FAISS pads its result with id -1 when fewer than *k* vectors are
        indexed; those slots, and any id missing from doc_map, are skipped
        instead of raising KeyError. Scores are cast to plain floats so no
        numpy scalar leaks into RetrievalResult.
        """
        vec = self.embed_model.encode([query], normalize_embeddings=True)
        scores, ids = self.index.search(np.asarray(vec, dtype="float32"), k)
        candidates: List[RetrievalResult] = []
        for doc_id, score in zip(ids[0], scores[0]):
            if doc_id < 0:
                continue  # FAISS placeholder for "no result"
            key = str(int(doc_id))
            if key not in self.doc_map:
                continue  # id not present in the text mapping
            candidates.append(
                RetrievalResult(doc_id=key, text=self.doc_map[key], score=float(score))
            )
        return self.rerank_model.rerank(query, candidates)

# ──────────────────────────────────────────────────────────────────────────────
#  Stage 5 – Answer Judgement
# ──────────────────────────────────────────────────────────────────────────────
class AnswerJudger:
    """Ask the LLM whether the retrieved context suffices, and answer if so."""

    def __init__(self, llm_client):
        self.llm = llm_client  # exposes generate(prompt: str) -> str

    def judge(self, question: str, context: List[RetrievalResult]) -> str:
        """Return the LLM's verdict/answer for *question* given *context*.

        Passages are joined with blank lines; the LLM is instructed to
        answer or reply 'Insufficient'.
        """
        joined_context = "\n\n".join(passage.text for passage in context)
        prompt = (
            f"Here is the context:\n{joined_context}\n\n"
            f"Question: {question}\n"
            "Can this context support answering the question? If yes, please answer; if not, reply 'Insufficient'."
        )
        return self.llm.generate(prompt)

# ──────────────────────────────────────────────────────────────────────────────
#  Pipeline Orchestrator
# ──────────────────────────────────────────────────────────────────────────────
class KGQAEngine:
    """Orchestrates the pipeline: NER -> link -> prune -> retrieve -> judge."""

    def __init__(self, ner, linker, pruner, retriever, judger):
        self.ner = ner
        self.linker = linker
        self.pruner = pruner
        self.retriever = retriever
        self.judger = judger

    def answer(self, question: str) -> str:
        """Run the full pipeline for *question* and return the judger's reply."""
        mentions = self.ner.extract(question)
        linked_mentions = self.linker.link(mentions)
        topic_entities = self.pruner.prune(question, linked_mentions)
        # Expand the query with the surface forms of the topic entities.
        expanded_query = question + " " + " ".join(ent.text for ent in topic_entities)
        passages = self.retriever.search(expanded_query)
        return self.judger.judge(question, passages)
