import os
import logging
from typing import Any, Dict, List, Optional


try:
    from .boss_guide_hybrid_search import HybridGuideSearch
except ImportError:
    from utils.use_file.boss_guide_hybrid_search import HybridGuideSearch

try:
    from openai import OpenAI
except Exception as e:
    OpenAI = None  # 延迟在运行时报错


logger = logging.getLogger(__name__)


class LLMResponder:
    """Answer user questions with Qwen via DashScope's OpenAI-compatible API,
    grounded strictly in hybrid-retrieval context snippets.
    """

    def __init__(self, model: str = "qwen-plus") -> None:
        """Create the DashScope chat client.

        Args:
            model: Chat model name exposed by DashScope (default "qwen-plus").

        Raises:
            ValueError: if the DASHSCOPE_API_KEY environment variable is unset.
            RuntimeError: if the openai SDK could not be imported at module load.
        """
        api_key = os.getenv("DASHSCOPE_API_KEY")
        if not api_key:
            raise ValueError("请设置 DASHSCOPE_API_KEY 环境变量")
        if OpenAI is None:
            raise RuntimeError("openai SDK 未安装，请在 requirements 中确保可用")

        self.client = OpenAI(
            api_key=api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self.model = model

    def build_prompt(self, question: str, contexts: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Assemble system/user chat messages from retrieved snippets.

        Each snippet is rendered as "[片段i] <meta>\\n<content>" so the model can
        cite it by number; image-backed snippets additionally expose their
        image path on a separate line.

        Args:
            question: The user's question.
            contexts: Hybrid-search hits; each dict may carry "chapter",
                "pdf_page", "content_type", "content" and "image_path".

        Returns:
            A two-message list (system + user) for the chat completions API.
        """
        context_texts = []
        for i, c in enumerate(contexts, 1):
            meta = [
                f"章节: {c.get('chapter','')}",
                f"页码: {c.get('pdf_page','')}",
                f"类型: {('图片说明' if c.get('image_path') else c.get('content_type',''))}",
            ]
            snippet = c.get("content", "")
            image_path = c.get("image_path")
            if image_path:
                context_texts.append(f"[片段{i}] {' | '.join(meta)}\n图片路径: {image_path}\n{snippet}")
            else:
                context_texts.append(f"[片段{i}] {' | '.join(meta)}\n{snippet}")

        # Order-preserving de-duplication of chapter names (dict keeps insertion
        # order); replaces the original O(n^2) slice-membership scan.
        chapters = list(dict.fromkeys(c.get("chapter", "") for c in contexts if c.get("chapter")))

        system_msg = (
            "你是一个可靠的企业招聘与求职平台助手。\n"
            "请严格以提供的资料片段为依据进行回答，并在相关处引用片段编号（如[片段1]）。\n"
            "若片段未直接给出完整结论，可做谨慎归纳总结，但必须基于片段内容，不得编造。\n"
            "仅当片段与问题明显无关时，再说明‘当前资料无法支持该结论’。\n"
            f"参考范围章节：{', '.join(chapters) if chapters else '未知'}"
        )

        user_msg = (
            f"用户问题：{question}\n\n"
            f"可用资料（请仅基于这些内容作答）：\n\n"
            + "\n\n".join(context_texts)
        )

        return [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ]

    def answer(self, question: str, contexts: List[Dict[str, Any]], max_tokens: int = 512) -> str:
        """Call the chat model and return its answer text.

        Args:
            question: The user's question.
            contexts: Retrieved snippets (see ``build_prompt``).
            max_tokens: Completion-length cap passed to the API.

        Returns:
            The model's reply, or "" if the API returned no content.
        """
        messages = self.build_prompt(question, contexts)
        # Debug-level log instead of an unconditional print: the full prompt is
        # large and was previously leaked to stdout on every call.
        logger.debug("最后的提示词:%s", messages)
        resp = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,  # low temperature keeps answers close to the sources
            max_tokens=max_tokens,
        )
        return resp.choices[0].message.content or ""


def _postprocess_contexts(items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """De-duplicate retrieval hits and re-rank them by content-type priority.

    Dedup key: ``metadata.text_block_id`` (or top-level ``text_block_id``);
    falls back to the first 80 characters of the content when no id exists.
    Priority order: 图片关联文本 > 表格 > 基础文本 > anything else.
    Hoisted to module level so it is not re-defined on every run_demo() call
    and can be reused by other entry points.
    """
    seen_keys = set()
    deduped: List[Dict[str, Any]] = []
    for it in items:
        key = it.get("metadata", {}).get("text_block_id") or it.get("text_block_id")
        if not key:
            key = (it.get("content") or "")[:80]
        if key in seen_keys:
            continue
        seen_keys.add(key)
        deduped.append(it)

    priority = {"图片关联文本": 0, "表格": 1, "基础文本": 2}

    def get_type_rank(it: Dict[str, Any]) -> int:
        # Explicit content_type wins; otherwise infer "图片关联文本" from the
        # presence of an image path. Unknown types sort last.
        ctype = (
            it.get("content_type")
            or it.get("metadata", {}).get("content_type")
            or ("图片关联文本" if it.get("image_path") else "基础文本")
        )
        return priority.get(ctype, 3)

    deduped.sort(key=get_type_rank)
    return deduped


def run_demo() -> None:
    """Fixed-question demo: fetch hybrid-search context, then answer with Qwen."""
    logging.basicConfig(level=logging.INFO)

    user_question = "图片说明:如何进行面试邀约与进度跟踪"
    topk = 12

    # 1) Initial keyword-only retrieval (recall-oriented; no vector yet).
    #    To restrict by type, pass content_type="表格" or "图片关联文本".
    hs = HybridGuideSearch()
    contexts = hs.hybrid_search(query=user_question, query_embedding=None, size=topk, weight_es=0.7, weight_vec=0.3)
    if not contexts:
        logger.warning("未检索到上下文，请先运行向量/ES入库流程")
        return

    # 2) Embed the query and re-run the fused search (vector now participates)
    #    to improve relevance. Lazy import mirrors the module-level fallback.
    try:
        from .text_embedding import TextEmbedding
    except ImportError:
        from utils.use_file.text_embedding import TextEmbedding

    embedder = TextEmbedding()
    q_emb = embedder.generate_embeddings(user_question)[0]
    contexts = hs.hybrid_search(query=user_question, query_embedding=q_emb, size=topk, weight_es=0.6, weight_vec=0.4)

    contexts = _postprocess_contexts(contexts)

    # If no image-caption snippet surfaced, retry filtered by type: first
    # "图片关联文本", then fall back to a table-oriented keyword query.
    if not any(c.get("image_path") for c in contexts):
        pic_ctx = hs.hybrid_search(query=user_question, query_embedding=q_emb, size=topk, content_type="图片关联文本", weight_es=0.6, weight_vec=0.4)
        if pic_ctx:
            contexts = pic_ctx
        else:
            tbl_ctx = hs.hybrid_search(query="JD 要素 对比 表格", query_embedding=None, size=topk, content_type="表格", weight_es=0.8, weight_vec=0.2)
            if tbl_ctx:
                contexts = tbl_ctx

    # Normalize again: the retry branches may have replaced the context list.
    contexts = _postprocess_contexts(contexts)

    # 3) Ask the LLM, grounded in the retrieved snippets.
    llm = LLMResponder(model=os.getenv("QWEN_CHAT_MODEL", "qwen-plus"))
    answer = llm.answer(user_question, contexts)

    print("=== 用户问题 ===")
    print(user_question)
    print("\n=== 参考片段(TopN) ===")
    for i, c in enumerate(contexts, 1):
        print(f"[片段{i}] 章节={c.get('chapter')} 页={c.get('pdf_page')} 类型={c.get('content_type')}\n{c.get('content','')[:200]}...")
    print("\n=== 答案 ===")
    print(answer)


if __name__ == "__main__":
    run_demo()