
import os
import logging
from typing import Any, Dict, List, Optional, Sequence, Tuple

# Elasticsearch DSL 8.18.0
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from elasticsearch_dsl import Document, Text, Keyword, Integer, Float, Nested, connections, Index

# Milvus 2.6.0
from pymilvus import MilvusClient, DataType


logger = logging.getLogger(__name__)


# =========================
# Elasticsearch document storage layer
# =========================
class GuideDoc(Document):
    """elasticsearch-dsl mapping for one guide text block.

    The ``content`` field uses the IK Chinese analyzers, so the target ES
    cluster must have the IK plugin installed before ``GuideDoc.init()``.
    """

    # Main body text (ik_max_word at index time, ik_smart at query time)
    content = Text(analyzer="ik_max_word", search_analyzer="ik_smart")

    # Metadata fields (exact-match searchable / filterable)
    text_block_id = Keyword()  # unique ID produced by pdf_parser.add_metadata
    user_role = Keyword()
    chapter = Keyword()
    content_type = Keyword()
    source_mark = Keyword()
    pdf_page = Integer()
    image_path = Keyword()

    # Redundant copy of a vector-similarity score (optional: for
    # approximate filtering or rank boosting)
    vector_score = Float()

    class Index:
        # Default index name; ElasticStorage.__init__ may rebind it.
        name = "boss_guide"


class ElasticStorage:
    """Elasticsearch-backed keyword (BM25) store for guide text blocks.

    Responsibilities: index lifecycle, idempotent per-block upserts keyed by
    ``text_block_id``, filtered keyword search, bulk id lookup, and deletion.
    """

    def __init__(
        self,
        hosts: Optional[Sequence[str]] = None,
        index_name: Optional[str] = None,
    ) -> None:
        """Connect to Elasticsearch.

        Args:
            hosts: ES endpoints; defaults to the comma-separated ES_HOSTS env
                var (or http://localhost:9200).
            index_name: target index; defaults to the ES_INDEX env var.
        """
        self.hosts = list(hosts) if hosts else os.getenv("ES_HOSTS", "http://localhost:9200").split(",")
        self.index_name = index_name or os.getenv("ES_INDEX", "boss_guide")

        # Register the low-level connection for elasticsearch-dsl to use.
        connections.create_connection(alias="default", hosts=self.hosts)
        self.client = Elasticsearch(self.hosts)

        # Bind the Document model to the (possibly env-configured) index name.
        GuideDoc._index._name = self.index_name

    @staticmethod
    def _to_int(value: Any, default: int = 0) -> int:
        """Best-effort int coercion; returns *default* for non-numeric input.

        The PDF parser may emit pdf_page as str/None; a bare int() here
        would abort the whole batch on one bad value.
        """
        try:
            return int(value)
        except (TypeError, ValueError):
            return default

    def ensure_index(self, recreate: bool = False) -> None:
        """Create the index if missing; optionally drop and recreate it.

        Requires the IK analyzer plugin on the ES side (GuideDoc maps
        ``content`` with ik_max_word / ik_smart).
        """
        idx = Index(self.index_name)

        if recreate and idx.exists():
            logger.warning(f"Recreating ES index: {self.index_name}")
            idx.delete()

        if not idx.exists():
            GuideDoc.init()
            logger.info(f"Created ES index: {self.index_name}")
        else:
            logger.info(f"ES index already exists: {self.index_name}")

    def upsert_documents(self, docs: List[Dict[str, Any]]) -> None:
        """Write/overwrite documents, one ES doc per text block.

        docs: [{
          "content": str,
          "metadata": { text_block_id, user_role, chapter, content_type, source_mark, pdf_page, image_path }
        }]

        Docs without a ``text_block_id`` are skipped. A single refresh is
        issued after the batch (cheaper than refresh=True per doc) so writes
        become searchable immediately; it is skipped when nothing was written.
        """
        written = 0
        for doc in docs:
            meta = doc.get("metadata", {})
            text_block_id = meta.get("text_block_id")
            if not text_block_id:
                logger.warning("Skip doc without text_block_id")
                continue

            body = {
                "content": doc.get("content", ""),
                "text_block_id": text_block_id,
                "user_role": meta.get("user_role", ""),
                "chapter": meta.get("chapter", ""),
                "content_type": meta.get("content_type", ""),
                "source_mark": meta.get("source_mark", ""),
                "pdf_page": self._to_int(meta.get("pdf_page", 0) or 0),
                "image_path": meta.get("image_path", ""),
            }

            # Using text_block_id as the ES _id gives idempotent upsert semantics.
            self.client.index(index=self.index_name, id=text_block_id, document=body, refresh=False)
            written += 1

        if written:
            self.client.indices.refresh(index=self.index_name)

    def search(
        self,
        query: str,
        size: int = 10,
        user_role: Optional[str] = None,
        chapter: Optional[str] = None,
        content_type: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """BM25 search over content/chapter/source_mark with optional exact filters.

        Returns the hit ``_source`` dicts, each augmented with ``_es_score``.
        """
        must = []
        if query:
            # content is boosted 3x over the metadata text fields.
            must.append({"multi_match": {"query": query, "fields": ["content^3", "chapter", "source_mark"]}})

        filters = []
        if user_role:
            filters.append({"term": {"user_role": user_role}})
        if chapter:
            filters.append({"term": {"chapter": chapter}})
        if content_type:
            filters.append({"term": {"content_type": content_type}})

        es_query = {"bool": {"must": must, "filter": filters}}

        resp = self.client.search(index=self.index_name, query=es_query, size=size)
        hits = resp.get("hits", {}).get("hits", [])

        results: List[Dict[str, Any]] = []
        for h in hits:
            source = h.get("_source", {})
            score = h.get("_score", 0.0)
            source["_es_score"] = float(score)
            results.append(source)
        return results

    def get_by_ids(self, ids: List[str]) -> Dict[str, Dict[str, Any]]:
        """Fetch documents by id via mget; returns {id: _source} for found docs only."""
        if not ids:
            return {}
        resp = self.client.mget(index=self.index_name, ids=ids)
        out: Dict[str, Dict[str, Any]] = {}
        for d in resp.get("docs", []):
            if d.get("found"):
                out[d["_id"]] = d.get("_source", {})
        return out

    def delete(self, text_block_id: str) -> None:
        """Delete one document by id; a missing doc is logged, not raised."""
        try:
            self.client.delete(index=self.index_name, id=text_block_id, refresh=True)
        except NotFoundError:
            logger.warning(f"ES doc not found for delete: {text_block_id}")


# ======================
# Milvus vector storage layer
# ======================
class MilvusStorage:
    """Milvus-backed vector store for guide text-block embeddings.

    Primary key is ``text_block_id`` (VARCHAR); each row carries the
    FLOAT_VECTOR embedding plus a few filterable metadata fields.
    """

    def __init__(
        self,
        uri: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        collection_name: Optional[str] = None,
        dim: Optional[int] = None,
        metric_type: str = "IP",  # inner product; common choice for Tongyi v4 embeddings
    ) -> None:
        """Connect to Milvus and ensure the target collection exists.

        All arguments fall back to MILVUS_URI / MILVUS_USER / MILVUS_PASSWORD /
        MILVUS_COLLECTION / VECTOR_DIM environment variables.
        """
        self.uri = uri or os.getenv("MILVUS_URI", "http://localhost:19530")
        self.user = user or os.getenv("MILVUS_USER", "")
        self.password = password or os.getenv("MILVUS_PASSWORD", "")
        self.collection_name = collection_name or os.getenv("MILVUS_COLLECTION", "boss_guide_vectors")
        self.dim = dim or int(os.getenv("VECTOR_DIM", "1024"))
        self.metric_type = metric_type

        # MilvusClient is the recommended high-level client as of 2.6.
        self.client = MilvusClient(uri=self.uri, user=self.user or None, password=self.password or None)

        self._ensure_collection()

    @staticmethod
    def _expr_literal(value: str) -> str:
        """Render *value* as a double-quoted Milvus filter string literal.

        Escapes backslashes and embedded double quotes so that metadata
        values containing quotes cannot break (or inject into) the filter
        expression built in search().
        """
        escaped = str(value).replace('\\', '\\\\').replace('"', '\\"')
        return f'"{escaped}"'

    def _vector_index_params(self):
        """Build the IVF_FLAT index spec for the ``embedding`` field (single source of truth)."""
        index_params = self.client.prepare_index_params()
        index_params.add_index(
            field_name="embedding",
            index_type="IVF_FLAT",
            metric_type=self.metric_type,
            params={"nlist": 1024},
        )
        return index_params

    def _ensure_collection(self) -> None:
        """Create the collection (with schema and vector index) if it does not exist."""
        if not self.client.has_collection(self.collection_name):
            # Build an empty schema, then add fields one by one.
            schema = self.client.create_schema(auto_id=False, description="Boss Guide Embeddings")
            schema.add_field("text_block_id", DataType.VARCHAR, is_primary=True, max_length=100)
            schema.add_field("embedding", DataType.FLOAT_VECTOR, dim=self.dim)
            schema.add_field("user_role", DataType.VARCHAR, max_length=32)
            schema.add_field("chapter", DataType.VARCHAR, max_length=128)
            schema.add_field("content_type", DataType.VARCHAR, max_length=32)
            schema.add_field("pdf_page", DataType.INT64)

            self.client.create_collection(
                collection_name=self.collection_name,
                schema=schema,
                index_params=self._vector_index_params(),
                consistency_level="Strong",
            )
            logger.info(f"Created Milvus collection: {self.collection_name}")
        else:
            # Collection already exists: make sure the vector index does too.
            try:
                index_info = self.client.describe_index(self.collection_name)
                has_vec_index = any(idx.get("field_name") == "embedding" for idx in (index_info or []))
                if not has_vec_index:
                    self.client.create_index(self.collection_name, index_params=self._vector_index_params())
            except Exception as e:
                # Best-effort: index probing APIs vary between server versions.
                logger.debug(f"Ensure Milvus index failed or already exists: {e}")

    def upsert_vectors(self, items: List[Dict[str, Any]]) -> None:
        """Insert-or-replace embedding rows keyed by text_block_id.

        items: [{
          "text_block_id": str,
          "embedding": List[float],
          "metadata": { user_role, chapter, content_type, pdf_page }
        }]

        Uses MilvusClient.upsert, which replaces rows with matching primary
        keys atomically — safer than the manual delete-then-insert two-step,
        whose delete failures were silently swallowed.
        """
        if not items:
            return

        rows: List[Dict[str, Any]] = []
        for it in items:
            meta = it.get("metadata", {})
            text_block_id = it.get("text_block_id") or meta.get("text_block_id")
            embedding = it.get("embedding")
            if not text_block_id or not embedding:
                logger.warning("skip item missing id or embedding")
                continue
            rows.append({
                "text_block_id": text_block_id,
                "embedding": embedding,
                "user_role": meta.get("user_role", ""),
                "chapter": meta.get("chapter", ""),
                "content_type": meta.get("content_type", ""),
                "pdf_page": int(meta.get("pdf_page", 0) or 0),
            })

        if rows:
            self.client.upsert(collection_name=self.collection_name, data=rows)

    def search(
        self,
        embedding: List[float],
        top_k: int = 10,
        user_role: Optional[str] = None,
        chapter: Optional[str] = None,
        content_type: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """ANN search for one query vector with optional metadata filters.

        Returns one dict per hit with the output fields plus ``_vector_score``
        (the raw metric value reported by Milvus).
        """
        clauses = []
        if user_role:
            clauses.append(f"user_role == {self._expr_literal(user_role)}")
        if chapter:
            clauses.append(f"chapter == {self._expr_literal(chapter)}")
        if content_type:
            clauses.append(f"content_type == {self._expr_literal(content_type)}")
        expr = " and ".join(clauses) if clauses else ""

        res = self.client.search(
            collection_name=self.collection_name,
            data=[embedding],
            limit=top_k,
            search_params={"metric_type": self.metric_type, "params": {"nprobe": 16}},
            filter=expr or None,
            output_fields=["text_block_id", "user_role", "chapter", "content_type", "pdf_page"],
        )

        out: List[Dict[str, Any]] = []
        if res and len(res) > 0:
            # MilvusClient returns a list of result sets, one per query vector.
            for hit in res[0]:
                fields = hit.get("entity", {}) or {}
                out.append({
                    "text_block_id": fields.get("text_block_id"),
                    "user_role": fields.get("user_role"),
                    "chapter": fields.get("chapter"),
                    "content_type": fields.get("content_type"),
                    "pdf_page": fields.get("pdf_page"),
                    "_vector_score": float(hit.get("distance", 0.0)),
                })
        return out


# ======================
# Simple hybrid-retrieval facade
# ======================
class HybridGuideSearch:
    """Facade combining ES keyword search with Milvus vector search.

    Scores from both backends are min-max normalized independently and then
    merged with configurable weights.
    """

    def __init__(self, es: Optional["ElasticStorage"] = None, milvus: Optional["MilvusStorage"] = None) -> None:
        self.es = es or ElasticStorage()
        self.milvus = milvus or MilvusStorage()

    def setup(self, recreate: bool = False) -> None:
        """Ensure the ES index exists (the Milvus collection is ensured in MilvusStorage.__init__)."""
        self.es.ensure_index(recreate=recreate)

    def index_blocks(
        self,
        blocks: List[Dict[str, Any]],
        embeddings: Optional[List[List[float]]] = None,
    ) -> None:
        """Index text blocks into ES, and (optionally) their embeddings into Milvus.

        blocks: [{"content": str, "metadata": {...}}]
        embeddings: parallel to blocks, or None (ES only)
        """
        self.es.upsert_documents(blocks)

        if embeddings is not None:
            items: List[Dict[str, Any]] = [
                {
                    "text_block_id": block.get("metadata", {}).get("text_block_id"),
                    "embedding": vec,
                    "metadata": block.get("metadata", {}),
                }
                for block, vec in zip(blocks, embeddings)
            ]
            self.milvus.upsert_vectors(items)

    @staticmethod
    def _normalized_scores(results: List[Dict[str, Any]], score_key: str) -> Dict[Any, float]:
        """Min-max normalize *score_key* over results, keyed by text_block_id.

        A single result (or all-equal scores) normalizes to 1.0 for every id,
        matching the original fusion semantics.
        """
        scores = [r.get(score_key, 0.0) for r in results]
        if not scores:
            return {}
        mn, mx = min(scores), max(scores)
        if mx - mn < 1e-9:
            return {r.get("text_block_id"): 1.0 for r in results}
        return {r.get("text_block_id"): (s - mn) / (mx - mn) for r, s in zip(results, scores)}

    def hybrid_search(
        self,
        query: str,
        query_embedding: Optional[List[float]] = None,
        size: int = 10,
        weight_es: float = 0.5,
        weight_vec: float = 0.5,
        user_role: Optional[str] = None,
        chapter: Optional[str] = None,
        content_type: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """Run keyword + vector retrieval and return the top *size* fused documents.

        Final score per id = weight_es * norm_es + weight_vec * norm_vec;
        ids missing from one backend contribute 0 for that side. Document
        bodies come from ES; ids known only to Milvus fall back to a stub
        built from the Milvus metadata (empty ``content``).
        """
        # Keyword retrieval
        es_results = self.es.search(
            query=query,
            size=size,
            user_role=user_role,
            chapter=chapter,
            content_type=content_type,
        )

        # Vector retrieval (skipped when no query embedding is supplied)
        milvus_results: List[Dict[str, Any]] = []
        if query_embedding is not None:
            milvus_results = self.milvus.search(
                embedding=query_embedding,
                top_k=size,
                user_role=user_role,
                chapter=chapter,
                content_type=content_type,
            )

        # Direct id -> normalized-score maps (replaces the previous O(n^2)
        # per-id list scans). Note: Milvus IP "distance" is higher-is-better;
        # min-max normalization makes both sides comparable either way.
        es_norm = self._normalized_scores(es_results, "_es_score")
        mv_norm = self._normalized_scores(milvus_results, "_vector_score")
        mv_by_id = {r.get("text_block_id"): r for r in milvus_results}

        # Fuse; drop results that carried no text_block_id at all.
        all_ids = (set(es_norm) | set(mv_norm)) - {None}
        merged = sorted(
            (
                (tid, weight_es * es_norm.get(tid, 0.0) + weight_vec * mv_norm.get(tid, 0.0))
                for tid in all_ids
            ),
            key=lambda pair: pair[1],
            reverse=True,
        )
        top_ids = [tid for tid, _ in merged[:size]]

        # Pull full document bodies (ES source preferred)
        id_to_doc = self.es.get_by_ids(top_ids)
        results: List[Dict[str, Any]] = []
        for tid in top_ids:
            doc = id_to_doc.get(tid)
            if not doc:
                # Fallback: assemble a stub from Milvus metadata when ES has no doc.
                mv = mv_by_id.get(tid, {})
                doc = {
                    "text_block_id": tid,
                    "user_role": mv.get("user_role", ""),
                    "chapter": mv.get("chapter", ""),
                    "content_type": mv.get("content_type", ""),
                    "pdf_page": mv.get("pdf_page", 0),
                    "content": "",
                }
            results.append(doc)

        return results


if __name__ == "__main__":
    """
    简单自测：
    - 解析同目录下 PDF: "Boss 直聘平台使用指南.pdf"
    - 生成向量
    - 写入 ES 与 Milvus
    - 做一次关键词+向量混合检索
    运行前准备：
    - 启动 Elasticsearch 并安装 IK 分词（如上配置所需）
    - 启动 Milvus 2.6
    - 设置通义 API 环境变量 DASHSCOPE_API_KEY
    """
    logging.basicConfig(level=logging.INFO)

    try:
        from .pdf_parser import GuidePDFProcessor
        from .text_embedding import TextEmbedding
    except Exception:
        # 兼容从项目根目录直接运行
        from utils.use_file.pdf_parser import GuidePDFProcessor
        from utils.use_file.text_embedding import TextEmbedding

    pdf_path = os.path.join(os.path.dirname(__file__), "Boss 直聘平台使用指南.pdf")
    if not os.path.exists(pdf_path):
        logger.error(f"找不到PDF: {pdf_path}")
        raise SystemExit(1)

    logger.info("开始解析PDF并切块...")
    processor = GuidePDFProcessor(pdf_path=pdf_path, output_image_dir=os.path.join(os.path.dirname(__file__), "images"))
    blocks = processor.process()  # [{content, metadata}]
    logger.info(f"共生成文本块: {len(blocks)}")

    # 取前 N 块做演示，避免过大批量
    demo_n = min(30, len(blocks))
    demo_blocks = blocks[:len(blocks)]

    # 生成向量
    logger.info("生成向量...")
    embedder = TextEmbedding()
    contents = [b.get("content", "") for b in demo_blocks]
    embeddings = embedder.generate_embeddings(contents)

    # 建立索引并写入
    logger.info("写入 ES 和 Milvus...")
    hybrid = HybridGuideSearch()
    hybrid.setup(recreate=False)
    hybrid.index_blocks(demo_blocks, embeddings)
    logger.info("写入完成")

    # 做一次查询
    q = "如何发布职位"
    logger.info(f"混合检索: {q}")
    q_emb = embedder.generate_embeddings(q)[0]
    results = hybrid.hybrid_search(query=q, query_embedding=q_emb, size=5)

    for i, r in enumerate(results, 1):
        logger.info(f"Top{i} | 章节={r.get('chapter')} | 角色={r.get('user_role')} | 页码={r.get('pdf_page')}\n内容: {r.get('content','')[:120]}...")

