"""
milvus_pipeline.py

用途：从本地文档 -> 分片（chunk）-> 使用 nomic-embed-text 生成 embeddings -> 导入 Milvus
每一步都会打印状态，便于调试与观察。

依赖：
  pip install pymilvus nomic numpy
（可选）pip install tqdm

说明：
- 如果使用 Nomic 云（Atlas）服务，请设置环境变量 NOMIC_API_KEY。
- 默认使用 nomic-embed-text-v1.5, embedding_dim 默认 768（可调整，nomic 支持 64..768 等）。
"""

import os
import math
import re
import numpy as np
from typing import List
from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection
from nomic import embed  # nomic 官方 Python 客户端


# -------------------------
# 1) 连接 Milvus（带打印）
# -------------------------
def connect_milvus(host: str = "127.0.0.1", port: str = "19530", alias: str = "default"):
    """Connect to a Milvus server, printing each step for observability.

    Args:
        host: Milvus server hostname or IP address.
        port: Milvus server port (pymilvus accepts it as a string).
        alias: Connection alias registered with pymilvus.

    Raises:
        Exception: whatever pymilvus raised is re-raised after being logged.
    """
    print(f"[STEP] 1. 连接 Milvus -> {host}:{port} (alias='{alias}') ...")
    try:
        connections.connect(alias=alias, host=host, port=port)
        print("  ✅ 已连接到 Milvus")
    except Exception as e:
        # Log the failure so the operator sees it, then propagate.
        print("  ❌ 连接 Milvus 失败：", e)
        raise

# -------------------------------------
# 2) 创建集合（如果存在可选择覆盖/复用）
# -------------------------------------
def create_collection(collection_name: str, dim: int = 768, drop_if_exists: bool = True):
    """Create a Milvus collection for document chunks, or reuse an existing one.

    Schema: auto-generated INT64 primary key `pk`, a FLOAT_VECTOR field
    `embedding` of dimension `dim`, and a VARCHAR `text` field for the chunk.

    Args:
        collection_name: Name of the target collection.
        dim: Dimensionality of the embedding vectors.
        drop_if_exists: When True, an existing collection with the same name
            is dropped and recreated; otherwise it is reused as-is.

    Returns:
        The pymilvus Collection handle (new or reused).
    """
    print(f"[STEP] 2. 准备 collection '{collection_name}' (dim={dim}) ...")
    if utility.has_collection(collection_name):
        print(f"  -> collection '{collection_name}' 已存在")
        if not drop_if_exists:
            # Reuse path: hand back a handle to the existing collection.
            print("  -> 复用已有集合")
            return Collection(collection_name)
        print(f"  -> drop_if_exists=True，删除旧集合 '{collection_name}' ...")
        utility.drop_collection(collection_name)
        print("  -> 已删除旧集合")

    schema = CollectionSchema(
        [
            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
        ],
        description="document chunks collection",
    )
    fresh = Collection(name=collection_name, schema=schema)
    print(f"  ✅ 创建集合 '{collection_name}' 成功")
    return fresh

# -------------------------
# 3) 文本分片（chunk）
#    支持两种模式：sentence（按句子聚合）或 fixed（固定长度切片）
# -------------------------
def chunk_text(text: str, chunk_size: int = 1000, overlap: int = 200, method: str = "sentence") -> List[str]:
    """Split `text` into chunks for embedding.

    Two modes:
      - "fixed": fixed-length windows of `chunk_size` chars, advancing by
        `chunk_size - overlap` (minimum step 1, so it always terminates).
      - "sentence" (default): greedily packs sentences (split on common
        Chinese/English sentence-ending punctuation) up to `chunk_size`
        chars per chunk, carrying the last `overlap` chars of the previous
        chunk into the next one for context. A single sentence longer than
        `chunk_size` becomes an oversized chunk rather than being truncated.

    Args:
        text: Raw input text; all whitespace runs are collapsed to spaces.
        chunk_size: Target maximum characters per chunk.
        overlap: Characters of trailing context carried between chunks.
        method: "sentence" or "fixed".

    Returns:
        List of non-empty chunk strings (empty input yields an empty list).
    """
    print(f"[STEP] 3. 开始分片：chunk_size={chunk_size}, overlap={overlap}, method={method}")
    text = re.sub(r'\s+', ' ', text).strip()
    chunks: List[str] = []

    if method == "fixed":
        start = 0
        L = len(text)
        step = max(1, chunk_size - overlap)  # guard: overlap >= chunk_size must not loop forever
        while start < L:
            end = min(start + chunk_size, L)
            chunks.append(text[start:end].strip())
            start += step
    else:  # sentence aggregation (simple heuristic for Chinese/English)
        # Split after sentence-ending punctuation, keeping the punctuation.
        sentences = re.split(r'(?<=[。！？!?；;.\n])', text)
        current = ""
        for s in sentences:
            if not s:
                # re.split leaves an empty trailing piece; skip it so it
                # cannot trigger a spurious overlap-only chunk at the end.
                continue
            if len(current) + len(s) <= chunk_size:
                current += s
            else:
                if current:
                    chunks.append(current.strip())
                # BUG FIX: the old code used `(current + s)[-overlap:]`, which
                # dropped every character of `s` beyond the last `overlap` —
                # silently losing content for any sentence longer than
                # `overlap`. Carry only the tail of the PREVIOUS chunk as
                # overlap and keep the whole new sentence.
                if overlap > 0:
                    current = current[-overlap:] + s
                else:
                    current = s
        if current:
            chunks.append(current.strip())

    print(f"  ✅ 分片完成，总段数：{len(chunks)}")
    if len(chunks) > 0:
        print("  -> 示例（前3段，截断显示）:")
        for i, c in enumerate(chunks[:3]):
            print(f"     [{i}] {c[:200]}{'...' if len(c) > 200 else ''}")
    return chunks

# -------------------------
# 4) 使用 nomic-embed-text 批量生成 embeddings
#    - model 默认 'nomic-embed-text-v1.5'
#    - 使用 task_type='search_document'（文档片段）或 'search_query'（查询）
# -------------------------



def embed_chunks(chunks: List[str], model: str = "nomic-embed-text-v1.5", dimensionality: int = 768, batch_size: int = 64) -> np.ndarray:
    """Embed text chunks in batches with the nomic embedding API.

    Uses task_type='search_document' (document side of retrieval).

    Args:
        chunks: Text segments to embed.
        model: Nomic embedding model name.
        dimensionality: Requested embedding dimension.
        batch_size: Number of chunks sent per API call.

    Returns:
        float32 numpy array of shape (len(chunks), dimensionality).

    Raises:
        RuntimeError: if the SDK response carries no embeddings.
    """
    print(f"[STEP] 4. 开始生成 embeddings (model={model}, dim={dimensionality}, batch_size={batch_size}) ...")
    total = len(chunks)
    n_batches = math.ceil(total / batch_size)
    vectors = []
    for batch_no, offset in enumerate(range(0, total, batch_size), start=1):
        batch = chunks[offset:offset + batch_size]
        print(f"  -> embedding batch {batch_no}/{n_batches}, size={len(batch)} ...")
        out = embed.text(texts=batch, model=model, task_type="search_document", dimensionality=dimensionality)
        # The response shape varies across nomic SDK versions: dict vs. object.
        if isinstance(out, dict):
            batch_vecs = out.get("embeddings")
        else:
            batch_vecs = getattr(out, "embeddings", None) or out
        if batch_vecs is None:
            raise RuntimeError("nomic.embed 返回结果不包含 embeddings（检查 nomic SDK 版本/调用方式）")
        vectors.extend(batch_vecs)
    emb_array = np.array(vectors, dtype=np.float32)
    print(f"  ✅ embeddings 生成完成，shape={emb_array.shape}")
    return emb_array

# -------------------------
# 5) 批量插入到 Milvus（打印插入进度）
# -------------------------
def insert_into_milvus(collection: Collection, embeddings: np.ndarray, chunks: List[str], insert_batch_size: int = 500):
    """Insert embeddings and their source texts into Milvus in batches.

    Args:
        collection: Target pymilvus Collection (fields: embedding, text;
            the primary key is auto-generated).
        embeddings: float array aligned row-for-row with `chunks`.
        chunks: Chunk texts; len(chunks) must equal len(embeddings).
        insert_batch_size: Rows per insert call.
    """
    print(f"[STEP] 5. 插入到 Milvus collection '{collection.name}' (batch_size={insert_batch_size}) ...")
    total = len(chunks)
    for batch_no, start in enumerate(range(0, total, insert_batch_size), start=1):
        stop = min(start + insert_batch_size, total)
        vectors = embeddings[start:stop].tolist()
        texts = chunks[start:stop]
        print(f"  -> 插入批次 {batch_no}, size={len(texts)} ...")
        mr = collection.insert([vectors, texts])
        # mr.primary_keys has one generated id per inserted row.
        print(f"     已插入 {len(mr.primary_keys)} 条 (示例 id: {mr.primary_keys[:5]})")
    print("  ✅ 所有数据已插入 Milvus")

# -------------------------
# 6) 创建索引并 load
# -------------------------
def create_index_and_load(collection: Collection, index_type: str = "IVF_FLAT", metric_type: str = "COSINE", nlist: int = 1024):
    """Build a vector index on the `embedding` field and load the collection.

    Args:
        collection: Target pymilvus Collection.
        index_type: Milvus index type (e.g. IVF_FLAT).
        metric_type: Similarity metric (e.g. COSINE) — must match search params.
        nlist: IVF cluster count.
    """
    print(f"[STEP] 6. 创建索引 index_type={index_type} metric={metric_type} nlist={nlist} ...")
    collection.create_index(
        field_name="embedding",
        index_params={
            "index_type": index_type,
            "metric_type": metric_type,
            "params": {"nlist": nlist},
        },
    )
    print("  -> 索引已创建")
    # Searches require the collection to be loaded into memory first.
    collection.load()
    print("  -> collection 已加载到内存")

# -------------------------
# 7) 测试检索（query -> embed(query) -> search）
# -------------------------
def test_search(collection: Collection, query: str, model: str = "nomic-embed-text-v1.5", dimensionality: int = 768, top_k: int = 3):
    """Embed `query` (task_type='search_query') and run a top-k vector search.

    Args:
        collection: Loaded pymilvus Collection to search.
        query: Natural-language query text.
        model: Nomic embedding model name (should match the one used for docs).
        dimensionality: Embedding dimension (must match the collection field).
        top_k: Number of hits to print.
    """
    print(f"[STEP] 7. 测试检索（query 前 120 字）：{query[:120]} ...")
    out = embed.text(texts=[query], model=model, task_type="search_query", dimensionality=dimensionality)
    # Response shape varies by nomic SDK version (dict vs. object).
    q_emb = out["embeddings"][0] if isinstance(out, dict) else getattr(out, "embeddings", out)[0]
    search_params = {"metric_type": "COSINE", "params": {"nprobe": 16}}
    results = collection.search(data=[q_emb], anns_field="embedding", param=search_params, limit=top_k, output_fields=["text"])
    print("  ✅ 检索结果：")
    for hit in results[0]:
        txt = hit.entity.get("text") if hit.entity else None
        print(f"    - id={hit.id}, score={hit.distance:.6f}")
        # BUG FIX: the old code sliced `txt[:200]` unconditionally, which
        # raised TypeError whenever `txt` was None (the guard above allows it).
        snippet = "" if txt is None else txt[:200]
        print(f"      text: {snippet}{'...' if txt and len(txt) > 200 else ''}")

# -------------------------
# 整体运行示例（把上面串起来）
# -------------------------
def run_pipeline(
    file_path: str,
    milvus_host: str = "127.0.0.1",
    milvus_port: str = "19530",
    collection_name: str = "doc_chunks",
    chunk_size: int = 1000,
    overlap: int = 200,
    chunk_method: str = "sentence",
    model: str = "nomic-embed-text-v1.5",
    dimensionality: int = 768,
):
    """Run the full pipeline: connect -> (re)create collection -> read file ->
    chunk -> embed -> insert -> index + load -> demo search.

    Args mirror the step functions above; `dimensionality` is used both for
    the embedding model and the Milvus vector field, so they always agree.
    The target collection is dropped and recreated on every run.
    """
    connect_milvus(host=milvus_host, port=milvus_port)
    collection = create_collection(collection_name, dim=dimensionality, drop_if_exists=True)

    # Read the whole source document as UTF-8 text.
    print(f"[STEP] 0. 读取文件：{file_path} ...")
    with open(file_path, "r", encoding="utf-8") as f:
        text = f.read()
    print("  ✅ 文件读取完成，大小 (chars) =", len(text))

    chunks = chunk_text(text, chunk_size=chunk_size, overlap=overlap, method=chunk_method)
    embeddings = embed_chunks(chunks, model=model, dimensionality=dimensionality, batch_size=64)
    insert_into_milvus(collection, embeddings, chunks, insert_batch_size=500)
    create_index_and_load(collection, index_type="IVF_FLAT", metric_type="COSINE", nlist=1024)

    # Sanity-check retrieval with a demo query.
    test_search(collection, query="举个例子说明人工智能在医疗上的应用", model=model, dimensionality=dimensionality, top_k=3)

    print("[DONE] 全流程执行完毕（从连接 -> 分片 -> 导入 -> 索引 -> 检索）。")

# -------------------------
# 若作为脚本运行，请自行修改 file_path 并执行:
# python milvus_pipeline.py
# -------------------------
if __name__ == "__main__":
    # Example entry point — replace demo_file with your own text file path.
    demo_file = r"D:\润和\点金需要的文档和存储\nginx配置解释.txt"
    if os.path.exists(demo_file):
        run_pipeline(file_path=demo_file)
    else:
        print(f"请将待导入文本保存为 {demo_file}，然后重新运行脚本。")
