import re
import numpy as np
from nomic import embed
from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility

# ===== 1. Milvus connection assumed to be established already =====
collection_name = "doc_chunks"

# Start from a clean slate: drop any previous collection with this name.
if utility.has_collection(collection_name):
    utility.drop_collection(collection_name)

# Schema: auto-generated INT64 primary key, a 768-dim float vector
# (nomic-embed-text outputs 768 dimensions), and the raw text fragment.
_id_field = FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True)
_vector_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=768)
_text_field = FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=2000)

schema = CollectionSchema(
    [_id_field, _vector_field, _text_field],
    description="文档分片向量库",
)

collection = Collection(name=collection_name, schema=schema)

# ===== 2. Text chunking function =====
def chunk_text(text, chunk_size=500, overlap=50):
    """Split *text* into overlapping fixed-size chunks.

    Runs of whitespace are first collapsed to single spaces, then the text
    is cut into windows of ``chunk_size`` characters, each window starting
    ``chunk_size - overlap`` characters after the previous one (so adjacent
    chunks share ``overlap`` characters).

    Args:
        text: Input string to split.
        chunk_size: Maximum length of each chunk; must be positive.
        overlap: Characters shared between consecutive chunks; must satisfy
            ``0 <= overlap < chunk_size``.

    Returns:
        List of chunk strings (empty if *text* is empty).

    Raises:
        ValueError: If ``chunk_size <= 0`` or ``overlap`` is not in
            ``[0, chunk_size)`` — the original code looped forever in that
            case because the window start never advanced.
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    if not 0 <= overlap < chunk_size:
        raise ValueError("overlap must satisfy 0 <= overlap < chunk_size")

    text = re.sub(r'\s+', ' ', text)  # simple cleanup: collapse whitespace
    step = chunk_size - overlap  # guaranteed >= 1 by the checks above
    return [text[start:start + chunk_size] for start in range(0, len(text), step)]

# ===== 3. Read the document and split it into chunks =====
with open("your_document.txt", "r", encoding="utf-8") as doc_file:
    document = doc_file.read()

chunks = chunk_text(document)
print(f"✅ 文本分片完成，共 {len(chunks)} 段")

# ===== 4. Vectorize the chunks with nomic-embed-text =====
response = embed.text(texts=chunks, model="nomic-embed-text")
embeddings = response.embeddings

# ===== 5. Insert into Milvus =====
# Field order matches the schema's non-auto fields: embedding, then text.
collection.insert([embeddings, chunks])

# Build an IVF_FLAT index with cosine similarity on the embedding field.
collection.create_index(
    field_name="embedding",
    index_params={
        "metric_type": "COSINE",
        "index_type": "IVF_FLAT",
        "params": {"nlist": 64},
    },
)

# Load the collection into memory so it can serve searches.
collection.load()
print("✅ 数据已导入并索引完成")

# ===== 6. Retrieval smoke test =====
query = "人工智能在医疗领域的应用"
query_embedding = embed.text(texts=[query], model="nomic-embed-text").embeddings

search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}}
results = collection.search(
    data=query_embedding,
    anns_field="embedding",
    param=search_params,
    limit=3,
    output_fields=["text"],
)

# Print the top hits with their similarity score and stored text fragment.
for hit in results[0]:
    print(f"相似度: {hit.distance:.4f}, 文本片段: {hit.entity.get('text')}")
