
import lancedb
from sentence_transformers import SentenceTransformer
import pyarrow as pa

# Initialize the embedding model and connect to the on-disk LanceDB database.
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
db = lancedb.connect("data/semantic_db")

# Sample corpus (Chinese sentences about pandas, the Great Wall, and quantum computing).
documents = [
    "熊猫是中国的国宝，主要栖息在四川山区",
    "长城是古代军事防御工程，全长超2万公里",
    "量子计算利用量子力学原理进行信息处理"
]

# Embed the corpus and create the table with an explicit fixed-size-vector schema.
embeddings = model.encode(documents)
vector_dim = len(embeddings[0])  # 384 for all-MiniLM-L6-v2 — TODO confirm against model card
schema = pa.schema([
    pa.field("text", pa.string()),
    pa.field("vector", pa.list_(pa.float32(), vector_dim)),
])
# mode="overwrite" drops any pre-existing "documents" table so the script is rerunnable.
table = db.create_table("documents", schema=schema, mode="overwrite")
table.add([{"text": doc, "vector": vec} for doc, vec in zip(documents, embeddings)])

# Semantic search over the indexed documents.
def semantic_search(query: str, top_k: int = 1) -> str:
    """Return the text of the stored document most similar to *query*.

    Parameters:
        query: natural-language query string to embed and search with.
        top_k: number of nearest neighbours to fetch; only the closest
            match's text is returned.

    Raises:
        ValueError: if the search returns no rows.
    """
    query_vec = model.encode(query)
    results = table.search(query_vec).limit(top_k).to_pandas()
    # Guard against an empty result set — iloc[0] would otherwise raise an
    # opaque IndexError.
    if results.empty:
        raise ValueError(f"no documents matched query: {query!r}")
    return results.iloc[0]["text"]

# Example query — only runs when the file is executed directly, so importing
# this module for its `semantic_search` function does not trigger the demo.
if __name__ == "__main__":
    print("OK"+semantic_search("中国的国宝？"))

    print("OKK")

