import os
import pandas as pd
from langchain_text_splitters import RecursiveCharacterTextSplitter
from openai import OpenAI
from pymilvus import Collection, FieldSchema, CollectionSchema, DataType, connections
from langchain.schema import Document

# SECURITY(review): a live-looking API key is hard-coded and committed to source.
# Rotate this key and load it from the environment / a secrets manager instead
# of embedding it here.
os.environ["OPENAI_API_KEY"] = "sk-38b1a77d899b4e708287a296ceeb02e3"

# Lazily-created shared client: building a fresh OpenAI client on every call is
# wasteful when this function is invoked once per chunk in a loop.
_embedding_client = None


def get_embeddings(input_text):
    """Return the embedding vector for *input_text*.

    Calls the DashScope OpenAI-compatible endpoint with the
    ``text-embedding-v3`` model (1024-dim output — must match the Milvus
    ``vector`` field's ``dim`` defined below).

    Args:
        input_text: Text to embed (a single string).

    Returns:
        list[float]: The embedding of the input.
    """
    global _embedding_client
    if _embedding_client is None:
        _embedding_client = OpenAI(
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
    response = _embedding_client.embeddings.create(
        model="text-embedding-v3",
        input=input_text,
        encoding_format="float",
    )
    # A single input is sent, so the first (only) datum holds its embedding.
    return response.data[0].embedding

# Load the acupoint table; only rows with a non-empty location are usable.
acu = pd.read_csv("AcupointTable.csv", encoding="utf-8")

# Fill missing metadata columns with empty strings: a NaN float is not valid
# JSON and would break the Milvus JSON `metadata` field on insert.
acu = acu.fillna({"AcupointName": "", "Operation": ""})

acu_location_documents = [
    Document(
        page_content=row["AcupointLocation"],
        metadata={
            "source": "AcupointLocation",
            "name": row["AcupointName"],
            "location": row["AcupointLocation"],
            "operation": row["Operation"],
        },
    )
    for _, row in acu.dropna(subset=["AcupointLocation"]).iterrows()
]

# Most location texts are short, so they pass through the splitter unchanged;
# the splitter only guards against the occasional very long entry.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)

split_documents = text_splitter.split_documents(acu_location_documents)

# One embedding request per chunk. NOTE(review): this is N sequential API
# calls; batching inputs in a single request would be faster for a large corpus.
embeddings = [get_embeddings(doc.page_content) for doc in split_documents]

# Connect to the local Milvus standalone instance.
connections.connect("default", host="localhost", port="19530")

# Field schemas.
collection_name = "acupoint"
id_field = FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True)  # auto-generated primary key
vector_field = FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=1024)  # must match text-embedding-v3 output dim
text_field = FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535)
metadata_field = FieldSchema(name="metadata", dtype=DataType.JSON)

# Collection schema.
schema = CollectionSchema(
    fields=[id_field, vector_field, text_field, metadata_field],
    description="Acupoint location schema",
)

# NOTE(review): this is not idempotent — re-running the script against an
# existing "acupoint" collection with a different schema will fail. Consider
# checking pymilvus.utility.has_collection() and dropping first.
collection = Collection(name=collection_name, schema=schema)

# Column-ordered data for the non-auto-id fields: vector, text, metadata.
texts = [doc.page_content for doc in split_documents]      # chunk contents
metadatas = [doc.metadata for doc in split_documents]      # per-chunk metadata dicts

# Insert, then flush so the inserted rows are sealed and persisted before the
# index is built — without flush() the data can sit in an unsealed growing
# segment and not be covered by the index.
collection.insert([embeddings, texts, metadatas])
collection.flush()

# Build an IVF_FLAT index (L2 metric) over the vector field.
index_params = {
    "index_type": "IVF_FLAT",
    "metric_type": "L2",
    "params": {"nlist": 128},
}
collection.create_index(field_name="vector", index_params=index_params)

print(f"Index created for collection {collection_name}")
