from langchain_community.embeddings import ZhipuAIEmbeddings
from langchain_community.vectorstores import Milvus
import os
from dotenv import load_dotenv, find_dotenv
from langchain_community.document_loaders import JSONLoader


def batch_generator(lst, batch_size):
    """Yield consecutive slices of *lst*, each *batch_size* long.

    The final batch may be shorter when ``len(lst)`` is not a multiple
    of ``batch_size``.
    """
    start = 0
    total = len(lst)
    while start < total:
        yield lst[start:start + batch_size]
        start += batch_size


def metadata_func(record: dict, metadata: dict) -> dict:
    """Copy the source record's ``article_id`` into the document metadata.

    Used as the ``metadata_func`` callback of ``JSONLoader``; returns the
    (mutated) *metadata* dict as the loader expects.
    """
    metadata.update(article_id=record.get("article_id"))
    return metadata


def get_metadata(record: dict, metadata: dict) -> dict:
    """Copy article-level fields from a raw JSON record into metadata.

    ``JSONLoader`` metadata callback: pulls the question, link, title,
    content and id off the record (``None`` when absent) and returns the
    mutated *metadata* dict.
    """
    for field in ("question", "zhihu_link", "title", "content", "id"):
        metadata[field] = record.get(field)
    return metadata


def vector_db_batches(data, batch_size):
    """Embed and upload documents to the Milvus "quote" collection in batches.

    Relies on the module-level ``embeddings`` (ZhipuAI client) and ``token``
    (Zilliz access token) globals being initialised before this is called.

    Args:
        data: sequence of LangChain Document objects to index.
        batch_size: number of documents per upload batch.
    """
    for batch in batch_generator(data, batch_size):
        # from_documents both embeds and inserts the batch; the returned
        # vector-store handle is not needed, so it is intentionally discarded
        # (the original bound it to an unused local).
        Milvus.from_documents(
            documents=batch,
            embedding=embeddings,
            collection_name="quote",
            connection_args={
                "uri": "https://in03-f59eedf3170792f.serverless.ali-cn-hangzhou.cloud.zilliz.com.cn",
                "token": token,
                "secure": True
            }
        )
        print(f"Processing batch with {len(batch)} items")


# Load environment variables from the nearest .env file on the search path.
_ = load_dotenv(find_dotenv())
api_key = os.environ["API_key"]  # ZhipuAI API key; raises KeyError if unset
token = os.environ["Token"]      # Zilliz/Milvus access token; raises KeyError if unset
# Embedding client shared by the ingestion below (used inside vector_db_batches).
embeddings = ZhipuAIEmbeddings(
    model="embedding-3",
    api_key=api_key
)

file_path = './data/response.json'
# Quote-level loader: one Document per excerpt, with the excerpt's `quote`
# text as page_content and its parent article_id attached as metadata.
loader = JSONLoader(file_path=file_path,
                    jq_schema='.data[]|select(.excerptCacheList != null) | .excerptCacheList[]',
                    content_key='quote',
                    metadata_func=metadata_func,
                    text_content=False)
# Article-level loader: one Document per article, carrying question/link/
# title/content/id in metadata (page_content is just the article id).
article_data = JSONLoader(file_path=file_path,
                          jq_schema='.data[]',
                          content_key='id',
                          metadata_func=get_metadata,
                          text_content=False)
temp = article_data.load()  # article-level documents
docs = loader.load()        # quote-level documents

# Merge article-level metadata into each quote document.  The index advances
# monotonically and is never reset, which works like a merge join — this
# assumes `docs` and `temp` list articles in the same order (TODO confirm);
# a quote whose article_id is absent from `temp` would run `index` past the
# end and raise IndexError.
index = 0
for doc in docs:
    article_id = doc.metadata["article_id"]
    # Advance until the article record matching this quote is found.
    while article_id != temp[index].metadata["id"]:
        index += 1
    print(article_id, temp[index].metadata["id"])
    doc.metadata["question"] = temp[index].metadata["question"]
    doc.metadata["zhihu_link"] = temp[index].metadata["zhihu_link"]
    doc.metadata["title"] = temp[index].metadata["title"]
    # Preserve the raw quote text in metadata before page_content is rewritten.
    doc.metadata["content"] = doc.page_content
    # Rewrite page_content as "问题是<question>相应回答是<quote>" for embedding.
    # NOTE(review): concatenation raises TypeError if "question" is None — confirm
    # every article record carries a question.
    doc.page_content = '问题是' + doc.metadata["question"] + '相应回答是' + doc.page_content

# Upload in batches of 5; only the first 20 docs — presumably a trial run,
# widen the slice for a full ingest.
vector_db_batches(docs[:20], 5)
