"""
构建语义搜索引擎
"""
import os
os.environ["LANGSMITH_TRACING"] = "true"
os.environ["LANGSMITH_API_KEY"] = "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"
import tqdm
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from tutorials.langchain_components import QwenEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.vectorstores import VectorStore
from langchain_core.language_models.llms import LLM
from typing_extensions import List

# --- Document loading ---
# Load the 10-K PDF; PyPDFLoader yields one Document per page.
file_path = "../data/nke-10k-2023.pdf"
loader = PyPDFLoader(file_path)
docs = loader.load()

# --- Chunking ---
# Overlapping chunks preserve context across chunk boundaries;
# add_start_index records each chunk's offset within its source page
# so downstream consumers can locate it.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=200, add_start_index=True
)

all_splits = text_splitter.split_documents(docs)

# --- Embedding & indexing ---
embeddings = QwenEmbeddings()
BATCH_SIZE = 32  # chunks embedded per request; keeps payloads small

# FAISS.from_documents needs a non-empty seed batch to initialise the index.
first_batch = all_splits[:BATCH_SIZE]
if not first_batch:
    print("没有可处理的文本块，程序退出。")
    # BUG FIX: `raise "<str>"` is a TypeError in Python 3 — exceptions
    # must derive from BaseException. SystemExit matches the intent of
    # aborting the script.
    raise SystemExit("没有可处理的文本块，程序退出")

# BUG FIX: ceiling division — the old `len // BATCH_SIZE + 1` reported
# one batch too many whenever len(all_splits) was an exact multiple of
# BATCH_SIZE.
total_batches = -(-len(all_splits) // BATCH_SIZE)
print(f"正在处理第 1 批 (共 {total_batches} 批)...")
db = FAISS.from_documents(first_batch, embeddings)

# Append the remaining chunks to the already-initialised index in batches.
for i in tqdm.tqdm(range(BATCH_SIZE, len(all_splits), BATCH_SIZE), desc="Adding documents to FAISS"):
    batch = all_splits[i:i + BATCH_SIZE]
    if batch:
        db.add_documents(batch)

# Persist the index so later runs can FAISS.load_local(...) instead of
# re-embedding everything.
db.save_local("../save_dir")




