import os

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import QianfanEmbeddingsEndpoint

# Character-based splitter: 100-char chunks with a 20-char overlap so
# context is not lost at chunk boundaries.  (Fixes the original's
# misspelled name "text_spliter".)
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,  # chunk size is measured in characters
)

# Load the PDF and split it into Document chunks in a single pass.
loader = PyPDFLoader("钱学森.pdf")
documents = loader.load_and_split(text_splitter=text_splitter)
print(len(documents))
# Guard the debug prints: the original indexed documents[1]
# unconditionally, which raises IndexError when the PDF produces
# fewer than two chunks.
if len(documents) > 1:
    print(documents[1])
    print(len(documents[1].page_content))

# Qianfan embeddings client.
# SECURITY NOTE(review): the original hard-coded the API key/secret in
# source.  They are now read from the environment (QIANFAN_AK /
# QIANFAN_SK); the original literals remain only as backward-compatible
# fallbacks.  These credentials are leaked in version history — rotate
# them and delete the fallback values.
embeddings = QianfanEmbeddingsEndpoint(
    qianfan_ak=os.getenv("QIANFAN_AK", 'ae7eoePCWBS8nBTQv7vbmyxm'),
    qianfan_sk=os.getenv("QIANFAN_SK", 'tvxwV0Emkp9sxuU3neDfF77Drhl7TWUa'),
)

# Example of embedding a single chunk directly (kept for reference):
#ans = embeddings.embed_documents([document[0]])
#print(f"{len(ans)}, {len(ans[0])}")

#print(ans[0])

# Build an in-memory Chroma vector store from the chunks, then run a
# top-1 similarity search against it and print the hit(s).
db = Chroma.from_documents(documents, embeddings)
query = "钱学森"
ans = db.similarity_search(query, 1)
for i in ans:
    print(i)