from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Build a FAISS similarity index over car-sales Q&A data, persist it locally,
# and run a sample top-K retrieval.
#
# NOTE(review): requires the OPENAI_API_KEY environment variable to be set for
# OpenAIEmbeddings — confirm against the deployment environment.
with open("real_car_sales_data.txt", encoding='utf-8') as f:
    real_car_sales = f.read()

# Split on numbered entries ("\n1.", "\n2.", ...) rather than at a fixed size,
# so each resulting document corresponds to one record in the source file.
text_splitter = CharacterTextSplitter(
    separator=r'\n\d+\.',
    chunk_size=100,
    chunk_overlap=0,
    length_function=len,
    is_separator_regex=True,
)

docs = text_splitter.create_documents([real_car_sales])

# Embed each chunk with OpenAI and save the FAISS index to disk so it can be
# reloaded later without re-embedding.
db = FAISS.from_documents(docs, OpenAIEmbeddings())
db.save_local("real_car_sale")

# Retrieve the 3 most similar chunks for the sample query.
topK_retriever = db.as_retriever(search_kwargs={"k": 3})

query = "汽车的续航是多少？"  # "What is the car's driving range?"
docs = topK_retriever.invoke(query)  # .invoke() is the current retriever API
for doc in docs:
    print(doc.page_content + "\n")