from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.chat_models import ChatOllama
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser


# https://python.langchain.com/v0.1/docs/get_started/quickstart/


# Configuration: source document and the two Ollama models used by the pipeline.
DOC_PATH = "content/2023/0412.md"
EMBED_MODEL = "nomic-embed-text"
CHAT_MODEL = "qwen2:7b"

# Prompt template. Note: {question} is interpolated twice (inline and on the
# labelled "Question:" line) — both receive the same value at invoke time.
TEMPLATE = """
你是一个安全方向的专家，请根据已有的知识{context}回答问题{question}，不需要扩展 
Question: {question}
"""


def build_chain(doc_path: str = DOC_PATH):
    """Build the retrieval-augmented generation chain.

    Steps: load the markdown file, split it into token-sized chunks, embed the
    chunks into an in-memory vector store, then compose
    retriever -> prompt -> chat model -> string parser with LCEL.

    Args:
        doc_path: Path of the markdown document to index (defaults to DOC_PATH).

    Returns:
        A runnable chain; ``chain.invoke(question)`` returns the model's answer
        as a plain string.
    """
    # 1. Load the file and split it into chunks.
    print("读取文件")
    # https://python.langchain.com/v0.2/docs/integrations/document_loaders/
    documents = TextLoader(doc_path, encoding="utf-8").load()
    print(documents)

    print("分词")
    # chunk_size/chunk_overlap are measured in tiktoken tokens, not characters.
    text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=7500, chunk_overlap=100
    )
    doc_splits = text_splitter.split_documents(documents)

    # 2. Embed the chunks and store them in an in-memory vector store.
    embeddings = OllamaEmbeddings(model=EMBED_MODEL)
    vectorstore = DocArrayInMemorySearch.from_documents(doc_splits, embeddings)
    retriever = vectorstore.as_retriever()

    # 3. Compose the chain: the retriever fills {context}, the raw question
    #    passes through to {question}.
    prompt = ChatPromptTemplate.from_template(TEMPLATE)
    model_local = ChatOllama(model=CHAT_MODEL)
    return (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model_local
        # Output parser: converts the chat message into a plain string.
        | StrOutputParser()
    )


def main() -> None:
    """Build the chain and ask it the demo question."""
    chain = build_chain()
    print(TEMPLATE)
    print(chain.invoke("Snakeyaml反序列化（CVE-2022-1471）漏洞排查方案"))


# Guarded entry point so importing this module does not trigger file loads
# or network calls to the Ollama server.
if __name__ == "__main__":
    main()