# from langchain_community.document_loaders import WebBaseLoader
# from langchain.text_splitter import CharacterTextSplitter
# from langchain import *
from langchain_community import embeddings
from langchain_community.chat_models import ChatOllama
from langchain_community.document_loaders import RecursiveUrlLoader, TextLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_text_splitters import MarkdownHeaderTextSplitter

print("开始执行")


# 1. Load the markdown file and split it into header-delimited chunks.
print("读取文件")
# documents = RecursiveUrlLoader("http://sec.x.x/ffat/").load()
documents = TextLoader("content/2023/0412.md", encoding="utf-8").load()
print(documents)
print("分词")
# Split on level-1 (#) and level-2 (##) markdown headers; the matched header
# text is recorded in each chunk's metadata under these keys.
headers_to_split_on = [
    ("#", "Header 1"),
    ("##", "Header 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on)
# split_text() expects a raw string, not a list of Document objects, so join
# the page content of every loaded document before splitting.
doc_splits = markdown_splitter.split_text(
    "\n".join(doc.page_content for doc in documents)
)

# 2. Embed the chunks and index them in an in-memory vector store.
# Bind to `embedding_model` rather than `embeddings`, which would shadow the
# `embeddings` module imported from langchain_community at the top of the file.
embedding_model = OllamaEmbeddings(model='nomic-embed-text')
vectorstore = DocArrayInMemorySearch.from_documents(doc_splits, embedding_model)
retriever = vectorstore.as_retriever()
print("向模型提问")
# 3. Ask the model, grounding the answer only in the retrieved context.
# {context} is filled by the retriever, {question} by the user's query.
template = """
仅根据已有的知识{context}回答问题，不需要扩展 
Question: {question}
"""


prompt = ChatPromptTemplate.from_template(template)
print(prompt)

# Local Ollama-served model; requires an Ollama server with qwen2:7b pulled.
model_local = ChatOllama(model="qwen2:7b")

print("使用qwen2:7b")
# Retrieval-augmented chain: the retriever supplies the context documents, the
# raw question passes through unchanged, the LLM answers, and the output
# parser reduces the message to a plain string.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model_local
    | StrOutputParser()
)
print(chain.invoke("Snakeyaml反序列化（CVE-2022-1471）漏洞排查方案"))