import os

from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import BaichuanTextEmbeddings
from langchain_community.embeddings import ZhipuAIEmbeddings
from langchain_community.llms import Ollama
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
import requests
#from auth_util import gen_sign_headers

# Load the source document and split it into chunks for embedding.
documents = TextLoader("output.txt", encoding='utf-8').load_and_split()

# Embedding model instance used to vectorize the documents.
# NOTE(security): an API key was hard-coded here and is now exposed in
# version control — rotate it, and supply it via the BAICHUAN_API_KEY
# environment variable instead. The literal is kept only as a fallback
# for backward compatibility.
embeddings = BaichuanTextEmbeddings(
    baichuan_api_key=os.getenv(
        "BAICHUAN_API_KEY", "sk-ea8ee1a5f86721978746fe3537e38d15"
    )
)

# Build the Chroma vector store and persist it under ./chroma1_db.
# The original code re-embedded every document on each run and then
# immediately overwrote that instance with a reload of the same
# directory; instead, reuse the persisted DB when it already exists
# and only embed when the store has not been built yet.
if os.path.isdir("./chroma1_db") and os.listdir("./chroma1_db"):
    # Load the previously persisted vector store from disk.
    vectorstore = Chroma(
        persist_directory="./chroma1_db", embedding_function=embeddings
    )
else:
    # First run: embed the documents and persist the store locally.
    vectorstore = Chroma.from_documents(
        documents,
        embedding=embeddings,
        persist_directory="./chroma1_db",
    )

# Expose the vector store as a retriever for context lookup.
retriever = vectorstore.as_retriever()

# Prompt: answer strictly from the retrieved context, thinking step by step.
template = """仅根据以下上下文回答问题，如果不能，请直接说:我不知道，
请一步一步地思考:
{context}

提问: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# Local language model (served by Ollama) that generates the answer.
llm = Ollama(model="qwen2")

# Assemble the RAG pipeline: fetch context in parallel with passing the
# question through, fill the prompt, run the model, and parse to a string.
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | llm
    | StrOutputParser()
)

print(chain.invoke("CWE是什么？"))
