#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/9/22 17:40
# @Author  : Dell
# @File    : context_question.py
# @Software: PyCharm
# @Desc    :langchain 应用之上下文问答

import os

from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter  # may still live at this legacy path
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings  # Alibaba Cloud DashScope embedding model
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI

# SECURITY: the DashScope API key was hard-coded here (a leaked credential).
# Read it from the environment instead; set DASHSCOPE_API_KEY before running.
API_KEY = os.getenv("DASHSCOPE_API_KEY", "")

# Qwen chat model served through DashScope's OpenAI-compatible endpoint.
llm = ChatOpenAI(
    model_name='qwen-plus',
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    temperature=0.5,
    api_key=API_KEY,
)

# Load the source document (Journey to the West text, UTF-8).
loader = TextLoader(file_path="xiyouji.txt", encoding="utf-8")
documents = loader.load()

# Initialize the splitter: ~1000-char chunks with 50-char overlap so context
# is not lost at chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
# Split the documents into chunks.
texts = text_splitter.split_documents(documents)

# Total character count, so we can report the average chunk size.
num_total_characters = sum(len(x.page_content) for x in texts)
# BUGFIX: the original multiplied the average by 100; also guard against an
# empty split (division by zero).
avg_chars = num_total_characters / len(texts) if texts else 0
print(f"现在你有{len(texts)}个文档，平均每个文档有{avg_chars:.2f} 个字符")

# Initialize the embedding engine (DashScope text-embedding-v1).
embeddings = DashScopeEmbeddings(model='text-embedding-v1', dashscope_api_key=API_KEY)
# Build an in-memory FAISS index over the chunks.
textsearch = FAISS.from_documents(texts, embeddings)

# Retrieval-QA chain; "stuff" concatenates the retrieved chunks into one prompt.
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=textsearch.as_retriever())
query = "西游记中孙悟空的师傅是谁？"
response = qa.invoke(query)
print(response)
