#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/9/24 22:22
# @Author  : Dell
# @File    : virtual_question.py
# @Software: PyCharm
# @Desc    :把问题转化成虚拟文档，然后实现虚拟文档与文档的搜索，提高精准度。
# 虚拟文档嵌入HyDE
import os

from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

# LLM client: Qwen via DashScope's OpenAI-compatible endpoint.
# SECURITY: the API key was previously hard-coded in this file; read it from
# the environment instead so the secret never lands in version control.
llm = ChatOpenAI(
    model_name='qwen-plus',
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    temperature=0.5,
    api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
)

# ---------------------- Hypothetical document generation (HyDE) ----------------------
# Rewrite the user's question as a hypothetical answer document; retrieving with
# that document instead of the raw question improves retrieval precision.
template = """请写一段介绍性文章来回答问题
问题：{question}
文章："""
prompt_hyde = ChatPromptTemplate.from_template(template)
generate_docs_for_retrieval = prompt_hyde | llm | StrOutputParser()

question = "AI在中国的发展历程"
# NOTE: the original invoked this chain twice (once discarding the result),
# which doubled the LLM cost; a single invocation suffices.
retrieved_docs = generate_docs_for_retrieval.invoke({"question": question})
print(f"打印问题转换成的文档内容：{retrieved_docs}")

# ---------------------- RAG answer generation ----------------------
# Answer the original question using the hypothetical document as context.
template = """根据此上下文回答以下问题：

{context}

问题：{question}
"""
prompt = ChatPromptTemplate.from_template(template)

final_rag_chain = prompt | llm | StrOutputParser()

f_re = final_rag_chain.invoke({"context": retrieved_docs, "question": question})
print(f"\n打印最终检索结果：{f_re}")