import os

from langchain_community.chat_models import ChatTongyi
from langchain_openai import ChatOpenAI
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.base.embeddings.base import similarity
from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform
from llama_index.core.query_engine import MultiStepQueryEngine
from llama_index.embeddings.dashscope import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)
from llama_index.llms.langchain import LangChainLLM
# Multi-step RAG: decompose a large question into smaller sub-questions,
# answer each against a vector index over local docs, and combine the results.

# SECURITY: the original source hard-coded a live DashScope API key (twice).
# Read it from the environment instead; the leaked key should be rotated.
API_KEY = os.getenv("DASHSCOPE_API_KEY")

# Text-embedding model used to vectorize both documents and queries.
embed_model = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V3,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
    api_key=API_KEY,
)

# Initialize the LLM (Tongyi qwen-plus) that drives decomposition and answers.
llm = ChatTongyi(model="qwen-plus", api_key=API_KEY)

# Register as llama_index global defaults so the index/query engine pick them up.
Settings.llm = llm
Settings.embed_model = embed_model

# Query transform that splits the user question into sequential sub-questions.
step_decompose_transform = StepDecomposeQueryTransform(verbose=True)

# Raw string for the Windows path: the original relied on Python passing
# unrecognized backslash escapes through unchanged, which is fragile and
# emits deprecation warnings on modern interpreters.
docs = SimpleDirectoryReader(r"D:\Code\sshcode\RAG_pro\docs").load_data()
index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine(similarity_top_k=5, streaming=True)

# Wrap the base engine so each decomposed step is answered in turn;
# index_summary tells the transform what the index is about.
multi_step_query_engine = MultiStepQueryEngine(
    query_engine=query_engine,
    query_transform=step_decompose_transform,
    index_summary="公司人员信息",
)

question = "张华是什么部门的做什么职务？"
multi_step_res = multi_step_query_engine.query(question)
print(multi_step_res)
# res = query_engine.query("张华是什么部门的")
# print(res)