import os

from langchain_community.chat_models import ChatTongyi
from langchain_openai import ChatOpenAI
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.langchain import LangChainLLM

# llm = ChatOpenAI(model="qwen2.5:7b",base_url="http://192.168.1.102:64342/v1/",api_key="123")
from llama_index.embeddings.dashscope import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType
)

# SECURITY NOTE(review): the DashScope API key was hard-coded (twice) in this
# file. Read it from the DASHSCOPE_API_KEY environment variable instead; the
# literal fallback keeps existing behavior, but the exposed key should be
# rotated and the fallback removed.
_DASHSCOPE_API_KEY = os.getenv(
    "DASHSCOPE_API_KEY", "sk-f97e3654139742a4b01a99631628d36d"
)

# Embedding model: DashScope text-embedding-v3 in document mode.
embed_model = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V3,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
    api_key=_DASHSCOPE_API_KEY,
)

# Chat LLM (Tongyi qwen-plus) used both for answering and for evaluation.
llm = ChatTongyi(model="qwen-plus", api_key=_DASHSCOPE_API_KEY)

# Register both as the LlamaIndex global defaults so the index/query engine
# below picks them up without explicit wiring.
Settings.llm = llm
Settings.embed_model = embed_model

# Load the documents and build an in-memory vector index over them.
# Raw string: the original literal relied on invalid escape sequences
# (\C, \s, \R, \d) passing through unchanged, which raises SyntaxWarning on
# Python 3.12+ and is fragile; r"..." yields the identical path safely.
docs = SimpleDirectoryReader(r"D:\Code\sshcode\RAG_pro\docs").load_data()
index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine()
# res = query_engine.query("张华是什么职位？")
# print(res)
# context = [node.get_content() for node in res.source_nodes]
# print(context)

# def check_answer(question, answer, correct_answer):
#     prompt = ("你是⼀个测试⼈员。\n"
#     "下⾯给出了测试问题，正确答案以及回答。\n"
#     "你需要根据这些信息判断回答的内容是不是回答了⽤户的问题。\n"
#     "回复只能是：回答正确 或者 回答错误。请勿给出其他信息。\n"
#     "------"
#     f"问题是 {question}"
#     "------"
#     f"正确答案是： {correct_answer}"
#     "------"
#     f"回答是： {answer}"
#      )
#
#     return llm.invoke(prompt)
#
# # 问题
# question = "张华是什么职位？"
# # 正确答案
# correct_answer = "张华是产品经理"
# # RAG应⽤给出的回答
# answer = "无法回答该问题，因为提供的信息中没有提到名为“张华”的员工。"
# res = check_answer(question,answer,correct_answer)
# print(res)


def check_contexts(question, contexts):
    """Ask the LLM to judge whether *contexts* help answer *question*.

    The model is instructed to reply with exactly 有⽤ ("useful") or
    ⽆⽤ ("useless"). Returns the raw LLM response object.
    """
    # Fixed instruction header, then the question/contexts payload.
    instructions = (
        "你是⼀个测试⼈员。你需要检测下⾯的这些参考资料是否能对回答问题提供帮助。\n"
        "回复只能是：有⽤ 或者 ⽆⽤。请勿给出其他信息。\n"
    )
    payload = f"------问题是： {question}------参考资料是： {contexts}"
    return llm.invoke(instructions + payload)

# 问题
question = "张华是哪个部⻔的？"
# RAG应⽤给出的回答
contexts = [
    "星辰科技有限公司产品运营部员工张华，职位为产品经理，员工编号CY22001，入职日期2022-03-20，负责产品需求调研与分析，制定产品路线图。"
]
result = check_contexts(question,contexts)
print(result)
