import logging
import os

from llama_index.core import Settings, get_response_synthesizer
from llama_index.core.schema import TextNode, NodeWithScore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.dashscope import DashScope

# Verbose logging so every LLM / embedding call is visible while developing.
logging.basicConfig(level=logging.DEBUG)
print('初始化模型对象')

# Fail fast with an actionable message when the key is absent, instead of
# passing api_key=None to DashScope and failing later with an opaque auth error.
_dashscope_api_key = os.getenv("DASHSCOPE_API_KEY")  # obtained from the Aliyun console
if not _dashscope_api_key:
    raise EnvironmentError(
        "DASHSCOPE_API_KEY is not set; export it before running this script."
    )

llm_dash = DashScope(
    model="qwen-turbo",  # Tongyi Qianwen turbo; switch to "qwen-max" for the Max model
    api_key=_dashscope_api_key,
    temperature=0.3,
)
embed_model_zh = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-zh-v1.5",  # Chinese-optimized embedding model
    device="cpu",
)
# Register both models as llama_index's global defaults so later components
# (e.g. the response synthesizer below) pick them up implicitly.
Settings.embed_model = embed_model_zh
Settings.llm = llm_dash

# Toy in-memory corpus: three unrelated Chinese sentences; only the first
# one answers the monument question posed further down.
_corpus_texts = (
    "市中心广场的纪念碑建于1900年。",
    "一只乌龟生活在动物园。",
    "一朵兰花在深夜绽放。",
)
nodes = [TextNode(text=sentence) for sentence in _corpus_texts]
# Wrap each node (without a retrieval score) in the form the synthesizer expects.
node_with_score_list = [NodeWithScore(node=node) for node in nodes]

# Build a refine-mode synthesizer; it uses the globally configured Settings.llm.
response_synthesizer = get_response_synthesizer(
    streaming=False,
    use_async=False,
    response_mode="refine",
)
# Synthesize an answer over the pre-built nodes (no retrieval step here).
answer = response_synthesizer.synthesize(
    "纪念碑是什么时候建的？",
    nodes=node_with_score_list,
)
print('检索结果:', answer)
