from csagent.core.node.base_node import BaseNode
from csagent.core.context import AgentState
from openai import OpenAI
import time
import os
import copy
class Node(BaseNode):
    """LLM node: sends the latest user message to an OpenAI-compatible
    chat-completions endpoint and streams the assistant reply.

    Each streamed chunk is forwarded to ``stream_writer`` (when one is
    provided) as a deep copy of the state flagged with ``is_delta``; the
    concatenated full reply is appended to ``state['messages']`` at the end.
    """

    def initialize(self):
        """Initialize the node; logs the node configuration for debugging."""
        print(self.conf)
        print('llm Node初始化成功')

    def execute(self, state: AgentState, stream_writer=None) -> AgentState:
        """Run one LLM turn against the configured endpoint.

        Args:
            state: agent state; ``state['messages'][-1]['content']`` is used
                as the user query.
                # NOTE(review): only the last message is sent — earlier
                # conversation history is dropped; confirm this is intended.
            stream_writer: optional callable that receives a per-chunk delta
                copy of the state while the response streams in.

        Returns:
            The same ``state`` object with the full assistant reply appended
            to ``state['messages']``.
        """
        print("llm node开始执行")
        query = state['messages'][-1]['content']
        # Credentials come from the environment; if no environment variables
        # are configured, replace with a literal api_key="sk-xxx".
        client = OpenAI(
            api_key=os.getenv("CS_API_KEY"),
            base_url=os.getenv("CS_BASE_URL"),
        )
        start_ms = int(time.time() * 1000)
        response = client.chat.completions.create(
            model="qwen-turbo",
            messages=[
                {"role": "user", "content": query}
            ],
            stream=True  # key parameter: enables streaming responses
        )
        parts = []  # content pieces accumulated during streaming
        for chunk in response:
            # Hoist the delta content: the original re-evaluated this
            # attribute chain three times per chunk.
            piece = chunk.choices[0].delta.content
            if piece is None:
                continue
            parts.append(piece)
            # BUGFIX: stream_writer defaults to None but was called
            # unconditionally, crashing with TypeError whenever the caller
            # did not supply a writer. Only emit deltas when one exists.
            if stream_writer is not None:
                delta = copy.deepcopy(state)
                delta['is_delta'] = True
                delta['messages'].append({"role": "assistant", "content": piece})
                stream_writer(delta)
        end_ms = int(time.time() * 1000)
        print("llm node cost:%d" % (end_ms - start_ms))
        # join() avoids the quadratic behavior of repeated string +=
        state['messages'].append({"role": "assistant", "content": "".join(parts)})
        return state