import os

from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI
from openai import OpenAI


class QwenLLM:
    """Chat wrapper for Alibaba's Qwen models via the DashScope
    OpenAI-compatible endpoint.

    Accepts LangChain message objects (``HumanMessage`` / ``AIMessage``)
    and returns an ``AIMessage``, so instances can be dropped into
    LangChain-style pipelines.
    """

    def __init__(self):
        # SECURITY: the API key was hard-coded in source. Prefer the
        # DASHSCOPE_API_KEY environment variable; the embedded key is kept
        # only as a backward-compatible fallback and should be rotated.
        api_key = os.getenv(
            "DASHSCOPE_API_KEY", "sk-965dc39b016c49ecbe29de180f4db2b6"
        )
        # BUG FIX: the original constructed a LangChain ``ChatOpenAI`` client
        # but ``invoke`` called ``self.client.chat.completions.create(...)``,
        # which only exists on the raw OpenAI SDK client — every call raised
        # AttributeError. Build the matching ``OpenAI`` client instead.
        self.client = OpenAI(
            api_key=api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # compatible-mode endpoint
        )
        # Model and sampling settings; previously configured on ChatOpenAI
        # but never reaching the actual request. Now applied in invoke().
        self.model = "qwen-plus"  # alternatives: "qwen-turbo", "qwen-max", ...
        self.temperature = 0.7
        self.max_tokens = 2048
        # System prompt (runtime string — intentionally left unchanged).
        self.prompt = "你是一个有用的智能助手!"

    def invoke(self, messages: list) -> AIMessage:
        """Send a LangChain-style conversation to Qwen and return the reply.

        Args:
            messages: Sequence of LangChain messages, oldest first.
                ``HumanMessage`` maps to the "user" role; any other message
                type is treated as an "assistant" turn.

        Returns:
            AIMessage wrapping the model's reply text.
        """
        openai_messages = [{"role": "system", "content": self.prompt}]
        for message in messages:
            role = "user" if isinstance(message, HumanMessage) else "assistant"
            openai_messages.append({"role": role, "content": message.content})
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=openai_messages,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            stream=False,  # non-streaming response
        )
        return AIMessage(content=completion.choices[0].message.content)

