import os
from dotenv import load_dotenv
from pydantic import BaseModel
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage,SystemMessage
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.base import BaseCallbackHandler
from typing import List

load_dotenv()

# Configuration: credentials and endpoint for the SiliconFlow
# OpenAI-compatible API (token is read from the environment / .env file;
# it will be None if SILICONFLOW_API_TOKEN is not set).
API_TOKEN = os.getenv("SILICONFLOW_API_TOKEN")
API_URL = "https://api.siliconflow.cn/v1/"


class Message(BaseModel):
    """One chat message: an OpenAI-style role plus its text content."""

    role: str  # e.g. "system" or "user"; any non-"system" role is sent as a human message downstream
    content: str  # the message text

class ChatModel(BaseModel):
    """Request payload for a chat-completion call against the SiliconFlow API."""

    messages: List[Message]  # conversation history to send, in order
    model: str = "Qwen/QwQ-32B"  # model identifier on the SiliconFlow API
    stream: bool = True  # streaming on by default (note: create_chat_model forces streaming regardless of this flag)
    max_tokens: int = 512  # generation cap forwarded to the API
    temperature: float = 0.7  # sampling temperature forwarded to the API
    n: int = 1  # number of completions requested (currently not forwarded by create_chat_model)


# Custom callback handler: logs LLM lifecycle events to stdout.
class MyCallbackHandler(BaseCallbackHandler):
    """Prints a marker line for each LLM lifecycle event (start, new token, end)."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        # Fired once before generation begins.
        print("LLM 开始生成...")
    
    def on_llm_new_token(self, token, **kwargs):
        # Fired for every streamed token; prints the token as it arrives.
        print(f"新token: {token}")
    
    def on_llm_end(self, response, **kwargs):
        # Fired once after generation completes.
        print("LLM 生成完成")

def create_chat_model(request: ChatModel) -> str:
    """Run a streaming chat completion and return the full generated text.

    Tokens are echoed to stdout as they arrive via the registered callbacks.

    Args:
        request: ChatModel payload (messages, model name, sampling params).

    Returns:
        The concatenated content of all streamed chunks.
    """
    # Callback manager: stream tokens to stdout and log lifecycle events.
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler(), MyCallbackHandler()])

    # Build the chat model against the SiliconFlow OpenAI-compatible endpoint.
    chat = ChatOpenAI(
        model=request.model,
        temperature=request.temperature,
        max_tokens=request.max_tokens,
        n=request.n,  # fix: forward the requested completion count (was ignored)
        streaming=True,  # force streaming so the token callbacks fire
        callback_manager=callback_manager,
        openai_api_key=API_TOKEN,
        openai_api_base=API_URL,
    )

    # Convert the pydantic messages into LangChain message objects.
    # Any role other than "system" is treated as a human/user message.
    messages = []
    for msg in request.messages:
        if msg.role == "system":
            messages.append(SystemMessage(content=msg.content))
        else:
            messages.append(HumanMessage(content=msg.content))

    # Accumulate the streamed chunks and return the result.
    # Fix: the original printed the text and returned None, so callers
    # (e.g. `result = create_chat_model(...)`) always received None;
    # the intended `return response` was commented out.
    response = ""
    for chunk in chat.stream(messages):
        response += chunk.content
    return response

# Manual smoke test. Fix: guarded with __main__ so that merely importing
# this module no longer triggers a live API call.
if __name__ == "__main__":
    create_chat_model(ChatModel(messages=[Message(role="user", content="你好")]))