from langchain_openai import ChatOpenAI
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_core.output_parsers import StrOutputParser
from langchain_core.messages import SystemMessage
from dotenv import load_dotenv
from config.config import Config
import os

# Load environment variables from a .env file at import time so Config
# (and any downstream os.environ reads) see them.
load_dotenv()

class LLMClient:
    """Thin wrapper around a streaming ChatOpenAI model.

    Pulls the default model name, API base URL, and API key from ``Config``,
    and exposes helpers to swap models and build runnable conversation chains.
    """

    def __init__(self, model: str = None, temperature: float = 0.7, max_tokens: int = 512):
        """Initialize the client and build the underlying LLM.

        Args:
            model: Model name; falls back to ``Config.model["DEFAULT_LLM_MODEL"]``
                when ``None``.
            temperature: Sampling temperature passed to the model.
            max_tokens: Maximum tokens generated per response.
        """
        self.model = model or Config.model["DEFAULT_LLM_MODEL"]
        self.base_url = Config.api["BASE_URL"]
        self.api_key = Config.api["OPENAI_API_KEY"]
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.llm = self._build_llm()
        self.parser = StrOutputParser()

    def _build_llm(self) -> ChatOpenAI:
        """Construct a streaming ChatOpenAI instance from the current settings.

        Single construction point shared by ``__init__`` and ``change_model``
        so the two can never drift apart.
        """
        return ChatOpenAI(
            model_name=self.model,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            openai_api_key=self.api_key,
            openai_api_base=self.base_url,
            streaming=True,
        )

    def change_model(self, model: str = None, temperature: float = 0.7, max_tokens: int = 512):
        """Rebuild the underlying LLM when any setting actually changes.

        A ``None`` model, or an identical (model, temperature, max_tokens)
        triple, is a no-op to avoid a needless client rebuild.
        """
        if model is None or (
            model == self.model
            and temperature == self.temperature
            and max_tokens == self.max_tokens
        ):
            return
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.llm = self._build_llm()

    def change_streaming(self, streaming=False):
        """Toggle streaming on the underlying LLM.

        Fix: the previous implementation ignored the ``streaming`` argument
        and always forced ``True``; it now honors the caller's value.
        """
        self.llm.streaming = streaming

    def create_chain(self, prompt_template):
        """Create a runnable conversation chain.

        The chain formats ``prompt_template`` with the input dict's
        ``system_message`` and ``history``, wraps the result in a
        ``SystemMessage`` followed by ``input`` (a HumanMessage or plain
        text), then pipes through the LLM and the string output parser.
        """
        return (
            RunnablePassthrough.assign(
                system_prompt=lambda x: prompt_template.format(
                    system_message=x["system_message"], history=x["history"]
                ),
            )
            | RunnableLambda(lambda x: [
                SystemMessage(content=x["system_prompt"]),  # converted to SystemMessage
                x["input"],  # HumanMessage or plain text
            ])
            | self.llm
            | self.parser
        )