import base64
import os

from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

class LLMClient:
    """Thin wrapper around a SiliconFlow-hosted chat model via LangChain.

    Builds a streaming ``ChatOpenAI`` client pointed at the SiliconFlow
    OpenAI-compatible endpoint and exposes a simple model -> string chain.
    """

    DEFAULT_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
    BASE_URL = "https://api.siliconflow.cn/v1"

    def __init__(self, model: str = None, temperature: float = 0.7,
                 max_tokens: int = 512):
        """
        Args:
            model: Model identifier; defaults to ``DEFAULT_MODEL``.
            temperature: Sampling temperature forwarded to the model.
            max_tokens: Maximum number of tokens to generate per reply.
        """
        self.model = model or self.DEFAULT_MODEL
        self.base_url = self.BASE_URL
        # SECURITY: the API key was previously hard-coded in source; a key
        # committed to a repository must be treated as leaked and rotated.
        # Read it from the environment instead.
        self.api_key = os.environ.get("SILICONFLOW_API_KEY", "")
        self.llm = ChatOpenAI(
            model_name=self.model,
            temperature=temperature,
            max_tokens=max_tokens,
            openai_api_key=self.api_key,
            openai_api_base=self.base_url,
            streaming=True,  # emit tokens incrementally so .stream() works
        )
        self.parser = StrOutputParser()

    def create_chain(self):
        """Return a runnable chain: model output parsed to a plain string."""
        return self.llm | self.parser

def create_chain(self, llm):
    """Build an ``llm -> string parser`` runnable chain.

    NOTE(review): this module-level function takes ``self`` like a method —
    it looks like a misplaced copy of ``LLMClient.create_chain``. The
    signature is kept for backward compatibility, but the function no longer
    raises ``AttributeError`` when ``self`` has no ``parser`` attribute.

    Args:
        self: Object expected to carry a ``parser`` attribute (e.g. an
            ``LLMClient`` instance); a fresh ``StrOutputParser`` is used
            as a fallback.
        llm: Any LangChain runnable producing model output.

    Returns:
        The composed runnable ``llm | parser``.
    """
    parser = getattr(self, "parser", None) or StrOutputParser()
    return llm | parser

def encode_image(image_path):
    """Read the file at *image_path* and return its Base64 text encoding.

    Args:
        image_path: Path to the (binary) file to encode.

    Returns:
        The file contents encoded as an ASCII Base64 string.
    """
    with open(image_path, "rb") as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode("utf-8")


def main() -> None:
    """Demo: stream a vision model's description of a local image."""
    # Base64-encode the sample image so it can be inlined as a data URL.
    image_data = encode_image("./test/test.png")

    # Build the multimodal input: one text part plus one image part.
    chain_input = {
        "history": [],
        "input": [HumanMessage(
            content=[
                {"type": "text", "text": "描述这张图片"},
                {
                    "type": "image_url",
                    "image_url": {
                        # The source file is a PNG, so declare the matching
                        # MIME type (it was previously mislabeled image/jpeg).
                        "url": f"data:image/png;base64,{image_data}"
                    }
                }
            ]
        )]
    }

    client = LLMClient()
    chain = client.create_chain()

    # Stream tokens as they arrive instead of waiting for the full reply.
    for chunk in chain.stream(chain_input["input"]):
        print(chunk)


# Guard the demo so importing this module does not trigger network I/O.
if __name__ == "__main__":
    main()