import base64
import mimetypes
from typing import Literal

from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool

from src.module.Model import Model

# Absolute, machine-local paths to the three sample images
# (daxiang = elephant, panda, songshu = squirrel) — adjust for your machine.
Image1 = 'C:/Users/AO237/Desktop/image/daxiang.jpeg'
Image2 = 'C:/Users/AO237/Desktop/image/panda.jpeg'
Image3 = 'C:/Users/AO237/Desktop/image/songshu.jpeg'

## The Literal choices constrain what the model may answer: LangChain turns
## this signature plus the docstring into the tool schema/description sent to
## the model, which must pick one of the listed values.
@tool
def different_tool(args: Literal["大熊猫", "松鼠", "大象"]) -> str:
    """必须调用此工具来识别图片中的动物，且参数必须是 '大熊猫'、'松鼠' 或 '大象'。"""
    # NOTE(review): the docstring above is the tool description the LLM sees
    # at runtime, so it is deliberately left in Chinese to match the prompt.
    # The tool simply echoes the model's chosen label back.
    return args

class Tools(Model):
    """Demo of multimodal tool calling.

    Sends three local animal images to qwen-vl-max and forces the answer
    through ``different_tool``, then prints the resulting tool calls.
    """

    @staticmethod
    def transform_path_to_base64(file_path):
        """Return the file at *file_path* encoded as a ``data:`` URI.

        The MIME type is guessed from the file extension via ``mimetypes``
        (the previous version hard-coded ``image/png`` even though all the
        sample images are ``.jpeg``); falls back to ``image/jpeg`` when the
        extension is not recognized.
        """
        mime, _ = mimetypes.guess_type(file_path)
        with open(file_path, "rb") as image_file:
            encoded = base64.b64encode(image_file.read()).decode("utf-8")
        return f"data:{mime or 'image/jpeg'};base64,{encoded}"

    def start(self):
        """Invoke the model on the three sample images and print its tool calls."""
        model = ChatTongyi(
            # Some models have limited support for tools; qwen-vl-max is
            # currently the most reliable multimodal choice here.
            model_name="qwen-vl-max",
        )
        model_with_tools = model.bind_tools([different_tool])

        response = model_with_tools.invoke([
            HumanMessage(content=[
                {'text': '请描述下这些动物'},
                {"image": self.transform_path_to_base64(Image1)},
                {"image": self.transform_path_to_base64(Image2)},
                {"image": self.transform_path_to_base64(Image3)},
            ])
        ])
        # tool_calls carries the structured invocation(s) of different_tool.
        print(response.tool_calls)

