# Demo script: talk to a local Ollama vision model through langchain.
# Imports grouped per PEP 8: stdlib, third-party, project-local.
import base64
import io

from PIL import Image
from langchain_core.messages import HumanMessage

from customize.get_ollama import GetOllama

# Build the chat model once at import time.  GetOllama(...) returns a
# factory that is immediately called to produce the llm instance.
# NOTE(review): meaning of model_type=1 is defined in customize.get_ollama —
# confirm it selects the chat/vision variant.
llm = GetOllama(model_name="llama3.2-vision:11b", model_type=1)()


def image_to_base64(image_path):
    """Load an image, downscale it to 640x480, and return it as a
    base64-encoded PNG string.

    :param image_path: path to the source image file (any format Pillow reads)
    :return: base64 string (ASCII/UTF-8) of the PNG-encoded, resized image
    """
    with Image.open(image_path) as img:
        # BUG FIX: Image.resize returns a NEW image and does not mutate
        # `img`; the original code discarded the result, so the full-size
        # image was encoded instead of the 640x480 one.
        resized = img.resize((640, 480))
        # Encode the resized image as PNG into an in-memory buffer.
        buffered = io.BytesIO()
        resized.save(buffered, format="PNG")
        img_bytes = buffered.getvalue()
        # Base64-encode and decode to str so callers get text, not bytes.
        return base64.b64encode(img_bytes).decode('utf-8')


def base64_to_jpeg(base64_data, output_path):
    """Decode base64-encoded image data and save it as a JPEG file.

    :param base64_data: base64-encoded image bytes (any format Pillow reads)
    :param output_path: destination path for the generated JPEG file
    """
    # Decode the base64 payload back into raw image bytes.
    img_bytes = base64.b64decode(base64_data)
    # Wrap the bytes in a file-like object so Pillow can open them.
    buffered = io.BytesIO(img_bytes)
    img = Image.open(buffered)
    # JPEG supports neither alpha channels nor palettes: saving an RGBA/P/LA
    # image (e.g. a decoded PNG) as JPEG raises OSError.  Convert first.
    if img.mode in ("RGBA", "P", "LA"):
        img = img.convert("RGB")
    img.save(output_path, format="JPEG")


# Example usage: send a prompt to the local vision model and print the reply.
image_path = r'G:\2015湖南长沙张家界\DSCF7623.JPG'  # Replace with your image path
base64_image = image_to_base64(image_path)

# Text-only prompt.  To include the image as well, add a second content part:
#   {"type": "image_url",
#    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
message = HumanMessage(
    content=[
        {"type": "text", "text": "帮我画一张图片，蓝天，白云，草地，还有一只狗。"},
    ],
)

# invoke() expects a list of messages; the reply text is in .content.
# NOTE(review): a text LLM returns text, not image bytes — if the goal is to
# save a generated image via base64_to_jpeg, the model/response handling
# needs rethinking.
response = llm.invoke([message])
print(response.content)
