# GF_PY3_EXAMPLE_Open_WebUI_Pipelines_双模型图文处理.py
# Created by GF 2025-04-08 12:10

# title: 双模型图文处理流
# author: Gou Feng
# date: 2024-06-20
# version: 1.0
# license: MIT
# description: MiniCPM 图像描述 + DeepSeek-R1 文本提炼
# requirements: []

# Example:
#
# >>> test_image = base64.b64encode(b"fake_image_data").decode() # 构造测试图像数据
# >>> print("图像数据: " + test_image)
# 图像数据: iVBORw0KGgoAAAANSU...
# >>>
# >>> test_body = { # 构建测试请求
# ...     "messages": [{
# ...         "role": "user",
# ...         "image": f"data:image/png;base64,{test_image}",
# ...         "content": "请分析这张图像"
# ...     }]
# ... }
# >>>
# >>> pipeline = Pipeline() # 实例化对象
# >>> result = asyncio.run(pipeline.inlet(test_body)) # 运行处理流
# >>>
# >>> print(result)
# {
#   "messages": [
#     {
#       "role": "user",
#       "image": "...",
#       "content": "请分析这张图像"
#     },
#     {
#       "role": "assistant",
#       "content": "处理结果: 都市夜景: 玻璃幕墙大厦与夜月",
#       "processing_steps": {
#         "raw_description": "图像内容: 城市天际线夜景...",
#         "summary_model": "deepseek-r1:7b"
#       }
#     }
#   ]
# }

import asyncio
import base64
from typing import Dict, List, Optional

from pydantic import BaseModel

class Pipeline:
    """Two-stage image+text filter for Open WebUI.

    Stage 1: MiniCPM generates a raw description of the latest user image.
    Stage 2: DeepSeek-R1 condenses that description into a short summary,
    which is appended to the conversation as an assistant message.
    """

    class Valves(BaseModel):
        # Models this filter attaches to; `priority` orders filters in Open WebUI.
        pipelines: List[str] = ["minicpm-v:8b", "deepseek-r1:7b"]
        priority: int = 0

    def __init__(self):
        self.type = "filter"  # Open WebUI pipeline type: request/response filter
        self.name = "双模型图文处理器"
        self.valves = self.Valves()

    async def inlet(self, body: Dict, user: Optional[Dict] = None) -> Dict:
        """Process an incoming request body.

        If the most recent user message carries an image, run the two-model
        chain and append an assistant message with the result; otherwise the
        body is passed through untouched.

        Args:
            body: Open WebUI request body; expects a "messages" list.
            user: Optional requesting-user info (unused here).

        Returns:
            The request body with the assistant result appended. All other
            top-level keys of ``body`` (model, stream, options, ...) are
            preserved.
        """
        messages = body.get("messages", [])

        # Find the latest user message that contains an image; no image means
        # this filter has nothing to do.
        last_image = self._get_last_image(messages)
        if not last_image:
            return body

        # Stage 1: image -> raw description via MiniCPM.
        # split(",")[-1] strips an optional "data:image/...;base64," prefix;
        # a bare base64 string passes through unchanged.
        image_data = last_image["image"].split(",")[-1]
        raw_desc = await self._call_minicpm(image_data)

        # Stage 2: raw description -> condensed summary via DeepSeek-R1.
        refined_summary = await self._call_deepseek(raw_desc)

        new_msg = {
            "role": "assistant",
            "content": f"处理结果: {refined_summary}",
            "processing_steps": {
                "raw_description": raw_desc,
                "summary_model": "deepseek-r1:7b"
            }
        }

        # Fix: preserve every other top-level key of the request body
        # (model, stream, options, ...) instead of returning only "messages".
        return {**body, "messages": messages + [new_msg]}

    def _get_last_image(self, messages: List[Dict]) -> Optional[Dict]:
        """Return the most recent user message containing an "image" key.

        Scans the message list in reverse; returns None when no user message
        carries an image.
        """
        for msg in reversed(messages):
            if msg.get("role") == "user" and "image" in msg:
                return msg
        return None

    async def _call_minicpm(self, image_base64: str) -> str:
        """Generate an image description with MiniCPM (stub).

        Replace the body with a real API call. The base64 payload is decoded
        here only to mirror what an actual client would send; note that
        ``b64decode`` raises ``binascii.Error`` on malformed input.
        """
        byte_data = base64.b64decode(image_base64)

        # A real implementation would call the MiniCPM image-understanding API
        # with `byte_data` here.
        await asyncio.sleep(0.2)  # simulate network latency
        # Canned example response:
        return "图像内容: 城市天际线夜景，多栋玻璃幕墙摩天大楼，蓝色夜空中有明亮的月亮"

    async def _call_deepseek(self, text: str) -> str:
        """Condense a raw image description with DeepSeek-R1 (stub).

        Replace the body with a real API call using ``prompt``.
        """
        prompt = f"请将以下图像描述提炼为20字内的摘要: \n{text}"

        # A real implementation would send `prompt` to the DeepSeek text
        # generation API here.
        await asyncio.sleep(0.1)  # simulate network latency
        # Canned example response:
        return "都市夜景: 玻璃幕墙大厦与夜月"
