# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail : 
# @Date   : 2025-11-03 15:06:52
# @Brief  :
# --------------------------------------------------------
"""
import sys
import os

# Ensure the project root is importable so `app.*` imports resolve when this
# file is run directly as a script (assumes CWD is the project root).
sys.path.insert(0, os.getcwd())
import traceback
from typing import List, Dict
from app.utils.log import logger, print_info
from app.config import config
from pybaseutils import image_utils, base64_utils

# Data-URL prefix used to mark base64-encoded JPEG images inside request payloads;
# must match the prefix used by base64_utils.serialization/deserialization.
img_prefix = "data:image/jpeg;base64,"


class Pipeline(object):
    """Dispatches multimodal chat requests to the configured LLM backend."""

    def __init__(self, ):
        """
        Load the inference backend selected by ``config.model_info["model_name"]``.

        :raises ValueError: if the configured model name is not supported.
        """
        self.model_name = config.model_info["model_name"]
        self.model_file = config.model_info["model_file"]
        logger.info("LLM info={}".format(config.model_info))
        # Compare case-insensitively so config values in any casing still match.
        if self.model_name.lower() == "MiniCPM-V-4_5".lower():
            from app.core.mllm import minicpm
            self.infer = minicpm.Inference(config.model_info)
        elif self.model_name.lower() == "Qwen3-VL-2B-Instruct".lower():
            from app.core.mllm import qwen3vl
            self.infer = qwen3vl.Inference(config.model_info)
        else:
            # Fail fast: leaving self.infer unset would only surface later as an
            # opaque AttributeError inside task().
            logger.error("LLM model_name={} not support".format(self.model_name))
            raise ValueError("LLM model_name={} not support".format(self.model_name))

    def task(self, params: Dict):
        """
        Run one chat request through the loaded LLM backend.

        gradio messages is [
                 {'role': 'user/assistant', 'content': "user question / task description"},
                 {'role': 'user/assistant', 'content': [image_file]},
                 {'role': 'user/assistant', 'content': [video_file]},
             ]
        openai messages is:
           [
            {'role': 'user/assistant', 'content': [{"type": "text", "text": "user question"}]},
            {'role': 'user/assistant', 'content': [{"type": "text", "text": "analyze the image/video"},
                                                   {"type": "image_url", "image_url": {"url": image}},
                                                   {"type": "image_url", "image_url": {"url": image}}
                                                   ]
             },
            ]
        :param params: {'model': "", 'messages': [openai messages],'config':{}}
        :return: on success, the backend result merged with the last message's metadata;
                 on failure {"level": 0, "role": "assistant", "content": msg, "code": 5000, "msg": msg}
        """
        try:
            # Decode base64 data-URL images in the payload back into PIL images (RGB).
            params = base64_utils.deserialization(params, prefix=img_prefix, use_rgb=True, img_type="pil")
            messages = params.pop("messages", [])  # OpenAI-style messages
            # Explicit check instead of `assert` (stripped under -O); the ValueError
            # is caught below and converted into the standard error result.
            if not messages:
                raise ValueError("input messages is empty,messages={}".format(messages))
            print_info("input messages=", messages)
            prompts = messages[-1]
            # result is {"level": 1, "role": "assistant", "content": [{"type": "text", "text": "response"}]}
            result = self.infer.chat(messages=messages, **params)
            # Merge request metadata (e.g. reqid) from the last message into the result;
            # keys produced by the backend take precedence.
            result = {**prompts, **result}
        except Exception as e:
            traceback.print_exc()
            msg = "系统发生错误: {}".format(str(e))
            result = {"level": 0, "role": "assistant", "content": msg, "code": 5000, "msg": msg}
        logger.info("result     ={}".format(result))
        return result


if __name__ == '__main__':
    # Demo: build OpenAI-style multimodal messages and run them through the pipeline.
    # Note: `config` is already imported at the top of this file.
    from pybaseutils import json_utils

    image_file = os.path.join(config.root, "data/image1.jpg")
    image = image_utils.read_image(image_file, use_rgb=True)
    # OpenAI-style message format: a list of content parts per message.
    image_list = [{"type": "image_url", "image_url": {"url": image}}] * 6
    messages = [{"role": "user",
                 "content": [{"type": "text", "text": "你好，请介绍一下自己"}],
                 "reqid": "reqid001"
                 },
                {"role": "user",
                 "content": image_list + [{"type": "text", "text": "请描述这张图片"}],
                 # Treat the repeated frames as a 2-fps video clip.
                 "video_metadata": {"fps": 2, 'total_num_frames': len(image_list)},
                 "reqid": "reqid002"
                 }]
    params = {"messages": messages, "model": "model_name"}
    # Encode images as base64 data-URLs so the payload is JSON-serializable,
    # mirroring what a real client would send.
    params = base64_utils.serialization(params, prefix=img_prefix, use_rgb=True)
    infer = Pipeline()
    result = infer.task(params)
    print("结果:", json_utils.formatting(result))
    print(result["content"][0]["text"])
