# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail : 
# @Date   : 2025-10-29 16:52:22
# @Brief  : MiniCPM-V-4_5 https://www.modelscope.cn/models/OpenBMB/MiniCPM-V-4_5/?st=1Y5ujGXt5rPd7Pq03xbrKcA
# --------------------------------------------------------
"""
import os
import torch
import numpy as np
from app.utils import media_utils
from typing import List, Dict
from PIL import Image
from modelscope import AutoModel, AutoTokenizer
from app.core.mllm.inferbase import InferBase
from app.utils import utils

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin inference to GPU 0
SEED = 100  # fixed seed so torch / numpy sampling is reproducible across runs
torch.manual_seed(SEED)
np.random.seed(SEED)


class Inference(InferBase):
    """Chat-inference wrapper around the MiniCPM-V-4_5 multimodal model (modelscope backend)."""

    def __init__(self, model_info: Dict, **kwargs):
        """
        Load the model and tokenizer from a local checkpoint directory.
        :param model_info: model configuration dict; key "model_file" is the local
                           MiniCPM-V-4_5 checkpoint directory
        """
        model_file = model_info["model_file"]  # MiniCPM-V-4_5
        assert os.path.exists(model_file), "model_file={} not exists".format(model_file)
        print("load model: ", model_file)
        self.model_file = model_file
        # sdpa attention + bfloat16 weights; device_map="auto" places layers on visible GPUs
        self.model = AutoModel.from_pretrained(model_file,
                                               trust_remote_code=True,
                                               attn_implementation='sdpa',
                                               dtype=torch.bfloat16,
                                               device_map="auto")
        self.model.eval().cuda()
        self.tokenizer = AutoTokenizer.from_pretrained(model_file, trust_remote_code=True, use_fast=True)

    def get_inputs_messages(self, messages: List[Dict]):
        """
        Convert OpenAI-style messages into the flat MiniCPM chat format.

        openai messages:
           [
            {'role': 'user/assistant', 'content': [{"type": "text", "text": "user question"}]},
            {'role': 'user/assistant', 'content': [{"type": "text", "text": "analyze image/video"},
                                                   {"type": "image_url", "image_url": {"url": image}},
                                                   {"type": "image_url", "image_url": {"url": image}}
                                                   ]
             },
            ]
        minicpm messages:
            [{'role': 'user', 'content': [frame1,frame2,frame3,...,text]}]

        Only messages with role user/assistant AND level == 1 take part in inference;
        a role == "system" message is returned separately as the system prompt.
        NOTE(review): chat() documents level == 2 as a user-side system prompt, but it
        is not handled here — confirm whether level 2 support is still required.
        :param messages: OpenAI-style message list (a list of dicts, not a single dict)
        :return: (minicpm messages, system prompt content, video metadata)
        """
        # NOTE(review): metadata is a fixed placeholder; nothing downstream consumes it yet
        video_metadata = {"fps": 2, "total_num_frames": 0}
        out_msg, system = [], None  # removed unused `num_frames` counter
        for msg in messages:
            role, content, level = msg["role"], msg["content"], msg.get("level", 1)
            assert isinstance(content, list), "content must be list"
            if role in ("user", "assistant") and level == 1:
                cont_list = []
                for item in content:
                    if item["type"] == "text":
                        cont_list.append(item["text"])
                    elif item["type"] == "image_url":
                        cont_list.append(item["image_url"]["url"])
                out_msg.append({**msg, "content": cont_list})
            elif role == "system":
                # TODO: system prompt is passed through as the raw content list;
                # confirm model.chat() accepts it (a plain string may be expected)
                system = content
        return out_msg, system, video_metadata

    def chat(self, messages: List[Dict], temporal_ids=None, thinking=False, stream=False, **kwargs):
        """
        Real-time analysis.
        level = 0: the message (role=assistant or user) is excluded from inference,
                   typically system prompts / runtime bookkeeping info
        level = 1: the message (role=assistant or user) is used as inference context (default)
        level = 2: the message (role=user) is a system prompt used for inference
        :param messages: OpenAI-style message list
        :param temporal_ids: List[List[int]], video frame timestamp ids (for temporal understanding)
        :param thinking: enable chain-of-thought reasoning, default False
        :param stream: enable streaming output
        :return: assistant message dict; NOTE when stream=True the "text" field holds the
                 raw response object returned by model.chat() (presumably an iterator — verify)
        """
        messages, system, video_metadata = self.get_inputs_messages(messages)  # convert to model input format
        # message = [{'role': 'user', 'content': [frame1,frame2,frame3,...,text]}]
        response = self.model.chat(msgs=messages,
                                   tokenizer=self.tokenizer,
                                   system_prompt=system,
                                   use_image_id=None,
                                   # max_slice_nums=1,  # max slice count (default 9); 1 degrades quality
                                   temporal_ids=temporal_ids,  # real-time analysis may not need timestamps?
                                   enable_thinking=thinking,
                                   stream=stream
                                   )
        result = {"level": 1, "role": "assistant", "content": [{"type": "text", "text": response}]}
        return result

    def chat_example(self, text: str, stream=False, thinking=False, **kwargs):
        """Single-turn, text-only convenience wrapper around chat()."""
        messages = [{'role': 'user', 'content': [{"type": "text", "text": text}]}]
        result = self.chat(messages=messages, thinking=thinking, stream=stream, **kwargs)
        return result


def main():
    """Manual smoke test: run camera-style analysis on a sample video."""
    from pybaseutils import json_utils
    from app.config import config

    # Look up the checkpoint registered under this model name in the project model zoo.
    model_name = "MiniCPM-V-4_5"
    model_info = config.llm_model_zoos[model_name]
    image_file = "../../../data/image1.jpg"
    video_file = "../../../data/video2.mp4"
    infer = Inference(model_info)
    # Other manual smoke tests (image / video / plain chat):
    # result = infer.chat_image("请描述这张图片", image=image_file, stream=False)
    # result = infer.chat_video("请描述这个视频", video=video_file, stream=False)
    result = infer.camera_example("请描述视频中人正在做什么", file=video_file)
    # result = infer.chat_example("你好，请介绍一下自己")
    print("摄像头分析结果:", result)


if __name__ == '__main__':
    main()
