# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail : 
# @Date   : 2025-10-29 16:52:22
# @Brief  :
# --------------------------------------------------------
"""
import os
import torch
import numpy as np
from inspect import signature
from app.utils import media_utils
from app.core.mllm.inferbase import InferBase
from typing import List, Dict
from app.utils.log import logger, print_info
from transformers import Qwen3VLForConditionalGeneration, AutoProcessor, Qwen3VLProcessor

# Import-time setup: pin the visible GPU and seed RNGs for reproducible generation.
# NOTE(review): this unconditionally overrides any CUDA_VISIBLE_DEVICES already set
# in the caller's environment — confirm that forcing GPU 0 is intended.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.manual_seed(100)
np.random.seed(100)


class Inference(InferBase):
    """Local inference wrapper around a Qwen3-VL model (transformers backend)."""

    def __init__(self, model_info: Dict, **kwargs):
        """
        Load a Qwen3-VL model and its processor.
        :param model_info: configuration dict:
                           - "model_file": path to the pretrained model directory (required)
                           - "device": target device, default "cuda"
                           - "max_tokens": default cap on generated tokens, default 1024
        :raises FileNotFoundError: if model_file does not exist
        """
        model_file = model_info["model_file"]
        self.device = model_info.get("device", "cuda")
        self.max_tokens = model_info.get("max_tokens", 1024)
        # Raise explicitly instead of `assert`: asserts are stripped under `python -O`.
        if not os.path.exists(model_file):
            raise FileNotFoundError("model_file={} not exists".format(model_file))
        self.model_file = model_file
        print("load model      : ", model_file)
        self.model = Qwen3VLForConditionalGeneration.from_pretrained(model_file, dtype="auto", device_map=self.device)
        # self.processor = AutoProcessor.from_pretrained(model_file, trust_remote_code=True)
        self.processor = Qwen3VLProcessor.from_pretrained(model_file, trust_remote_code=True)
        self.generation_config = self.model.generation_config
        self.generation_config.max_new_tokens = self.max_tokens  # may be overridden per request
        self.generation_config = self.generation_config.to_diff_dict()
        self.input_param = []
        print("load generation_config  : ", self.generation_config)

    def get_inputs_messages(self, messages: List):
        """
        Convert OpenAI-style messages into the Qwen3-VL chat format.
        OpenAI messages look like:
           [
            {'role': 'user/assistant', 'content': [{"type": "text", "text": "question"}]},
            {'role': 'user/assistant', 'content': [{"type": "text", "text": "analyze image/video"},
                                                   {"type": "image_url", "image_url": {"url": image}},
                                                   {"type": "image_url", "image_url": {"url": image}}
                                                   ]
             },
            ]
        Qwen3-VL messages look like:
           [
             {'role': 'user/assistant', 'content': [{"type": "text", "text": "question"}]},
             {'role': 'user/assistant', 'content': [{"type": "text", "text": "analyze image"},{"type": "image", "image": image}]},
             {'role': 'user/assistant', 'content': [{"type": "text", "text": "analyze video"},{"type": "video", "video": [frame1,frame2,frame3]}]},
           ]
        A single image_url becomes an "image" entry; two or more become one "video" entry.
        :param messages: OpenAI-style message list; each item may carry "level" (default 1)
        :return: (qwen_messages, system_content, video_metadata)
        """
        video_metadata = {"fps": 2, "total_num_frames": 0}
        out_msg, system = [], None
        for msg in messages:
            role, content, level = msg["role"], msg["content"], msg.get("level", 1)
            assert isinstance(content, list), "content must be list"
            if role in ["user", "assistant"] and level == 1:
                cont_list, images = [], []
                for c in content:
                    if c["type"] == "text":
                        cont_list.append(c)
                    elif c["type"] == "image_url":
                        images.append(c["image_url"]["url"])
                if len(images) == 1:
                    cont_list.append({"type": "image", "image": images[0]})
                elif len(images) > 1:
                    # Multiple images are treated as video frames; an explicit
                    # per-message "video_metadata" overrides the fps=2 default.
                    metadata = {"fps": 2, "total_num_frames": len(images)}
                    cont_list.append({"type": "video", "video": images})
                    video_metadata = {**metadata, **msg.get("video_metadata", {})}
                out_msg.append({**msg, "content": cont_list})
            elif role == "system":
                system = content  # TODO: system prompt is captured but not yet injected into the model input

        return out_msg, system, video_metadata

    def get_inputs_param(self, **kwargs):
        """
        Build the generation parameters passed to model.generate().
        :param max_new_tokens: upper bound on newly generated tokens; prevents unbounded output.
        :param temperature: controls randomness/creativity of sampling.
                            low (e.g. 0.1-0.5): more deterministic, conservative, repetitive.
                            high (e.g. >1.0): more random and diverse, possibly incoherent.
        :param top_p: nucleus sampling — sample from the smallest token set whose cumulative
                      probability reaches p; top_p=1.0 means the full vocabulary (no restriction).
        :param do_sample: enable stochastic sampling; must be True for temperature/top_p to apply.
                          False = greedy decoding (always pick the highest-probability token).
        :param repetition_penalty: penalize already-generated tokens to reduce repetition
                                   (useful for image/video captioning).
                                   1.0 = no penalty; 1.05-1.2 = mild penalty encouraging diversity;
                                   too large (>1.5) may suppress legitimate repeats and hurt fluency.
        :return: dict of generation kwargs, defaults overridden by any non-None caller values
        """
        # Default hyper-parameters for vision-language generation.
        vl_default = {'max_new_tokens': self.max_tokens,
                      'do_sample': True,
                      'temperature': 0.7,
                      'top_k': 20,
                      'top_p': 0.8,
                      'pad_token_id': 151643,
                      'bos_token_id': 151643,
                      'eos_token_id': [151645, 151643],
                      'repetition_penalty': 1.0,  # corresponds to vLLM's presence_penalty
                      # 'presence_penalty': 1.5,
                      # 'greedy': False,
                      # 'out_seq_length': 16384,
                      }
        # Alternative defaults for text-only generation (currently unused).
        text_default = {'max_new_tokens': self.max_tokens,
                        'do_sample': True,
                        'temperature': 0.7,
                        'top_k': 40,
                        'top_p': 1.0,
                        'pad_token_id': 151643,
                        'bos_token_id': 151643,
                        'eos_token_id': [151645, 151643],
                        'repetition_penalty': 1.0,  # corresponds to vLLM's presence_penalty
                        # 'presence_penalty': 2.0,
                        # 'greedy': False,
                        # 'out_seq_length': 32768,
                        }
        default = vl_default
        # default = text_default
        kwargs = {k: v for k, v in kwargs.items() if v is not None}  # drop None values
        # Only keys already present in `default` may be overridden; unknown keys are
        # dropped because transformers' generate() rejects unexpected arguments.
        return {k: kwargs.get(k, v) for k, v in default.items()}

    def chat(self, messages: List, stream=False, thinking=False, **kwargs):
        """
        Run one multimodal chat turn.
        Message levels:
          level = 0: message (role=assistant/user) excluded from inference (system prompts, runtime info)
          level = 1: message (role=assistant/user) participates in inference as context (default)
          level = 2: message (role=user) is a system prompt that participates in inference
        :param messages: OpenAI-style message list
        :param thinking: enable chain-of-thought reasoning, default False (not applied here yet)
        :param stream: whether to stream the output (not applied here yet; full response is returned)
        :return: assistant message dict {"level": 1, "role": "assistant", "content": [...]}
        """
        messages, system, video_metadata = self.get_inputs_messages(messages)  # convert to model input format
        kwargs = self.get_inputs_param(**kwargs)
        # kwargs = {**kwargs, **self.generation_config}
        print_info("qwen3vl messages        =", messages)
        print_info("qwen3vl kwargs          =", kwargs)
        print_info("qwen3vl video_metadata  =", video_metadata)
        # Preparation for inference.
        # For video_metadata semantics see:
        # from transformers.video_processing_utils import BaseVideoProcessor
        # BaseVideoProcessor.preprocess
        processor_kwargs = dict(video_metadata=video_metadata)  # input video assumed to be 2 fps by default
        inputs = self.processor.apply_chat_template(messages,
                                                    tokenize=True,
                                                    add_generation_prompt=True,
                                                    return_dict=True,
                                                    return_tensors="pt",
                                                    **processor_kwargs,
                                                    ).to(self.device)
        # Inference: generation of the output; strip the prompt tokens before decoding.
        generated_ids = self.model.generate(**inputs, **kwargs)
        generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
        response = self.processor.batch_decode(generated_ids_trimmed,
                                               skip_special_tokens=True,
                                               clean_up_tokenization_spaces=False)
        if response:
            assert len(response) == 1, "response must be 1,response= {}".format(response)
            response = response[0]
        else:
            response = ""  # keep "text" a string even if decoding produced nothing
        result = {"level": 1, "role": "assistant", "content": [{"type": "text", "text": response}]}
        return result

    def chat_image(self, text: str, image: np.ndarray | str = None, thinking=False,
                   stream=False, **kwargs):
        """
        Analyze a single image.
        :param text: user question / task description
        :param image: RGB image (array) or image path/URL; passed through media_utils.load_image
        :param thinking: enable chain-of-thought reasoning, default False
        :param stream: whether to stream the output
        :return: assistant message dict (see chat())
        """
        image = media_utils.load_image(image)
        content = [
            {"type": "image_url", "image_url": {"url": image}},
            {"type": "text", "text": text},
        ]
        messages = [{'role': 'user', 'content': content}]
        result = self.chat(messages=messages, stream=stream, thinking=thinking, **kwargs)
        return result

    def chat_video(self, text: str, frames: List = None, thinking=False, stream=False, **kwargs):
        """
        Analyze a sequence of video frames.
        :param text: user question / task description
        :param frames: List[Image] video frames; default None is treated as an empty list
                       (was a mutable default `[]` — fixed to avoid shared state)
        :param thinking: enable chain-of-thought reasoning, default False
        :param stream: whether to stream the output
        :return: assistant message dict (see chat())
        """
        frames = [media_utils.load_image(img) for img in (frames or [])]
        content = [{"type": "image_url", "image_url": {"url": img}} for img in frames]
        content.append({"type": "text", "text": text})
        messages = [{'role': 'user', 'content': content}]
        result = self.chat(messages=messages, stream=stream, thinking=thinking, **kwargs)
        return result

    def camera_example(self, text, file, freq=2, winsize=10, overlap=0.5, stream=False, thinking=False):
        """
        Real-time analysis example: sample frames from a video and analyze sliding windows.
        :param text: user question / task description
        :param file: video file path
        :param freq: frame sampling frequency (Hz)
        :param winsize: sliding-window size (frames)
        :param overlap: sliding-window overlap ratio
        :param thinking: enable chain-of-thought reasoning, default False
        :param stream: whether to stream the output
        :return: list of {"time", "count", "result"} dicts, one per processed window
        """
        from pybaseutils.cvutils import video_utils
        from pybaseutils.base import list_queue
        w, h, nums, fps = video_utils.get_video_info(file)
        video_cap = video_utils.video_iterator(file, freq=freq, vis=False)
        queue = list_queue.Queue(maxsize=100)
        results = []
        for data_info in video_cap:
            # data_info: {"count": count, "time": t, "frame": frame, "w": w, "h": h, "fps": fps, "offset": count}
            queue.put(data_info, block=False)
            while queue.qsize() > winsize:
                data_list = queue.get_window(winsize=winsize, overlap=overlap)  # extract one window of frames
                times_ = [data_list[0]['time'], data_list[-1]['time']]
                count = [info['count'] for info in data_list]
                frames = [info['frame'] for info in data_list]
                result = self.chat_video(text, frames=frames, thinking=thinking, stream=stream)
                results.append({"time": times_, "count": count, "result": result})
                print("分析结果: time={:20s},offset={}, result={}".format(str(times_), count, result))
        return results

    def chat_example(self, text: str, stream=False, thinking=False, **kwargs):
        """Text-only chat helper: wraps `text` as a single user message and calls chat()."""
        messages = [{'role': 'user', 'content': [{"type": "text", "text": text}]}]
        result = self.chat(messages=messages, thinking=thinking, stream=stream, **kwargs)
        return result


if __name__ == '__main__':
    from pybaseutils import json_utils
    from app.config import config

    # Smoke test: load the configured model and run a plain-text chat.
    model_name = "Qwen3-VL-2B-Instruct"
    model_info = config.llm_model_zoos[model_name]
    image_file = "../../../data/image1.jpg"
    video_file = "../../../data/video2.mp4"
    infer = Inference(model_info)
    # Other usage examples (uncomment to try):
    #   infer.chat_image("请描述这张图片", image=image_file)
    #   infer.chat_video("请描述这个视频", video=video_file)
    #   infer.camera_example("请描述这个视频", file=video_file)
    #   infer.chat_video_file("请描述这个视频", file=video_file)
    result = infer.chat_example("你好，请介绍一下自己", reqid="123456")
    # print(json_utils.formatting(result))