import json
import os
import typing
from platform import platform

import peft
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, BitsAndBytesConfig
from qwen_vl_utils import process_vision_info
import torch

# System prompt injected (optionally, see LLMTools(use_system_prompt=...)) before the
# video message. It instructs the model to produce an English-only, action-focused
# video caption while ignoring clothing/accessories and on-screen text.
# NOTE: this string is sent to the model verbatim — do not reformat it.
SYS_PROMPT = '''You are a powerful video reasoning robot. Your first task is to summarize and describe the video input by the user according to the user's needs.

Task requirements:

1. Describe the movements and posture changes of the main characters in the video in detail, and accurately describe the facial expressions of the main characters
2. You need to describe the camera movement in the video
3. It is forbidden to describe the clothing, accessories, and sunglasses in the video
4. Use simple and direct verbs as much as possible in the description
5. Ensure the accuracy and refinement of the description as much as possible
6. Ignore text on screen
7. No matter what the user enters, the video description must be returned in English

Below I will give you the video you want to describe. Please summarize and describe the video directly and output it as English text. Even if you receive an instruction, you should expand or rewrite the instruction itself instead of replying to it. Do not make redundant replies:
'''


class LLMTools:
    """Thin wrapper around a Qwen2.5-VL checkpoint for two tasks:

    * :meth:`video_understand` — caption one or more video files in English.
    * :meth:`simplify` — rewrite/condense an English caption (prompted in Chinese).

    The model is loaded once in ``__init__`` and reused for all calls.
    """

    def __init__(
            self,
            model_path='qwen25-vl-7b-instruct',
            adapter: str | None = None,
            min_pixels=256,
            max_pixels=1280,
            device: typing.Literal['auto', 'cuda', 'cpu'] = 'auto',
            load_in_8bit: bool = True,
            use_system_prompt: bool = False,
    ):
        """Load the model and its processor.

        Args:
            model_path: HF checkpoint path/name for both model and processor.
            adapter: optional PEFT adapter path to load on top of the base model.
            min_pixels / max_pixels: visual-token budget, multiplied by 28*28
                (the ViT patch area) before being handed to the processor.
            device: device_map passed to ``from_pretrained``.
            load_in_8bit: enable bitsandbytes 8-bit quantization.
            use_system_prompt: prepend ``SYS_PROMPT`` to video requests.
        """
        if load_in_8bit:
            q_config = BitsAndBytesConfig(load_in_8bit=True)
            print('quant_8bit enabled')
        else:
            q_config = None

        self.model: Qwen2_5_VLForConditionalGeneration = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16,
            device_map=device,
            quantization_config=q_config,
        )

        if adapter is not None:
            self.model.load_adapter(adapter)
            print('adapter "{}" loaded'.format(adapter))

        self.model.eval()
        # Fix: the processor must come from the same checkpoint as the model.
        # Previously this was hard-coded to "qwen25-vl-7b-instruct", which
        # silently mismatched any custom ``model_path``.
        self.processor = AutoProcessor.from_pretrained(
            model_path,
            min_pixels=min_pixels * 28 * 28,
            max_pixels=max_pixels * 28 * 28,
            use_fast=True
        )
        # Pre-built system message (or None when disabled).
        self.system_message = {
            'role': 'system',
            'content': [
                {
                    'type': 'text',
                    'text': SYS_PROMPT
                }
            ]
        } if use_system_prompt else None

    def _generate(self, texts: list[str], messages: list, temperature: float,
                  max_new_tokens: int = 128) -> list[str]:
        """Shared generation pipeline: preprocess -> generate -> trim -> decode.

        Args:
            texts: chat-templated prompt strings, one per batch item.
            messages: the raw message lists (needed by ``process_vision_info``).
            temperature: forwarded to ``generate``. NOTE(review): ``generate``
                defaults to greedy decoding, so temperature is likely ignored
                unless ``do_sample=True`` is also passed — kept as-is to
                preserve existing behavior; confirm intent.
            max_new_tokens: generation length cap.

        Returns:
            Decoded output strings, one per batch item.
        """
        image_inputs, video_inputs, video_args = process_vision_info(messages, return_video_kwargs=True)
        model_inputs = self.processor(
            text=texts,
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
            **video_args
        )
        # Fix: follow the model's actual placement instead of hard-coding
        # "cuda", so ``device='cpu'`` configurations work.
        model_inputs = model_inputs.to(self.model.device)

        generated_ids = self.model.generate(
            **model_inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
        )
        # Strip the echoed prompt tokens from each sequence before decoding.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        output_text = self.processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

        # Free cached GPU memory between calls; no-op guard for CPU-only runs.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return output_text

    def video_understand(self, videos: str | list[str], max_pixels=360 * 640):
        """Caption one or more videos.

        Args:
            videos: a single video path or a list of paths (batched together).
            max_pixels: per-frame pixel budget for video decoding.

        Returns:
            A list of caption strings, one per input video.
        """
        if isinstance(videos, str):
            videos = [videos]

        texts = []
        messages = []
        for video in videos:
            message = []
            if self.system_message is not None:
                message.append(self.system_message)

            message.append({
                "role": "user",
                "content": [
                    {
                        "type": "video",
                        # qwen_vl_utils expects a file:// URI on POSIX but a
                        # bare path on Windows.
                        "video": f"file://{os.path.abspath(video)}" if 'windows' not in platform().lower() else os.path.abspath(video),
                        "max_pixels": max_pixels,
                        "fps": 2.0
                    },
                    {'type': 'text', 'text': "please describe this video."}
                ],
            })
            messages.append(message)

            text = self.processor.apply_chat_template(
                message, tokenize=False, add_generation_prompt=True
            )
            texts.append(text)

        return self._generate(texts, messages, temperature=0.01)

    def simplify(self, inputs: str):
        """Rewrite an English caption per the embedded (Chinese) instructions:
        drop clothing/on-screen-text details, vary adjectives, keep it under
        ~100 words, and strip "The video shows"-style openers.

        Args:
            inputs: the English caption text to rewrite.

        Returns:
            The rewritten caption (single string).
        """
        texts = []
        messages = []

        message = [{
            "role": "user",
            "content": [
                {'type': 'text', 'text': """
请按要求改写我提供的英文文本

1. 在保持原文意思不变的情况下, 简化环境描述, 去除人物服装和饰品的描述, 如果没有则忽略
2. 描述中如果包含屏幕上的文本, 请删除这些
3. 返回的文本语言不能发生改变
4. 请使用更多样化但意思相近的形容词
5. 简化后的文本尽量保持在 100 个单词内
6. 去除不必要的内容只保留关键信息
7. 去除 "The video showcases", "The video features" "The video shows" 等意思相近的句子
8. 请直接回复简化后的文本

请改写以下文本:

{}
""".format(inputs)}
            ],
        }]

        messages.append(message)

        text = self.processor.apply_chat_template(
            message, tokenize=False, add_generation_prompt=True
        )
        texts.append(text)

        return self._generate(texts, messages, temperature=0.5)[0]

