"""

Saving Qwen2.5 VL vision encoder

"""

from transformers import (
    AutoTokenizer,
    AutoProcessor,
)

try:
    from qwen_vl_utils import process_vision_info
except ImportError as e:
    pass
try:
    from transformers.models.qwen2_5_vl import Qwen2_5_VLModel
    from transformers import Qwen2_5_VLForConditionalGeneration
except ImportError as e:
    pass
from namo.api.base import VLBase
from loguru import logger
from transformers import TextStreamer


class Qwen2_5_VL(VLBase):
    """Thin VLBase wrapper that loads a Qwen2.5-VL model and its processor."""

    def __init__(self, model_path=None, processor_path=None, device="auto"):
        # VLBase presumably invokes load_model/load_processor during init
        # — TODO confirm against namo.api.base.VLBase.
        super().__init__(model_path, processor_path, device)
        # default: Load the model on the available device(s)

    def load_model(self, model_path):
        """Load the conditional-generation model and move it to ``self.device``.

        Args:
            model_path: Checkpoint directory; defaults to the local
                7B-Instruct checkpoint when ``None``.

        Returns:
            The loaded ``Qwen2_5_VLForConditionalGeneration`` model.
        """
        if model_path is None:
            model_path = "checkpoints/Qwen2.5-VL-7B-Instruct"
        model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            model_path, torch_dtype="auto"
        )
        model.to(self.device)
        logger.info(f"model loaded from: {model_path}")
        return model

    def load_processor(self, processor_path):
        """Load the processor and tokenizer, ensuring a usable ``pad_token_id``.

        Args:
            processor_path: Processor/tokenizer directory; defaults to the
                local 7B-Instruct checkpoint when ``None``.

        Returns:
            The loaded ``AutoProcessor``. Side effect: sets ``self.tokenizer``.
        """
        if processor_path is None:
            processor_path = "checkpoints/Qwen2.5-VL-7B-Instruct"
        processor = AutoProcessor.from_pretrained(processor_path)
        self.tokenizer = AutoTokenizer.from_pretrained(processor_path)
        if self.tokenizer.pad_token_id is None:
            # BUG FIX: tokenizer.encode() returns a list[int], but
            # pad_token_id must be a single int — downstream padding code
            # would break on a list. convert_tokens_to_ids returns the int
            # directly. Also guard against pad_token itself being None by
            # falling back to the EOS token.
            if self.tokenizer.pad_token is not None:
                self.tokenizer.pad_token_id = self.tokenizer.convert_tokens_to_ids(
                    self.tokenizer.pad_token
                )
            else:
                self.tokenizer.pad_token = self.tokenizer.eos_token
                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
        return processor


model_path = "checkpoints/Qwen2.5-VL-7B-Instruct"
# model_path = "checkpoints/Qwen2.5-VL-3B-Instruct"
model = Qwen2_5_VL(model_path=model_path, processor_path=model_path, device="cpu")
model.model.to("cpu")
ve = model.model.visual

ve.save_pretrained(f"{model_path}-ve".lower())
model.processor.save_pretrained(f"{model_path}-ve".lower())
print("done")

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "images/cats.jpg",
                "max_pixels": 430 * 28 * 28,
            },
            {
                "type": "image",
                "image": "images/candyjpg.JPG",
                "max_pixels": 430 * 28 * 28,
            },
            {"type": "text", "text": "Describe this 2 images."},
        ],
    }
]
image_inputs, _ = process_vision_info(messages)
text = model.processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

print(text, image_inputs)
inputs = model.processor(
    text=[text],
    images=image_inputs,
    videos=None,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cpu")
print(inputs)
print(inputs["pixel_values"].shape)

a = ve(inputs["pixel_values"], inputs["image_grid_thw"])
print(a)
print(a.shape)
# 364x252 => 117 tokens
# 700x448 => 400 tokens
