from transformers import AutoConfig
from transformers import TextStreamer
from namo.models.namo import NamoForCausalLM
from namo.models.configuration_namo import NamoConfig
from namo.utils.infer_utils import load_multi_images_maybe
from namo.utils.process_utils import tokenizer_image_token
import torch

# qwen_vl_utils is an optional dependency used only to extract the image
# inputs from chat-style messages.  The original guard silently swallowed the
# ImportError, which deferred the failure to an opaque NameError at the call
# site; instead, install a stub that fails loudly with an actionable message.
try:
    from qwen_vl_utils import process_vision_info
except ImportError as exc:
    # Bind the exception to a module-level name: the `as exc` variable is
    # deleted when the except block exits (PEP 3110), so the stub cannot
    # reference it directly.
    _QWEN_VL_IMPORT_ERROR = exc

    def process_vision_info(*args, **kwargs):
        """Stub raised in place of the missing optional dependency."""
        raise ImportError(
            "qwen_vl_utils is required for process_vision_info(); "
            "install it with `pip install qwen-vl-utils`."
        ) from _QWEN_VL_IMPORT_ERROR


# Inference smoke test: assemble a Namo VLM from separate text/vision configs,
# run a single image-description generation, then save the combined checkpoint.

torch.set_grad_enabled(False)  # inference only; skip autograd bookkeeping
device = "cuda" if torch.cuda.is_available() else "cpu"

# Text (LLM) and vision-encoder configs are loaded from separate checkpoints
# and combined into one NamoConfig below.
text_config = AutoConfig.from_pretrained(
    "checkpoints/Qwen2.5-0.5B-Instruct", trust_remote_code=True
)
vision_config = AutoConfig.from_pretrained(
    # NOTE(review): .lower() rewrites this to
    # "checkpoints/qwen2.5-vl-3b-instruct-ve".  On a case-sensitive
    # filesystem that only resolves if the directory really is lower-case —
    # confirm the on-disk name and drop .lower() if it is mixed-case.
    "checkpoints/Qwen2.5-VL-3B-Instruct-ve".lower(),
    trust_remote_code=True,
)

config = NamoConfig(text_config=text_config, vision_config=vision_config)

# Freshly initialised model (no pretrained connector weights are loaded here).
namo_model = NamoForCausalLM(config=config).to(device)
namo_model.to(torch.bfloat16)
image_processor = namo_model.get_vision_tower().image_processor

# One-turn chat request: a single local image plus a text instruction.
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "images/cats.jpg",
                # Caps the resolution handed to the vision tower.
                "max_pixels": 430 * 28 * 28,
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
image_inputs, _ = process_vision_info(messages)  # second item (videos) unused
text = image_processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

print(text, image_inputs)
inputs = image_processor(
    text=[text],
    images=image_inputs,
    videos=None,
    padding=True,
    return_tensors="pt",
)
# (The processor already returns CPU tensors; the previous explicit
# `inputs.to("cpu")` round-trip was a no-op and has been dropped.)
print(inputs)
print(inputs["pixel_values"].shape)

tokenizer = image_processor.tokenizer
# Move model inputs to the model's dtype/device; image_grid_thw stays on CPU
# because it is metadata (per-image temporal/height/width patch grid).
pixel_values = inputs["pixel_values"].to(namo_model.dtype).to(namo_model.device)
input_ids = inputs["input_ids"].to(namo_model.device)
image_grid_thw = inputs["image_grid_thw"]

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
# Bug fix: the model was cast to bfloat16 above, but autocast previously
# requested float16 and hard-coded device_type="cuda" (wrong on CPU-only
# hosts).  Match autocast to the actual model dtype and runtime device.
with torch.autocast(device_type=device, dtype=torch.bfloat16):
    output_ids = namo_model.generate(
        pixel_values=pixel_values,
        pixel_attention_mask=None,
        image_sizes=image_grid_thw,
        input_ids=input_ids,
        do_sample=False,  # greedy decoding for a reproducible smoke test
        max_new_tokens=100,
        streamer=streamer,
        use_cache=True,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
outputs = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
print(f"final output:\n{outputs}")

# Persist everything needed to reload the assembled model with from_pretrained().
model_path = "checkpoints/namo-500m-hydra"
namo_model.save_pretrained(model_path)
config.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
image_processor.save_pretrained(model_path)
