import os
import sys
import torch
from PIL import Image

import requests
from modelscope import snapshot_download
from qwen_omni_utils import process_mm_info
from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor

sys.path.append('examples/custom/my_qwen2_5_omni')

from swift.llm.model.model.vlm_fo1.model.language_model.omchat_qwen2_5_vl import OmChatQwen25VLForCausalLM
from swift.llm.model.model.vlm_fo1.model.builder import load_pretrained_model
from swift.llm.model.model.vlm_fo1.mm_utils import (
    prepare_inputs,
    draw_bboxes_and_save,
    extract_predictions_to_bboxes,
)
from swift.llm.model.model.vlm_fo1.task_templates import OD_template

from swift.llm import InferRequest, PtEngine, RequestConfig

# sys.path.append('swift/llm/my_qwen2_5_omni_wsy')


def infer_hf(
    img_path="/home/wushengyu/wsy/data/test_images/d7a2931fe4196c8c308379ddf558f5ae.jpg",
    model_dir='/data2/wushengyu/model/VLM-FO1_Qwen2.5-VL-3B-v01',
    output_path="./vlm_fo1_result2.jpg",
):
    """Run VLM-FO1 grounding inference through the HuggingFace-style path.

    Loads the checkpoint with ``load_pretrained_model``, builds a chat message
    carrying the image, an object-detection prompt and a list of candidate
    bounding boxes, generates a prediction, converts the predicted indexes back
    into box coordinates and writes an annotated visualization to disk.

    Args:
        img_path: Path to the input image.
        model_dir: Local directory of the VLM-FO1 checkpoint.
        output_path: Where the annotated result image is saved.

    Returns:
        Tuple ``(input_tokens, outputs)`` — the prompt token ids as a nested
        list, and the decoded generation string.
    """
    # Candidate boxes the model selects from.
    # Expected answer for this image/prompt: [277, 95, 399, 211] and [149, 106, 298, 207].
    bbox_list = [
        [99, 18, 152, 172], [38, 4, 87, 174], [277, 95, 399, 211],
        [0, 3, 38, 173], [3, 1, 80, 265], [149, 106, 298, 207],
    ]

    # Load vision-language model, tokenizer and image processors.
    tokenizer, model, image_processors = load_pretrained_model(model_dir)

    # Chat message with vision input plus the candidate bounding boxes.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {"url": img_path},
                },
                {
                    "type": "text",
                    "text": OD_template.format("the person kneeling on the ground with black cloths"),
                },
            ],
            "bbox_list": bbox_list,
        }
    ]

    # Build the generate() kwargs (input ids, pixel values, sampling config, ...).
    generation_kwargs = prepare_inputs(
        model_dir, model, image_processors, tokenizer, messages,
        max_tokens=4096, top_p=0.05, temperature=0.0, do_sample=False
    )

    # Run inference and decode only the newly generated tokens
    # (slice off the prompt prefix of length inputs.shape[1]).
    with torch.inference_mode():
        output_ids = model.generate(**generation_kwargs)
        outputs = tokenizer.decode(output_ids[0, generation_kwargs['inputs'].shape[1]:]).strip()

    input_tokens = generation_kwargs['inputs'].tolist()

    # Map the predicted candidate indexes back to bounding-box coordinates.
    bboxes = extract_predictions_to_bboxes(outputs, bbox_list)

    # Draw the detected boxes on the image and save the visualization.
    img_pil = Image.open(img_path).convert("RGB")
    draw_bboxes_and_save(
        image=img_pil,
        fo1_bboxes=bboxes,
        output_path=output_path
    )
    print("---------finished!---------------")    

    return input_tokens, outputs


def test_my_qwen2_5_vlmfo1():
    """Initialize the swift ``PtEngine`` for the VLM-FO1 model and prepare a
    sample request/config pair.

    The actual ``engine.infer`` call is currently disabled, so this returns
    ``(None, None)`` placeholders instead of real token ids / response text.
    """
    checkpoint_dir = "/data2/wushengyu/model/VLM-FO1_Qwen2.5-VL-3B-v01"
    engine = PtEngine(checkpoint_dir, model_type='vlm_fo1', attn_impl='flash_attention_2')

    sample_messages = [{
        'role': 'user',
        'content': '<image>Describe the image.',
    }]
    sample_request = InferRequest(
        messages=sample_messages,
        # videos=['https://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/baby.mp4'],
        images=['http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png'],
    )
    sample_config = RequestConfig(temperature=0, max_tokens=512)

    print("------>> finished VLM-FO1 model init!")

    # Inference is disabled for now; re-enable to obtain real outputs:
    # input_ids = engine.default_template.encode(sample_request)['input_ids']
    # resp_list = engine.infer([sample_request], sample_config)
    # resp = resp_list[0].choices[0].message.content
    input_ids = None
    resp = None

    return input_ids, resp


if __name__ == '__main__':
    import my_register

    # Debug mode: `PtEngine.infer` prints input_ids and generate_ids.
    os.environ['SWIFT_DEBUG'] = '1'

    # ##====== HuggingFace-path inference (disabled)
    # hf_ids, hf_resp = infer_hf()
    # print("--input_ids_hf: ", hf_ids)
    # print("--response_hf: ", hf_resp)

    ##====== swift-path inference
    swift_ids, swift_resp = test_my_qwen2_5_vlmfo1()

    # # Alignment check between the two paths (disabled)
    # assert hf_ids == swift_ids
    # assert hf_resp == swift_resp
