# Copyright (c) Alibaba, Inc. and its affiliates.
import os
import sys
from functools import partial, wraps
from typing import Any, Dict

from transformers import AutoConfig
from transformers.dynamic_module_utils import get_class_from_dynamic_module

from swift.llm import TemplateType
from ..constant import MLLMModelType
from ..model_arch import ModelArch
from ..register import (Model, ModelGroup, ModelMeta, get_model_tokenizer_multimodal,
                        get_model_tokenizer_with_flash_attn, register_model)
from ..utils import ModelInfo, git_clone_github, safe_snapshot_download

from safetensors.torch import load_file
import torch


def _patch_llava(model):
    if hasattr(model, '__old_generate'):
        return
    generate = model.generate
    model.__old_generate = generate

    @wraps(generate)
    def _new_generate(inputs=None, *args, **kwargs):
        input_ids = kwargs.pop('input_ids', None)
        if inputs is None and input_ids is not None:
            inputs = input_ids
        return generate(inputs, *args, **kwargs)

    model.generate = _new_generate


def get_model_tokenizer_vlm_fo1(model_dir: str,
                                model_info: ModelInfo,
                                model_kwargs: Dict[str, Any],
                                load_model: bool = True,
                                **kwargs):
    """Load the VLM-FO1 (OmChat Qwen2.5-VL) model and tokenizer.

    Args:
        model_dir: Local directory containing the model config and weights.
        model_info: Model metadata consumed by the generic loader.
        model_kwargs: Keyword arguments forwarded to the model constructor
            (e.g. ``device_map``).
        load_model: When ``False`` only the tokenizer is loaded and the
            returned ``model`` is ``None``.
        **kwargs: Extra options forwarded to
            ``get_model_tokenizer_with_flash_attn``.

    Returns:
        Tuple ``(model, tokenizer)``; ``model`` is ``None`` when
        ``load_model`` is ``False``.
    """
    # Imported lazily so the custom modeling code is only required when this
    # model type is actually instantiated.
    from swift.llm.model.model.vlm_fo1.model import OmChatQwen25VLForCausalLM, OmChatQwen25VLConfig

    kwargs['model_config'] = OmChatQwen25VLConfig.from_pretrained(model_dir)
    kwargs['automodel_class'] = OmChatQwen25VLForCausalLM

    model, tokenizer = get_model_tokenizer_with_flash_attn(model_dir, model_info, model_kwargs, load_model, **kwargs)

    if model is not None:
        # The tokenizer may carry extra special tokens; keep embeddings in sync.
        model.resize_token_embeddings(len(tokenizer))

        # Initialize the primary vision tower (its weights load separately
        # from the LLM backbone).
        primary_vision_tower = model.get_vision_tower()
        if primary_vision_tower and not primary_vision_tower.is_loaded:
            primary_vision_tower.load_model(model_path=model_dir)
            # Match the LLM's device; bfloat16 is the expected compute dtype.
            primary_vision_tower.to(device=model.device, dtype=torch.bfloat16)

        # Initialize the auxiliary vision tower (Qwen2.5-VL variants only).
        if 'qwen2.5-vl' in model_dir.lower() or 'qwen2_5_vl' in model_dir.lower():
            # Fall back to 768 when aux_image_size is missing from the config
            # (typical configured value is 1024).
            aux_image_size = getattr(model.config, 'aux_image_size', 768)
            aux_image_aspect_ratio = model.config.aux_image_aspect_ratio  # e.g. 'dynamic'
            aux_vision_tower = model.get_vision_tower_aux()
            # Only load if not already loaded.
            if aux_vision_tower and not aux_vision_tower.is_loaded:
                aux_vision_tower.load_model(
                    image_size=aux_image_size, is_train=False, aspect_ratio=aux_image_aspect_ratio)
                aux_vision_tower.to(device=model.device, dtype=torch.bfloat16)

    return model, tokenizer
    
# Registration metadata for the VLM-FO1 (OmChat Qwen2.5-VL) multimodal model.
_vlm_fo1_model_meta = ModelMeta(
    MLLMModelType.vlm_fo1,
    [],
    TemplateType.vlm_fo1,
    get_model_tokenizer_vlm_fo1,
    is_multimodal=True,
    architectures=['OmChatQwen25VLForCausalLM'],
    model_arch=ModelArch.vlm_fo1_qwen2_5,
    requires=['transformers>=4.50.1', 'qwen_vl_utils>=0.0.6', 'decord'],
    tags=['vision'],
)
register_model(_vlm_fo1_model_meta)