from vlmeval import IDEFICS, LLaVA
from vlmeval.config import supported_VLM
import os
import sys
import torch
from functools import partial
import warnings
from PIL import Image

# Suppress the bitsandbytes CPU-only build warning.
warnings.filterwarnings(
    "ignore",
    message="The installed version of bitsandbytes was compiled without GPU support",
)

# Force CPU execution: hide every GPU and flag CUDA as disabled for
# libraries that consult these environment variables.
os.environ.update({
    "CUDA_VISIBLE_DEVICES": "",  # hide all GPUs
    "USE_CUDA": "0",             # some libraries check this flag
})

# Monkey-patch torch so nothing downstream can detect a CUDA device.
original_cuda_available = torch.cuda.is_available
torch.cuda.is_available = lambda: False

# Point the HuggingFace / transformers caches at a local directory.
os.environ.update({
    "TRANSFORMERS_CACHE": "models",
    "HF_HOME": "models",
})

# Globally intercept half-precision conversions and redirect them to
# float32 (fp16 kernels are poorly supported on CPU).
original_to = torch.Tensor.to

def patched_to(self, *args, **kwargs):
    """``torch.Tensor.to`` wrapper that rewrites float16/half requests to float32.

    Handles the keyword form ``.to(dtype=torch.half)`` as well as the
    positional forms ``.to(torch.half)`` and ``.to(device, torch.half)``.
    (Bug fix: the old code only inspected ``args[0]``, so a dtype passed
    after a device argument was not converted.)
    """
    if kwargs.get('dtype') in (torch.float16, torch.half):
        kwargs['dtype'] = torch.float32
    # A dtype may appear in any positional slot (e.g. after a device).
    args = tuple(
        torch.float32
        if isinstance(a, torch.dtype) and a in (torch.float16, torch.half)
        else a
        for a in args
    )
    return original_to(self, *args, **kwargs)

torch.Tensor.to = patched_to

# Sanity check: with the patches above this should always report False.
print("CUDA 是否可用: {}".format(torch.cuda.is_available()))
print("已强制使用CPU模式运行")

# Path to the local llava-v1.5-7b checkpoint.
LOCAL_MODEL_PATH = "models/llava-v1.5-7b"

# 对模块的cuda函数打补丁
def apply_cuda_patches():
    """Monkey-patch ``torch.Tensor`` so CUDA placement and half-precision
    conversions are silently redirected to CPU / float32.

    Idempotent: the pristine ``torch.Tensor.to`` is cached on first use so
    repeated calls do not stack wrappers on top of each other.

    Returns:
        tuple: ``(previous torch.Tensor.cuda, pristine torch.Tensor.to)`` so
        a caller could restore the originals if needed.
    """
    # Cache the pristine ``to`` exactly once (bug fix: the old code relied
    # on a module-level global and re-wrapped on every call).
    if not hasattr(torch.Tensor, "_pristine_to"):
        torch.Tensor._pristine_to = torch.Tensor.to
    pristine_to = torch.Tensor._pristine_to

    original_tensor_cuda = torch.Tensor.cuda

    def patched_cuda(self, *args, **kwargs):
        # ``.cuda()`` becomes a no-op: the tensor stays where it is.
        return self

    torch.Tensor.cuda = patched_cuda

    def half_to_float(self, *args, **kwargs):
        # ``.half()`` / ``.bfloat16()`` upgrade to float32 instead.
        return self.float()

    torch.Tensor.half = half_to_float
    torch.Tensor.bfloat16 = half_to_float

    def _is_cuda_device(value):
        # Matches 'cuda', 'cuda:0', torch.device('cuda'), ...
        # (bug fix: the old code compared against the exact string 'cuda'
        # and missed indexed spellings like 'cuda:0').
        if isinstance(value, torch.device):
            return value.type == 'cuda'
        return isinstance(value, str) and value.startswith('cuda')

    def _is_half_dtype(value):
        return value in (torch.float16, torch.half)

    def safe_to(self, *args, **kwargs):
        """``torch.Tensor.to`` wrapper: cuda → cpu, float16/half → float32."""
        # Rewrite a CUDA device request to CPU (bug fix: the old code set a
        # dead ``device_arg`` variable and simply dropped the argument).
        if args and _is_cuda_device(args[0]):
            args = ('cpu',) + args[1:]
        if 'device' in kwargs and _is_cuda_device(kwargs['device']):
            kwargs['device'] = 'cpu'

        # Rewrite half-precision requests, keyword or positional.
        if _is_half_dtype(kwargs.get('dtype')):
            kwargs['dtype'] = torch.float32
        args = tuple(
            torch.float32 if isinstance(a, torch.dtype) and _is_half_dtype(a) else a
            for a in args
        )

        return pristine_to(self, *args, **kwargs)

    torch.Tensor.to = safe_to

    return (original_tensor_cuda, pristine_to)

# Intercept transformers' parameter-dtype probe: if a model reports
# float16, pretend it is float32 so downstream code never switches to
# half-precision paths.
import types
import transformers

if hasattr(transformers.modeling_utils, "get_parameter_dtype"):
    original_get_parameter_dtype = transformers.modeling_utils.get_parameter_dtype

    def patched_get_parameter_dtype(parameter):
        reported = original_get_parameter_dtype(parameter)
        return torch.float32 if reported in (torch.float16, torch.half) else reported

    transformers.modeling_utils.get_parameter_dtype = patched_get_parameter_dtype

# 创建一个LLaVA的Direct模式类
class DirectLLaVA:
    """Wrapper around the vlmeval LLaVA runner that forces CPU / float32 execution.

    CUDA- and half-precision-related torch methods are patched before the
    underlying model is imported and loaded, and the loaded weights are cast
    to float32 afterwards.
    """

    def __init__(self, model_path=None):
        """Load the LLaVA model from ``model_path`` with CPU/float32 patches applied.

        Args:
            model_path: Path to a local llava-v1.5-7b checkpoint directory.

        Raises:
            Exception: re-raises whatever the underlying loader raises.
        """
        print(f"使用直接修改模式加载模型: {model_path}")

        try:
            self.model_path = model_path

            # Apply the torch patches BEFORE any model code is imported.
            apply_cuda_patches()

            # Import the llava package so we can report where it lives.
            import llava.model.builder
            import llava.model.llava_arch

            llava_path = llava.model.builder.__file__
            print(f"LLaVA模块路径: {os.path.dirname(llava_path)}")

            # Load through vlmeval's model registry instead of editing
            # llava's source files on disk.
            from vlmeval.config import supported_VLM
            self._model = supported_VLM['llava_v1.5_7b'](model_path=model_path)

            # Belt and braces: cast the whole model to float32 after loading.
            print("转换模型到float32...")
            self._model.model = self._model.model.float()

            print("模型加载成功")
        except Exception as e:
            print(f"模型加载失败: {e}")
            raise

    def generate(self, *args, **kwargs):
        """Proxy to the wrapped model's ``generate``.

        On failure, retries once after patching ``llava.mm_utils.process_images``
        so any image tensor it produces is forced to float32.
        """
        try:
            # Re-apply the patches in case something restored the originals.
            apply_cuda_patches()

            # Keep the weights in float32.
            self._model.model = self._model.model.float()

            return self._model.generate(*args, **kwargs)
        except Exception as e:
            print(f"生成过程中出错: {e}")
            print("尝试转换图像输入的数据类型...")

            # Fallback: if the first positional argument looks like a vlmeval
            # message list, patch the image pipeline and retry once.
            # (Cleanup: the old code also built a message dict list, loaded
            # the images with PIL and converted them to tensors, but all of
            # those results were dead — only the image paths were used.)
            if len(args) > 0 and isinstance(args[0], list):
                image_paths = [
                    item for item in args[0]
                    if isinstance(item, str)
                    and os.path.exists(item)
                    and os.path.isfile(item)
                ]

                if image_paths:
                    try:
                        # Wrap llava's process_images so its output is float32.
                        from llava.mm_utils import process_images

                        def safe_process_images(processor, images, *pargs, **pkwargs):
                            result = process_images(processor, images, *pargs, **pkwargs)
                            if hasattr(result, 'dtype') and result.dtype in (torch.float16, torch.half):
                                result = result.float()
                            return result

                        import llava.mm_utils
                        llava.mm_utils.process_images = safe_process_images

                        # Retry with the patched image pipeline.
                        return self._model.generate(*args, **kwargs)
                    except Exception as e2:
                        print(f"图像转换失败: {e2}")
                        raise

            # Nothing we could do — propagate the original failure.
            raise

# ---- Script entry: load the model and run two smoke tests. ----
try:
    # Bail out early if the checkpoint directory is missing.
    if not os.path.exists(LOCAL_MODEL_PATH):
        print(f"错误: 本地模型路径 {LOCAL_MODEL_PATH} 不存在！")
        print("请确保已下载LLaVA模型到指定路径，或修改LOCAL_MODEL_PATH指向正确的位置。")
        sys.exit(1)

    print(f"\n正在加载本地LLaVA模型: {LOCAL_MODEL_PATH}")

    # Build the CPU/float32 wrapper around the local checkpoint.
    model = DirectLLaVA(model_path=LOCAL_MODEL_PATH)
    print("LLaVA模型加载成功!")

    # Smoke test 1: single image.
    print("\n测试单张图片...")
    answer = model.generate(['assets/apple.jpg', '这张图片里有什么?'])
    print(f"结果: {answer}")

    # Smoke test 2: multiple images.
    print("\n测试多张图片...")
    answer = model.generate(['assets/apple.jpg', 'assets/apple.jpg', '提供的图片中有多少个苹果?'])
    print(f"结果: {answer}")

except Exception as e:
    print(f"\n模型加载或运行出错: {e}")
    import traceback
    traceback.print_exc()