import torch
import torch.nn.functional as F
from PIL import Image
import os
import numpy as np
from transformers import AutoModel, AutoTokenizer, AutoImageProcessor
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Configure the CUDA caching allocator BEFORE any call that can initialize the
# CUDA context: PYTORCH_CUDA_ALLOC_CONF is read when the allocator first runs,
# so setting it after torch.cuda.empty_cache() (as the code previously did)
# risks the setting being ignored.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
if torch.cuda.is_available():
    # Release cached blocks to ease fragmentation-related OOMs; skipped on
    # CPU-only hosts where there is no CUDA context to clean.
    torch.cuda.empty_cache()

class ChineseCLIPSimilarityCalculator:
    """Image-text similarity scorer backed by a locally stored llava-style model.

    Loads the model with 8-bit quantization, extracts image features through the
    model's vision tower and text features through its language model, and
    scores image/text pairs with cosine similarity.
    """

    def __init__(self, clip_model_path, device="cuda"):
        """
        Args:
            clip_model_path: local model directory (config.json, weights, etc.).
            device: device that *inputs* are moved to, default "cuda". The model
                itself is placed by ``device_map="auto"`` at load time.
        """
        self.device = device
        self.model_path = clip_model_path

        # Load model, tokenizer and image processor from the local path.
        self.load_model()

        # Inference only — disable dropout etc.
        self.model.eval()

    def load_model(self):
        """Load the quantized model, tokenizer and image processor from disk.

        Raises:
            Exception: re-raised after logging when any component fails to load.
        """
        try:
            # 8-bit quantization roughly halves memory use. NOTE: the previous
            # bnb_8bit_use_double_quant / bnb_8bit_quant_type / bnb_8bit_compute_dtype
            # arguments do not exist in BitsAndBytesConfig (double-quant and
            # "nf4" are 4-bit-only options, spelled bnb_4bit_*), so they were
            # silently ignored. If 8-bit is still too large, switch to:
            #   BitsAndBytesConfig(load_in_4bit=True,
            #                      bnb_4bit_use_double_quant=True,
            #                      bnb_4bit_quant_type="nf4",
            #                      bnb_4bit_compute_dtype=torch.float16)
            bnb_config = BitsAndBytesConfig(load_in_8bit=True)

            # Half-precision load; device_map="auto" spreads layers over the
            # available devices (GPU first, spilling to CPU if needed).
            self.model = AutoModel.from_pretrained(
                self.model_path,
                trust_remote_code=True,
                quantization_config=bnb_config,
                torch_dtype=torch.float16,
                device_map="auto"
            )

            # Tokenizer from the same path; the slow tokenizer is kept for
            # Chinese-text compatibility — adjust if the fast one works.
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True,
                use_fast=False
            )

            # Image preprocessing pipeline shipped alongside the model.
            self.preprocess = AutoImageProcessor.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )

            print(f"成功加载本地 llava 模型: {self.model_path}")

        except Exception as e:
            print(f"模型加载失败: {e}")
            raise

    @staticmethod
    def _to_pil(image):
        """Coerce a path / PIL image / tensor / ndarray into an RGB PIL image.

        Raises:
            TypeError: for unsupported input types.
        """
        if isinstance(image, Image.Image):
            return image
        if isinstance(image, str) and os.path.exists(image):
            return Image.open(image).convert("RGB")
        if isinstance(image, torch.Tensor):
            # Assumed layout [C, H, W] or [B, C, H, W] with values in [0, 1]
            # — TODO confirm against callers.
            if image.dim() == 4:
                image = image[0]  # drop the batch dimension
            # detach + cpu BEFORE numpy(): the old code skipped .cpu() for 3-D
            # tensors, which crashes on CUDA tensors.
            image_np = (image.detach().cpu().permute(1, 2, 0).numpy() * 255).astype(np.uint8)
            return Image.fromarray(image_np)
        if isinstance(image, np.ndarray):
            # Assumed layout [H, W, C]; non-uint8 arrays are treated as [0, 1].
            if image.dtype != np.uint8:
                image = (image * 255).astype(np.uint8)
            if image.shape[-1] == 1:  # grayscale -> 3 channels
                image = np.repeat(image, 3, axis=-1)
            return Image.fromarray(image).convert("RGB")
        raise TypeError(f"不支持的图像类型: {type(image)}")

    def get_image_embedding(self, image):
        """Return L2-normalized image features for one image (any supported type).

        Raises:
            TypeError: unsupported image input type.
            RuntimeError: model exposes no vision tower.
        """
        with torch.no_grad():
            image = self._to_pil(image)

            # Preprocess, then move the pixel tensors to the target device.
            image_input = self.preprocess(
                image,
                return_tensors="pt"
            ).to(self.device, dtype=torch.float16)

            vision_tower = getattr(self.model, "vision_tower", None)
            if vision_tower is None:
                # The old fallback still dereferenced self.model.vision_tower
                # and died with a bare AttributeError — fail loudly instead.
                raise RuntimeError(
                    "model has no vision_tower; cannot extract image features"
                )
            image_features = vision_tower(**image_input)

            # Remote-code towers may wrap the tensor in a ModelOutput; unwrap
            # to a single feature vector per image before normalizing.
            if not isinstance(image_features, torch.Tensor):
                pooled = getattr(image_features, "pooler_output", None)
                if pooled is not None:
                    image_features = pooled
                elif hasattr(image_features, "last_hidden_state"):
                    # mean-pool the patch tokens into one vector
                    image_features = image_features.last_hidden_state.mean(dim=1)
                else:
                    image_features = image_features[0]

            return F.normalize(image_features, dim=-1).to(dtype=torch.float16)

    def get_text_embeddings(self, texts):
        """Return L2-normalized text features, one row per input text.

        Args:
            texts: a single string or a list of strings (must be non-empty).

        Raises:
            ValueError: when ``texts`` is empty.
            RuntimeError: model exposes no language model.
        """
        with torch.no_grad():
            if isinstance(texts, str):
                texts = [texts]
            if not texts:
                # The old code fell through to torch.cat([]) and crashed with
                # an unrelated error message.
                raise ValueError("texts must contain at least one string")

            language_model = getattr(self.model, "language_model", None)
            if language_model is None:
                # Previously this silently collected nothing, then crashed in
                # torch.cat — surface the real problem instead.
                raise RuntimeError(
                    "model has no language_model; cannot extract text features"
                )

            # Tiny batches to limit VRAM pressure.
            max_batch_size = 1
            all_features = []

            for i in range(0, len(texts), max_batch_size):
                batch_texts = texts[i:i + max_batch_size]

                text_inputs = self.tokenizer(
                    batch_texts,
                    padding=True,
                    truncation=True,
                    return_tensors="pt"
                ).to(self.device)

                text_outputs = language_model(**text_inputs)
                hidden = text_outputs.last_hidden_state

                # Attention-mask-weighted mean: padded positions must not
                # dilute the sentence embedding (the old plain .mean(dim=1)
                # averaged over pad tokens too).
                mask = text_inputs["attention_mask"].unsqueeze(-1).to(hidden.dtype)
                batch_features = (hidden * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1)
                all_features.append(batch_features)

            text_features = torch.cat(all_features, dim=0)
            return F.normalize(text_features, dim=-1).to(dtype=torch.float16)

    def calculate_similarity(self, image, texts):
        """Cosine similarities between one image and each text.

        Returns:
            numpy array of similarity scores, one per text.
        """
        with torch.no_grad():
            image_features = self.get_image_embedding(image)
            text_features = self.get_text_embeddings(texts)

            # Both feature sets on the same device before the matmul.
            image_features = image_features.to(self.device)
            text_features = text_features.to(self.device)

            # Features are already normalized, so the dot product IS the
            # cosine similarity; squeeze drops the single-image batch dim.
            similarities = torch.matmul(image_features, text_features.T).squeeze(0).cpu().numpy()
            return similarities

    def filter_texts_by_similarity(self, image, texts, threshold=0.3, verbose=False):
        """Keep texts scoring >= threshold, sorted by similarity (descending).

        Returns:
            (filtered_texts, filtered_similarities, all_similarities)
        """
        similarities = self.calculate_similarity(image, texts)

        # Pair, filter by threshold, then sort best-first.
        filtered = [
            (text, sim) for text, sim in zip(texts, similarities)
            if sim >= threshold
        ]
        filtered.sort(key=lambda x: x[1], reverse=True)

        filtered_texts = [t[0] for t in filtered]
        filtered_sims = [t[1] for t in filtered]
        return filtered_texts, filtered_sims, similarities