import torch
from cn_clip.clip import load_from_name, tokenize
import torch.nn.functional as F
from PIL import Image
import os
import numpy as np

class ChineseCLIPSimilarityCalculator:
    """Image-text similarity scoring with the Chinese-CLIP ViT-B-16 model.

    Wraps checkpoint loading, image/text preprocessing, L2-normalized
    embedding extraction, and threshold-based text filtering.
    """

    def __init__(self, clip_model_path, device="cuda"):
        """Load the ViT-B-16 Chinese-CLIP checkpoint.

        Args:
            clip_model_path: Directory used as the download/cache root
                for the checkpoint files.
            device: torch device string, e.g. "cuda", "cuda:0" or "cpu".
        """
        self.device = device
        self.model, self.preprocess = load_from_name(
            name="ViT-B-16",
            device=device,
            download_root=clip_model_path
        )
        self.model.eval()
        # Force float32 weights so inference never runs in half precision.
        self.model = self.model.to(dtype=torch.float32)

    def _autocast_off(self):
        """Return an autocast context disabled for this instance's device type.

        The original code hard-coded device_type='cuda'; deriving it from
        self.device keeps a device="cpu" instance from requesting CUDA autocast.
        """
        device_type = "cuda" if "cuda" in str(self.device) else "cpu"
        return torch.amp.autocast(device_type=device_type, enabled=False)

    def get_image_embedding(self, image):
        """Return an L2-normalized (1, D) float32 embedding for one image.

        Args:
            image: One of
                - PIL.Image.Image: used as-is;
                - str: path to an image file (must exist);
                - torch.Tensor: CHW or NCHW (first image of the batch is
                  taken); values assumed in [0, 1] — TODO confirm with callers;
                - np.ndarray: HWC, uint8 or float assumed in [0, 1].

        Raises:
            TypeError: for any other input type.
        """
        with torch.no_grad(), self._autocast_off():
            if isinstance(image, Image.Image):
                pass  # already a PIL image, nothing to convert

            elif isinstance(image, str) and os.path.exists(image):
                image = Image.open(image).convert("RGB")

            elif isinstance(image, torch.Tensor):
                if image.dim() == 4:
                    image = image[0]  # take the first image of the batch
                # Bug fix: the original only called .cpu() in the 4-D branch,
                # so a 3-D CUDA tensor crashed at .numpy(). Detach and move to
                # CPU unconditionally; clamp so the uint8 cast cannot wrap for
                # values slightly outside [0, 1].
                image = image.detach().cpu().clamp(0, 1)
                image_np = (image.permute(1, 2, 0).numpy() * 255).astype(np.uint8)
                image = Image.fromarray(image_np)

            elif isinstance(image, np.ndarray):
                if image.dtype != np.uint8:
                    image = (image * 255).astype(np.uint8)
                if image.shape[-1] == 1:
                    image = np.repeat(image, 3, axis=-1)  # grayscale -> RGB
                image = Image.fromarray(image).convert("RGB")

            else:
                raise TypeError(f"不支持的图像类型: {type(image)}")

            # image is guaranteed to be a PIL Image at this point.
            image_input = self.preprocess(image).unsqueeze(0).to(self.device, dtype=torch.float32)
            image_features = self.model.encode_image(image_input)
            return F.normalize(image_features, dim=-1).to(dtype=torch.float32)

    def get_text_embeddings(self, texts):
        """Return L2-normalized (N, D) float32 embeddings for the given text(s).

        Args:
            texts: A single string or a list of strings.
        """
        with torch.no_grad(), self._autocast_off():
            if isinstance(texts, str):
                texts = [texts]
            # Tokenize into the integer id tensor the model expects.
            text_tokens = tokenize(texts).to(self.device, dtype=torch.long)
            text_features = self.model.encode_text(text_tokens)
            return F.normalize(text_features, dim=-1).to(dtype=torch.float32)

    def calculate_similarity(self, image, texts):
        """Return cosine similarities between one image and N texts.

        Returns:
            np.ndarray of shape (N,) — both embeddings are L2-normalized,
            so the dot product is the cosine similarity.
        """
        with torch.no_grad(), self._autocast_off():
            image_features = self.get_image_embedding(image)
            text_features = self.get_text_embeddings(texts)

            image_features = image_features.to(dtype=torch.float32, device=self.device)
            text_features = text_features.to(dtype=torch.float32, device=self.device)

            similarities = torch.matmul(image_features, text_features.T).squeeze(0).cpu().numpy()
            return similarities

    def filter_texts_by_similarity(self, image, texts, threshold=0.3, verbose=False):
        """Keep texts whose similarity to the image is >= threshold.

        Args:
            image: Anything accepted by get_image_embedding.
            texts: List of candidate text labels.
            threshold: Minimum cosine similarity to keep a text.
            verbose: If True, print every text with its similarity score.
                (Previously accepted but ignored; now implemented — default
                behavior is unchanged.)

        Returns:
            (filtered_texts, filtered_sims, similarities): texts passing the
            threshold sorted by descending similarity, their scores, and the
            raw per-text similarity array.
        """
        similarities = self.calculate_similarity(image, texts)

        if verbose:
            for text, sim in zip(texts, similarities):
                print(f"{text}: {sim:.4f}")

        filtered = [
            (text, sim) for text, sim in zip(texts, similarities)
            if sim >= threshold
        ]
        filtered.sort(key=lambda x: x[1], reverse=True)

        filtered_texts = [t[0] for t in filtered]
        filtered_sims = [t[1] for t in filtered]
        return filtered_texts, filtered_sims, similarities