# models.py
# 定义基于CLIP的分类模型（使用Hugging Face transformers库）。

import torch
import torch.nn as nn
from transformers import CLIPVisionModel, CLIPProcessor
from PIL import Image

class ClipClassifier(nn.Module):
    """
    CLIP-based image classifier (Hugging Face transformers).

    A pretrained CLIP vision encoder is used as a *frozen* feature
    extractor; a single trainable linear layer maps the pooled image
    embedding to class logits.

    Args:
        model_name (str): Either a short alias ("clip_vit_b_32",
            "clip_vit_b_16", "clip_vit_l_14") or a Hugging Face repo id
            starting with "openai/". Anything else falls back to
            "openai/clip-vit-base-patch32".
        num_classes (int): Number of output classes for the linear head.
        device (torch.device | str): Device the vision encoder is moved to.

    Raises:
        Exception: Re-raised if the model can be loaded neither from the
            local hub cache nor downloaded from the mirror.
    """

    # Short aliases mapped to Hugging Face repo ids.
    _MODEL_ALIASES = {
        "clip_vit_b_32": "openai/clip-vit-base-patch32",
        "clip_vit_b_16": "openai/clip-vit-base-patch16",
        "clip_vit_l_14": "openai/clip-vit-large-patch14",
    }

    def __init__(self, model_name, num_classes, device):
        super(ClipClassifier, self).__init__()
        self.device = device

        # Resolve short aliases; pass through explicit "openai/..." ids,
        # otherwise default to the base/patch32 model.
        if model_name in self._MODEL_ALIASES:
            hf_model_name = self._MODEL_ALIASES[model_name]
        elif model_name.startswith("openai/"):
            hf_model_name = model_name
        else:
            hf_model_name = "openai/clip-vit-base-patch32"

        print(f"Loading CLIP model: {hf_model_name}")

        # Route Hugging Face traffic through the CN mirror.
        import os
        os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
        print("使用国内镜像源: https://hf-mirror.com")

        # BUGFIX: the cache probe used to search for the literal string
        # "clip-vit-base-patch32" regardless of the requested model, so
        # asking for patch16 / large-patch14 could silently load the wrong
        # cached weights. The probe now keys on the repo actually requested.
        model_cache_path = self._find_cached_snapshot(hf_model_name)

        try:
            if model_cache_path and os.path.exists(model_cache_path):
                # Load entirely from the local hub cache (no network access).
                print(f"✅ 发现本地缓存，从缓存加载: {model_cache_path}")
                self.vision_model = CLIPVisionModel.from_pretrained(
                    model_cache_path,
                    local_files_only=True
                )
                self.processor = CLIPProcessor.from_pretrained(
                    model_cache_path,
                    local_files_only=True,
                    use_fast=False  # explicit slow processor avoids a warning
                )
            else:
                # Cache missing/incomplete: fetch the snapshot via the mirror.
                print("📥 本地缓存不完整，从国内镜像下载...")
                from huggingface_hub import snapshot_download

                downloaded_path = snapshot_download(
                    repo_id=hf_model_name,
                    endpoint='https://hf-mirror.com'
                )
                print(f"✅ 模型已下载到: {downloaded_path}")

                # Load model and processor from the downloaded snapshot.
                self.vision_model = CLIPVisionModel.from_pretrained(downloaded_path)
                self.processor = CLIPProcessor.from_pretrained(downloaded_path, use_fast=False)

            # Move the encoder to the requested device (CPU or GPU).
            self.vision_model = self.vision_model.to(device)
            print(f"✅ 模型已加载到 {device}")

        except Exception as e:
            print(f"❌ 模型加载失败: {e}")
            print("💡 建议: 请检查网络连接或稍后重试")
            raise  # bare raise preserves the original traceback

        # Freeze the CLIP encoder: it is used only as a fixed feature extractor.
        for param in self.vision_model.parameters():
            param.requires_grad = False

        # Hidden width of the vision transformer (e.g. 768 for ViT-B,
        # 1024 for ViT-L). NOTE: this is hidden_size, not the 512-dim
        # projected CLIP embedding.
        embedding_dim = self.vision_model.config.hidden_size

        # Trainable linear classification head on top of the pooled features.
        self.classifier = nn.Linear(embedding_dim, num_classes)

        print(f"CLIP vision encoder embedding dim: {embedding_dim}")

    @staticmethod
    def _find_cached_snapshot(hf_model_name):
        """Return the path of a local hub snapshot for *hf_model_name*, or None.

        Scans ~/.cache/huggingface/hub for a directory whose name contains
        the repo's short name (hub cache dirs look like
        "models--openai--clip-vit-base-patch32") and returns its first
        snapshot directory (sorted for determinism).
        """
        import os
        cache_dir = os.path.expanduser("~/.cache/huggingface/hub")
        if not os.path.exists(cache_dir):
            return None

        # "openai/clip-vit-base-patch16" -> "clip-vit-base-patch16"
        repo_short_name = hf_model_name.split("/")[-1]
        for item in os.listdir(cache_dir):
            if repo_short_name not in item:
                continue
            potential_path = os.path.join(cache_dir, item)
            if not os.path.isdir(potential_path):
                continue
            snapshots_dir = os.path.join(potential_path, 'snapshots')
            if os.path.exists(snapshots_dir):
                snapshot_dirs = sorted(os.listdir(snapshots_dir))
                if snapshot_dirs:
                    return os.path.join(snapshots_dir, snapshot_dirs[0])
        return None

    def forward(self, images):
        """
        Forward pass.

        Args:
            images (Tensor): Batch of already-preprocessed images,
                [batch_size, 3, 224, 224].

        Returns:
            Tensor: Raw class logits (no softmax), [batch_size, num_classes].
        """
        with torch.no_grad():  # frozen CLIP encoder never needs gradients
            vision_outputs = self.vision_model(pixel_values=images)
            image_features = vision_outputs.pooler_output  # [batch_size, hidden_size]

        # Cast to float32 in case the encoder runs in half precision.
        logits = self.classifier(image_features.float())
        return logits
