import torch
import torch.nn as nn
from transformers import CLIPModel, CLIPProcessor, CLIPConfig

class CLIPWrapper(nn.Module):
    """Thin wrapper around a pretrained Hugging Face CLIP model.

    Loads the model and processor (preferring the local cache, falling back
    to an online download) and exposes convenience methods for encoding
    images/text, computing cosine similarity, and preprocessing raw inputs.
    """

    def __init__(self, model_name='openai/clip-vit-base-patch32',
                 freeze_vision_encoder=False, freeze_text_encoder=False):
        """
        Args:
            model_name: Hugging Face hub identifier of the CLIP checkpoint.
            freeze_vision_encoder: if True, disable gradients for the vision tower.
            freeze_text_encoder: if True, disable gradients for the text tower.

        Raises:
            Exception: re-raised from `from_pretrained` when the model can be
                loaded neither from the local cache nor from the hub.
        """
        super().__init__()

        # Try the local cache first so repeated runs work offline.
        try:
            self.model = CLIPModel.from_pretrained(model_name, local_files_only=True)
            self.processor = CLIPProcessor.from_pretrained(model_name, local_files_only=True)
            print("✓ 从本地缓存加载CLIP模型成功")
        except Exception:  # narrowed from bare `except:` — never trap KeyboardInterrupt/SystemExit
            try:
                # Cache miss: fall back to downloading from the hub.
                print("正在下载CLIP模型，请稍候...")
                self.model = CLIPModel.from_pretrained(model_name)
                self.processor = CLIPProcessor.from_pretrained(model_name)
                print("✓ 下载并加载CLIP模型成功")
            except Exception as e:
                print(f"❌ 无法加载CLIP模型: {e}")
                print("请检查网络连接或使用离线模式")
                raise

        # Freezing is now configurable via __init__ (was hard-coded False,
        # which made the branches below dead code). Defaults preserve the
        # original behavior of training both towers.
        self.freeze_vision_encoder = freeze_vision_encoder
        self.freeze_text_encoder = freeze_text_encoder

        if self.freeze_vision_encoder:
            for param in self.model.vision_model.parameters():
                param.requires_grad = False

        if self.freeze_text_encoder:
            for param in self.model.text_model.parameters():
                param.requires_grad = False

    def forward(self, images, texts):
        """Encode a batch of images and texts.

        Args:
            images: preprocessed pixel values (when `texts` is a tensor) or
                raw images accepted by the CLIP processor.
            texts: either pre-tokenized `input_ids` (torch.Tensor) or raw
                strings to be tokenized by the processor.

        Returns:
            Tuple of L2-normalized (image_features, text_features).
        """
        attention_mask = None
        if isinstance(texts, torch.Tensor):
            # Inputs were already preprocessed upstream; use them as-is.
            pixel_values = images
            input_ids = texts
        else:
            # Raw inputs: run the CLIP processor (tokenization + image transforms).
            inputs = self.process_inputs(images, texts)
            pixel_values = inputs['pixel_values']
            input_ids = inputs['input_ids']
            # Bug fix: propagate the attention mask so that with padding=True
            # the padded tokens do not corrupt the text embedding.
            attention_mask = inputs.get('attention_mask')

        image_features = self.model.get_image_features(pixel_values)
        text_features = self.model.get_text_features(input_ids, attention_mask=attention_mask)

        # Unit-normalize so dot products between the two are cosine similarities.
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        return image_features, text_features

    def encode_image(self, images):
        """Return raw (un-normalized) CLIP image features for `images`."""
        return self.model.get_image_features(images)

    def encode_text(self, texts):
        """Return raw (un-normalized) CLIP text features for tokenized `texts`."""
        return self.model.get_text_features(texts)

    def compute_similarity(self, image_features, text_features):
        """Return the cosine-similarity matrix between image and text features.

        Both inputs are L2-normalized along the last dimension first, so the
        result of the matmul is a [num_images, num_texts] cosine matrix.
        """
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        similarity = torch.matmul(image_features, text_features.T)
        return similarity

    def process_inputs(self, images, texts):
        """Run the CLIP processor on raw images and text strings.

        Returns a BatchEncoding with `pixel_values`, `input_ids`, and
        `attention_mask` (padding/truncation enabled for variable-length text).
        """
        inputs = self.processor(
            text=texts,
            images=images,
            return_tensors="pt",
            padding=True,
            truncation=True
        )
        return inputs

class CustomCLIP(nn.Module):
    """CLIP trained from scratch: two encoders, linear projections into a
    shared embedding space, and a learnable softmax temperature."""

    def __init__(self, image_encoder, text_encoder, projection_dim=512):
        """
        Args:
            image_encoder: vision backbone; must expose `config.hidden_size`
                and return an object with `last_hidden_state`.
            text_encoder: text backbone with the same contract.
            projection_dim: dimensionality of the shared embedding space.
        """
        super(CustomCLIP, self).__init__()

        self.image_encoder = image_encoder
        self.text_encoder = text_encoder

        # Map each encoder's hidden size into the shared space.
        self.image_projection = nn.Linear(image_encoder.config.hidden_size, projection_dim)
        self.text_projection = nn.Linear(text_encoder.config.hidden_size, projection_dim)

        # Learnable log-temperature, initialized to log(1/0.07) as in the CLIP paper.
        self.logit_scale = nn.Parameter(torch.log(torch.full([], 1.0 / 0.07)))

    def forward(self, images, texts):
        """Compute scaled cosine-similarity logits for a batch.

        Args:
            images: input accepted by `image_encoder`.
            texts: keyword-argument dict unpacked into `text_encoder`.

        Returns:
            (logits_per_image, logits_per_text), where the second is the
            transpose of the first.
        """
        # Pool both modalities with their [CLS] token.
        img_cls = self.image_encoder(images).last_hidden_state[:, 0, :]
        txt_cls = self.text_encoder(**texts).last_hidden_state[:, 0, :]

        img_emb = self.image_projection(img_cls)
        txt_emb = self.text_projection(txt_cls)

        # Unit-normalize so the matmul below yields cosine similarities.
        img_emb = img_emb / img_emb.norm(dim=-1, keepdim=True)
        txt_emb = txt_emb / txt_emb.norm(dim=-1, keepdim=True)

        # Temperature-scaled similarity; the text view is just the transpose.
        logits_per_image = self.logit_scale.exp() * img_emb @ txt_emb.t()
        return logits_per_image, logits_per_image.t()