
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import random
import logging
from PIL import Image
from utils import crop_subimage, crop_all_subimages_by_ids, crop_all_siglip_subimages_by_ids

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


# Sub-image feature matching loss (CLIP variant)
def subimage_feature_matching_loss(
    model, processor, query_image_paths, vision_hidden_states, device, 
    top_k_var=16, temperature=0.07, reduction='mean'
):
    """InfoNCE-style loss matching high-variance CLIP patch embeddings against
    re-encoded crops of the corresponding image regions.

    Per image in the batch:
      1. Project the patch tokens of hidden-state layer 6 through the model's
         ``visual_projection`` (the token at index 0 is skipped — presumably
         the CLS token; TODO confirm for this backbone).
      2. Select the ``top_k_var`` patches with the largest feature-dim variance.
      3. Crop the matching sub-images from the original image (14x14 grid),
         re-encode them with ``model.get_image_features`` (no grad), and take
         the cross-entropy over the pairwise cosine-similarity matrix, with
         the diagonal as positives.

    Args:
        model: CLIP-like model exposing ``visual_projection`` and
            ``get_image_features``.
        processor: image processor turning PIL images into model inputs.
        query_image_paths: image file paths, aligned with the batch dimension
            of ``vision_hidden_states``.
        vision_hidden_states: per-layer vision hidden states; layer 6 is used.
        device: target device for processed inputs and labels.
        top_k_var: number of highest-variance patches matched per image.
        temperature: scaling applied to the similarity logits.
        reduction: reduction forwarded to ``F.cross_entropy``.

    Returns:
        Mean per-image loss tensor; a zero tensor on ``device`` if no image
        was processed successfully.
    """
    visual_projection = model.visual_projection
    # Layer 6 hidden states; assumed shape [batch, 1 + num_patches, hidden]
    # — TODO confirm against the vision tower config.
    hidden_state = vision_hidden_states[6]
    loss = 0.0
    valid_count = 0
    progress_bar = tqdm(enumerate(hidden_state), desc="处理子图特征",
                        total=hidden_state.shape[0],
                        position=1, leave=False, dynamic_ncols=True)
    for idx, patch in progress_bar:
        # Project patch tokens (index 0 skipped): [num_patches, proj_dim].
        origin_local_embeds = visual_projection(patch[1:, :])
        # Variance over the feature dimension ranks patch "informativeness".
        variances = torch.var(origin_local_embeds, dim=1)
        # Guard against top_k_var exceeding the number of patches.
        _, top_indices = torch.topk(variances, min(top_k_var, len(variances)))
        top_indices = top_indices.tolist()
        origin_local_embeds_cropped = origin_local_embeds[top_indices]

        # BUGFIX: context manager guarantees the image file is closed even if
        # cropping raises (the original leaked the handle on error).
        try:
            with Image.open(query_image_paths[idx]) as origin_image:
                local_images = crop_all_subimages_by_ids(origin_image, 14, 14, top_indices)
        except Exception as e:
            logger.error(f"处理图像 {query_image_paths[idx]} 时出错: {e}")
            continue

        # BUGFIX: preprocessing/encoding can also fail; skip the image instead
        # of aborting the whole batch (consistent with the SigLIP variant).
        try:
            local_inputs = processor(images=local_images, return_tensors="pt").to(device)
            with torch.no_grad():
                target_local_embeds = model.get_image_features(**local_inputs, output_hidden_states=True)
        except Exception as e:
            logger.error(f"处理图像embedding {query_image_paths[idx]} 时出错: {e}")
            continue

        origin_local_embeds_cropped = torch.nn.functional.normalize(origin_local_embeds_cropped, dim=-1)
        target_local_embeds = torch.nn.functional.normalize(target_local_embeds, dim=-1)
        # Pairwise cosine-similarity matrix [k, k]; diagonal = positive pairs.
        cos_sim = F.cosine_similarity(origin_local_embeds_cropped.unsqueeze(1),
                                      target_local_embeds.unsqueeze(0), dim=-1)
        cos_sim = cos_sim / temperature
        labels = torch.arange(cos_sim.size(0), device=device)
        local_loss = F.cross_entropy(cos_sim, labels, reduction=reduction)
        loss += local_loss
        valid_count += 1

        # Live progress: current per-image loss and running mean.
        current_mean_loss = loss / valid_count
        progress_bar.set_postfix({"当前批次Loss": f"{local_loss:.4f}",
                                  "平均Loss": f"{current_mean_loss:.4f}"})

    # BUGFIX: the original unconditionally `del`-ed loop locals at the end,
    # raising NameError when the batch was empty or every image failed.
    # Explicit deletion is unnecessary — locals die with the function frame.
    if valid_count > 0:
        return loss / valid_count
    return torch.tensor(0.0, device=device)


def siglip_subimage_feature_matching_loss(
    model, processor, query_image_paths, vision_hidden_states, device, 
    top_k_var=10, temperature=0.07, reduction='mean'
):
    """SigLIP variant of the sub-image feature matching loss.

    Same scheme as ``subimage_feature_matching_loss`` but adapted to SigLIP:
    patch tokens from hidden-state layer 22 are passed through the vision
    tower's ``post_layernorm`` (no separate visual projection), no leading
    token is skipped, and crops are taken from a 27x27 grid.

    Args:
        model: SigLIP-like model exposing ``vision_model.post_layernorm`` and
            ``get_image_features``.
        processor: image processor turning PIL images into model inputs.
        query_image_paths: image file paths, aligned with the batch dimension
            of ``vision_hidden_states``.
        vision_hidden_states: per-layer vision hidden states; layer 22 is used.
        device: target device for processed inputs and labels.
        top_k_var: number of highest-variance patches matched per image.
        temperature: scaling applied to the similarity logits.
        reduction: reduction forwarded to ``F.cross_entropy``.

    Returns:
        Mean per-image loss tensor; a zero tensor on ``device`` if no image
        was processed successfully.
    """
    # NOTE: this is the final LayerNorm, not a projection head — renamed
    # locally from the misleading `visual_projection` of the CLIP variant.
    post_layernorm = model.vision_model.post_layernorm
    # Layer 22 hidden states (the original comment incorrectly said layer 6).
    hidden_state = vision_hidden_states[22]
    loss = 0.0
    valid_count = 0
    progress_bar = tqdm(enumerate(hidden_state), desc="处理子图特征",
                        total=hidden_state.shape[0],
                        position=1, leave=False, dynamic_ncols=True)
    for idx, patch in progress_bar:
        # All tokens are patch tokens (SigLIP has no CLS token — TODO confirm).
        origin_local_embeds = post_layernorm(patch[:, :])
        # Variance over the feature dimension ranks patch "informativeness".
        variances = torch.var(origin_local_embeds, dim=1)
        # Guard against top_k_var exceeding the number of patches.
        _, top_indices = torch.topk(variances, min(top_k_var, len(variances)))
        top_indices = top_indices.tolist()
        origin_local_embeds_cropped = origin_local_embeds[top_indices]

        # BUGFIX: context manager guarantees the image file is closed even if
        # cropping raises (the original leaked the handle on error).
        try:
            with Image.open(query_image_paths[idx]) as origin_image:
                local_images = crop_all_siglip_subimages_by_ids(origin_image, 27, 27, top_indices)
        except Exception as e:
            logger.error(f"处理图像 {query_image_paths[idx]} 时出错: {e}")
            continue

        try:
            local_inputs = processor(images=local_images, return_tensors="pt").to(device)
            with torch.no_grad():
                target_local_embeds = model.get_image_features(**local_inputs, output_hidden_states=True)
        except Exception as e:
            logger.error(f"处理图像embedding {query_image_paths[idx]} 时出错: {e}")
            continue

        origin_local_embeds_cropped = torch.nn.functional.normalize(origin_local_embeds_cropped, dim=-1)
        target_local_embeds = torch.nn.functional.normalize(target_local_embeds, dim=-1)
        # Pairwise cosine-similarity matrix [k, k]; diagonal = positive pairs.
        cos_sim = F.cosine_similarity(origin_local_embeds_cropped.unsqueeze(1),
                                      target_local_embeds.unsqueeze(0), dim=-1)
        cos_sim = cos_sim / temperature
        labels = torch.arange(cos_sim.size(0), device=device)
        local_loss = F.cross_entropy(cos_sim, labels, reduction=reduction)
        loss += local_loss
        valid_count += 1

        # Live progress: current per-image loss and running mean.
        current_mean_loss = loss / valid_count
        progress_bar.set_postfix({"当前批次Loss": f"{local_loss:.4f}",
                                  "平均Loss": f"{current_mean_loss:.4f}"})

    # BUGFIX: the original unconditionally `del`-ed loop locals at the end,
    # raising NameError when the batch was empty or every image failed.
    if valid_count > 0:
        return loss / valid_count
    return torch.tensor(0.0, device=device)

# CLIP contrastive loss
def clip_contrastive_loss(image_features, text_features, temperature=0.07):
    """Symmetric InfoNCE loss as used by CLIP.

    Scales the pairwise similarity logits by ``1 / temperature`` and averages
    the image-to-text and text-to-image cross-entropy terms; the i-th image
    matches the i-th text (diagonal positives).
    """
    image_logits = image_features @ text_features.T / temperature
    text_logits = text_features @ image_features.T / temperature

    n = image_features.shape[0]
    targets = torch.arange(n, device=image_features.device)

    image_to_text = F.cross_entropy(image_logits, targets)
    text_to_image = F.cross_entropy(text_logits, targets)

    return (image_to_text + text_to_image) / 2

def siglip_contrastive_loss(image_features, text_features, temperature=0.07):
    """SigLIP-style contrastive loss.

    Unlike CLIP's softmax cross-entropy, every (image, text) pair is scored
    independently through a sigmoid: diagonal pairs are positives (target 1),
    off-diagonal pairs are negatives (target 0). Returns the average of the
    BCE-with-logits losses for both similarity directions.
    """
    image_logits = image_features @ text_features.T / temperature
    text_logits = text_features @ image_features.T / temperature

    n = image_features.shape[0]
    # Identity matrix marks the matching (diagonal) pairs as positives.
    targets = torch.eye(n, device=image_features.device)

    image_loss = F.binary_cross_entropy_with_logits(image_logits, targets)
    text_loss = F.binary_cross_entropy_with_logits(text_logits, targets)

    return (image_loss + text_loss) / 2