from PIL import Image
import requests
import torch
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image  # 添加缺少的Image导入
# 导入我们修改后的本地模块
from src.modeling_clip import CLIPModel
from src.processing_clip import CLIPProcessor
from transformers.models.siglip import SiglipModel, SiglipProcessor
from utils import crop_subimage, crop_all_subimages_by_ids, crop_all_siglip_subimages_by_ids
# Load the SigLIP model and its paired processor from a local checkpoint.
checkpoint = "models/siglip-so400m-patch14-224"
model = SiglipModel.from_pretrained(checkpoint)
processor = SiglipProcessor.from_pretrained(checkpoint)
print(model)

# Switch to inference mode (disables dropout and similar train-time layers).
model.eval()

# Download a sample COCO image over HTTP; keep a NumPy copy for plotting later.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
response = requests.get(url, stream=True)
image = Image.open(response.raw)
original_image = np.array(image)

# Tokenize the candidate captions and preprocess the image in a single call.
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

# Run the forward pass without autograd bookkeeping: we only need the
# activations for visualization, so tracking gradients (model.eval() does
# NOT disable it) would waste memory for no benefit.
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)
print(outputs.keys())

# Image-text similarity scores, then softmax over the caption candidates.
logits_per_image = outputs.logits_per_image
probs = logits_per_image.softmax(dim=1)

print(logits_per_image, probs)
# Dump the shape of every intermediate hidden state returned by the model.
print("\n===== 中间层值 =====")

# Vision tower activations: one entry per transformer layer plus embeddings.
vision_hidden_states = outputs.vision_model_output.hidden_states
print(f"视觉模型隐藏状态数量: {len(vision_hidden_states)}")
for idx, state in enumerate(vision_hidden_states):
    print(f"视觉模型第 {idx} 层隐藏状态形状: {state.shape}")

# Text tower activations, same layout.
text_hidden_states = outputs.text_model_output.hidden_states
print(f"文本模型隐藏状态数量: {len(text_hidden_states)}")
for idx, state in enumerate(text_hidden_states):
    print(f"文本模型第 {idx} 层隐藏状态形状: {state.shape}")

# Summary of the model outputs: top-level keys, pooled embedding shapes,
# vision sub-output keys, and the final per-caption probabilities.
print(
    f"\noutputs: {outputs.keys()} "
    f"\noutputs text embeds: {outputs.text_embeds.shape} "
    f"\noutputs image embeds: {outputs.image_embeds.shape} "
    f"\noutputs vision_model_output: {outputs.vision_model_output.keys()} "
    f"\nlogits_per_image: {logits_per_image.shape} "
    f"\nprobs: {probs}"
)

# ===== Vision hidden-state visualization: shared setup =====
print("\n===== 视觉隐藏状态可视化 =====")

# Pooled embeddings for the first (only) image and the first caption.
image_embed = outputs.image_embeds[0]
text_embed = outputs.text_embeds[0]  # NOTE(review): assigned but never used below — confirm intent
# Final layer norm of the vision tower; reused on raw hidden states below so
# intermediate layers are compared in the same space as the last one.
post_layernorm = model.vision_model.post_layernorm

# The learned pooling head of the vision tower.
# NOTE(review): captured here but never applied by the code below, even though
# comments further down claim a projection is used — confirm this is intended.
visual_projection = model.vision_model.head

# Make sure the output directory for the heatmap PNGs exists.
import os
os.makedirs('visualizations', exist_ok=True)

# Render one similarity heatmap per vision layer: compare every patch's hidden
# state against the pooled image embedding and overlay the map on the photo.
for layer_idx, hidden_state in enumerate(vision_hidden_states):
    # Only the first (and only) image of the batch.
    # Shape (256, 1152): 16x16 patches of the 224px/patch14 model; SigLIP has
    # no class token (the old "[729, 1152]" comment belonged to the 384 model).
    hidden_state = hidden_state[0]

    # Arrange the patch tokens on the 16x16 spatial grid.
    patch_hidden_state = hidden_state.reshape(16, 16, 1152)

    # Apply the tower's final layer norm so all layers live in the same space.
    normalized_hidden = post_layernorm(patch_hidden_state)

    # L2-normalize each patch vector; the dot product below is then a cosine
    # similarity. Normalizing over the last dim works directly on the 3D grid,
    # so the old flatten/unflatten round-trip was a no-op and is removed.
    normalized_hidden = normalized_hidden / torch.norm(normalized_hidden, dim=-1, keepdim=True)

    # Per-patch similarity to the pooled image embedding -> (16, 16).
    similarity = torch.einsum('hwd, d -> hw', normalized_hidden, image_embed)

    similarity_np = similarity.detach().cpu().numpy()

    # Upsample the 16x16 map to full image resolution.
    heatmap = cv2.resize(similarity_np, (original_image.shape[1], original_image.shape[0]))

    # Min-max normalize to [0, 1]; guard against a constant map, which would
    # otherwise divide by zero and fill the heatmap with NaNs.
    value_range = heatmap.max() - heatmap.min()
    if value_range > 0:
        heatmap = (heatmap - heatmap.min()) / value_range
    else:
        heatmap = np.zeros_like(heatmap)

    # Colorize (dropping the alpha channel) and blend with the original image.
    heatmap_color = plt.cm.jet(heatmap)[:, :, :3]
    heatmap_color = (heatmap_color * 255).astype(np.uint8)
    overlay = cv2.addWeighted(original_image, 0.6, heatmap_color, 0.4, 0)

    # Side-by-side figure: original vs. overlay, saved once per layer.
    plt.figure(figsize=(10, 10))
    plt.subplot(121)
    plt.imshow(original_image)
    plt.title('Original Image')
    plt.axis('off')

    plt.subplot(122)
    plt.imshow(overlay)
    plt.title(f'Layer {layer_idx} Similarity Heatmap')
    plt.axis('off')

    plt.tight_layout()
    plt.savefig(f'visualizations/layer_{layer_idx}_heatmap.png')
    plt.close()

    print(f"已保存第 {layer_idx} 层的热力图到 visualizations/layer_{layer_idx}_heatmap.png")

print("\n所有热力图已生成完成！")


# ===== Subimage cropping and similarity comparison =====
print("\n===== 子图裁剪和相似度比较 =====")

# Grid resolution for cropping: 16x16 matches the model's patch grid.
total_h, total_w = 16, 16

# Split the full image into total_h * total_w tiles.
all_subimages = crop_all_siglip_subimages_by_ids(image, total_h, total_w)

# Pick the tile at the center of the grid.
center_x, center_y = total_w // 2, total_h // 2
center_index = center_y * total_w + center_x
center_subimage = all_subimages[center_index]

# Save the center tile for visual reference.
plt.figure(figsize=(5, 5))
plt.imshow(center_subimage)
plt.title(f'Center Subimage at ({center_x}, {center_y})')
plt.axis('off')
plt.tight_layout()
plt.savefig('visualizations/center_subimage.png')
plt.close()
print(f"已保存中心子图到 visualizations/center_subimage.png")

# Embed the center tile on its own, then compare it against every patch of
# the full image in the final hidden-state space.
center_inputs = processor(images=center_subimage, return_tensors="pt")
# No gradients needed for inference-only feature extraction.
with torch.no_grad():
    center_outputs = model.get_image_features(**center_inputs)
center_embedding = center_outputs[0]
# L2-normalize so the dot products below are cosine similarities.
center_embedding = center_embedding / torch.norm(center_embedding)

# Final vision layer, first image; (256, 1152) patch tokens (SigLIP has no
# class token, so no slicing is required).
last_hidden_state = vision_hidden_states[-1][0]

# Arrange on the 16x16 patch grid and apply the tower's final layer norm.
patch_hidden_state = last_hidden_state.reshape(16, 16, 1152)
normalized_hidden = post_layernorm(patch_hidden_state)

# L2-normalize each patch vector directly on the 3D grid. The previous
# flatten/unflatten round-trip did nothing (no projection was ever applied,
# despite the old comments) and has been removed.
projected_hidden = normalized_hidden / torch.norm(normalized_hidden, dim=-1, keepdim=True)

# Cosine similarity of every patch to the center-tile embedding -> (16, 16).
similarity = torch.einsum('hwd, d -> hw', projected_hidden, center_embedding)

similarity_np = similarity.detach().cpu().numpy()

# Three-panel figure: the original image, the center tile, and the patch-level
# similarity map against that tile.
plt.figure(figsize=(10, 10))

# First two panels share the same imshow/title/axis pattern.
panels = (
    (131, original_image, 'Original Image'),
    (132, center_subimage, f'Center Subimage ({center_x}, {center_y})'),
)
for position, panel_image, panel_title in panels:
    plt.subplot(position)
    plt.imshow(panel_image)
    plt.title(panel_title)
    plt.axis('off')

# Third panel: the similarity heatmap with its own colorbar.
plt.subplot(133)
similarity_im = plt.imshow(similarity_np, cmap='jet')
plt.title('Similarity to Center Subimage')
plt.axis('off')
plt.colorbar(similarity_im, fraction=0.046, pad=0.04)

plt.tight_layout()
plt.savefig('visualizations/center_subimage_similarity.png')
plt.close()

print(f"已保存中心子图与其他patch的相似度热力图到 visualizations/center_subimage_similarity.png")
print("\n子图裁剪和相似度比较完成！")