import os

import clip
import matplotlib.pyplot as plt
import numpy as np
import skimage
import torch
from PIL import Image
from torchvision.datasets import CIFAR100

# Report the PyTorch version in use.
print("Torch version:", torch.__version__)

# Query the model names CLIP ships with (return value intentionally unused here).
clip.available_models()

# Load the ViT-B/32 CLIP model together with its matching image preprocessor,
# and switch it to inference mode.
model, preprocess = clip.load("ViT-B/32")
model.eval()

input_resolution = model.visual.input_resolution
context_length = model.context_length
vocab_size = model.vocab_size

# Summarize the model: total parameter count plus its key dimensions.
n_params = sum(int(np.prod(p.shape)) for p in model.parameters())
print("Model parameters:", f"{n_params:,}")
print("Input resolution:", input_resolution)
print("Context length:", context_length)
print("Vocab size:", vocab_size)

# Sanity-check the tokenizer on a trivial string (result is discarded).
clip.tokenize("Hello World!")

# Captions for the skimage sample images we want CLIP to match, keyed by the
# sample's base filename (without extension).
descriptions = {
    "page": "a page of text about segmentation",
    "chelsea": "a facial photo of a tabby cat",
    "astronaut": "a portrait of an astronaut with the American flag",
    "rocket": "a rocket standing on a launchpad",
    "motorcycle_right": "a red motorcycle standing in a garage",
    "camera": "a person looking at a camera on a tripod",
    "horse": "a black-and-white silhouette of a horse",
    "coffee": "a cup of coffee on a saucer"
}

# Accumulators filled by the image-loading loop below:
# raw PIL images, preprocessed tensors, and the matching captions.
original_images, images, texts = [], [], []

# Figure for previewing the matched sample images (2 rows x 4 columns = 8 slots).
plt.figure(figsize=(16, 5))

# Walk skimage's bundled sample images and keep only those we have a caption for.
for filename in [f for f in os.listdir(skimage.data_dir) if f.endswith((".png", ".jpg"))]:
    name = os.path.splitext(filename)[0]
    if name not in descriptions:
        continue

    # Load the image; CLIP's visual encoder expects 3-channel RGB input.
    image = Image.open(os.path.join(skimage.data_dir, filename)).convert("RGB")

    # Show the image in the next free grid slot, titled with its filename and
    # caption. (The original had a broken "(unknown)" placeholder as the title.)
    plt.subplot(2, 4, len(images) + 1)
    plt.imshow(image)
    plt.title(f"{filename}\n{descriptions[name]}")
    plt.xticks([])  # hide axis ticks
    plt.yticks([])

    original_images.append(image)      # raw PIL image, reused in later figures
    images.append(preprocess(image))   # normalized tensor for the model
    texts.append(descriptions[name])   # caption aligned with the image

plt.tight_layout()  # tidy spacing between subplots

# Build the model inputs: a single batch tensor of preprocessed images and
# tokenized captions (each caption gets a "This is " prompt prefix).
prompts = ["This is " + desc for desc in texts]
image_input = torch.tensor(np.stack(images))
text_tokens = clip.tokenize(prompts)

# Inference only — no gradients needed.
with torch.no_grad():
    image_features = model.encode_image(image_input).float()
    text_features = model.encode_text(text_tokens).float()

# L2-normalize both feature sets so the dot product below is cosine similarity.
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)

# Pairwise cosine similarity: rows are captions, columns are images.
similarity = text_features.cpu().numpy() @ image_features.cpu().numpy().T

# Visualize the caption-vs-image cosine-similarity matrix as a heatmap.
count = len(descriptions)
plt.figure(figsize=(20, 14))
plt.imshow(similarity, vmin=0.1, vmax=0.3)
plt.yticks(range(count), texts, fontsize=18)  # caption labels on the y axis
plt.xticks([])

# Draw each original image just below its matrix column.
for col_idx, img in enumerate(original_images):
    plt.imshow(img, extent=(col_idx - 0.5, col_idx + 0.5, -1.6, -0.6), origin="lower")

# Annotate every cell with its numeric similarity value.
for col in range(similarity.shape[1]):
    for row in range(similarity.shape[0]):
        plt.text(col, row, f"{similarity[row, col]:.2f}", ha="center", va="center", size=12)

# Strip the frame around the axes.
ax = plt.gca()
for side in ["left", "top", "right", "bottom"]:
    ax.spines[side].set_visible(False)

# Extend the y range so the image strip below the matrix stays visible.
plt.xlim([-0.5, count - 0.5])
plt.ylim([count + 0.5, -2])

plt.title("Cosine similarity between text and image features", size=20)

# Zero-shot classification: score each sample image against every CIFAR100
# class name. Downloads the dataset into ~/.cache if not already present.
cifar100 = CIFAR100(os.path.expanduser("~/.cache"), transform=preprocess, download=True)

# One natural-language prompt per class name.
class_prompts = [f"This is a photo of a {label}" for label in cifar100.classes]
class_tokens = clip.tokenize(class_prompts)

# Encode and L2-normalize the class-name text features (inference only).
with torch.no_grad():
    text_features = model.encode_text(class_tokens).float()
    text_features /= text_features.norm(dim=-1, keepdim=True)

# Softmax over scaled cosine similarities -> per-image class probabilities,
# then keep the five most likely classes for each image.
text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
top_probs, top_labels = text_probs.cpu().topk(5, dim=-1)

# Show each sample image next to a bar chart of its top-5 CIFAR100 predictions.
plt.figure(figsize=(16, 16))

for i, image in enumerate(original_images):
    # Left cell of the pair: the image itself, with ticks hidden.
    plt.subplot(4, 4, 2 * i + 1)
    plt.imshow(image)
    plt.xticks([])
    plt.yticks([])

    # Right cell: horizontal bars for the top-5 class probabilities.
    plt.subplot(4, 4, 2 * i + 2)
    y = np.arange(top_probs.shape[-1])
    plt.grid()
    plt.barh(y, top_probs[i])
    plt.gca().invert_yaxis()  # most likely class on top
    plt.gca().set_axisbelow(True)
    plt.yticks(y, [cifar100.classes[index] for index in top_labels[i].numpy()])
    plt.xlabel("probability")

# tight_layout is loop-invariant: run it once after all subplots exist.
# (The original called it on every iteration, redoing the full layout pass;
# only the final call mattered, and subplots_adjust below tweaks it anyway.)
plt.tight_layout(pad=2.0)
plt.subplots_adjust(wspace=0.3, hspace=1)

# Render all figures.
plt.show()
