#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CLIP模型示例
这个脚本演示如何使用预训练的CLIP模型进行图像和文本的匹配
CLIP (Contrastive Language-Image Pre-training) 是OpenAI开发的多模态模型
"""

import torch
import clip
from PIL import Image
import requests
from io import BytesIO
import numpy as np

def load_clip_model():
    """Load the pretrained ViT-B/32 CLIP model and its image preprocessor.

    Returns:
        tuple: ``(model, preprocess, device)`` where ``device`` is
        ``"cuda"`` when a GPU is available, otherwise ``"cpu"``.
    """
    print("正在加载CLIP模型...")

    # Run on the GPU when one is visible to torch; fall back to CPU.
    use_gpu = torch.cuda.is_available()
    device = "cuda" if use_gpu else "cpu"
    print(f"使用设备: {device}")

    # clip.load returns both the model and the matching torchvision-style
    # preprocessing transform for this architecture.
    model, preprocess = clip.load("ViT-B/32", device=device)

    print("CLIP模型加载完成！")
    return model, preprocess, device

def load_image_from_url(url, timeout=10):
    """Fetch an image over HTTP and decode it with Pillow.

    Args:
        url: HTTP(S) URL of the image to download.
        timeout: Seconds to wait for the server before giving up.
            (The original version had no timeout and could hang
            indefinitely on a stalled connection.)

    Returns:
        PIL.Image.Image on success, or None on any network/decode failure.
    """
    try:
        response = requests.get(url, timeout=timeout)
        # Treat HTTP error statuses (404, 500, ...) as failures instead of
        # trying to decode an error page as an image.
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))
        return image
    except Exception as e:
        # Broad catch is deliberate: any failure falls back to the
        # locally generated sample image in the caller.
        print(f"加载图像失败: {e}")
        return None

def create_sample_image():
    """Build a deterministic fallback image: a white disc on a red square.

    Used when no sample image can be downloaded. The canvas is 224x224,
    matching the input resolution CLIP's ViT-B/32 preprocessor resizes to.

    Returns:
        PIL.Image.Image: RGB image with a red background and a centered
        white circle of radius 50.
    """
    # numpy and PIL.Image are already imported at module level; the
    # redundant function-local re-imports were removed.

    # Solid red background: only the R channel is set.
    img_array = np.zeros((224, 224, 3), dtype=np.uint8)
    img_array[:, :, 0] = 255  # red channel

    # Paint a filled white circle via a boolean distance mask.
    center = 112
    radius = 50
    y, x = np.ogrid[:224, :224]
    mask = (x - center) ** 2 + (y - center) ** 2 <= radius ** 2
    img_array[mask] = [255, 255, 255]  # white

    return Image.fromarray(img_array)

def clip_image_text_matching(model, preprocess, device):
    """Score one image against several candidate captions with CLIP.

    Downloads a sample image (falling back to a locally generated one),
    scores it against a fixed list of English captions, and prints the
    captions ranked by match probability.

    Args:
        model: Loaded CLIP model.
        preprocess: CLIP image preprocessing transform.
        device: Torch device string ("cuda" or "cpu") for the inputs.

    Returns:
        numpy.ndarray: Softmax probabilities with shape (1, num_captions).
    """
    print("\n=== CLIP 图像-文本匹配示例 ===")

    # Try a couple of public Wikimedia images first.
    image_urls = [
        "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
        "https://upload.wikimedia.org/wikipedia/commons/thumb/5/50/Vd-Orig.png/256px-Vd-Orig.png"
    ]

    image = None
    for url in image_urls:
        image = load_image_from_url(url)
        if image is not None:
            print(f"成功加载图像: {url}")
            break

    # Offline fallback: synthesize a simple test image locally.
    if image is None:
        print("无法从网络加载图像，创建示例图像...")
        image = create_sample_image()

    # Preprocess to a (1, C, H, W) tensor on the target device.
    image_input = preprocess(image).unsqueeze(0).to(device)

    # Candidate captions to rank against the image.
    text_candidates = [
        "a photo of a cat",
        "a photo of a dog",
        "a beautiful landscape",
        "a red circle on white background",
        "a nature scene with trees",
        "an urban cityscape",
        "a geometric shape"
    ]

    text_inputs = clip.tokenize(text_candidates).to(device)

    with torch.no_grad():
        # model(image, text) encodes both modalities internally; the
        # original's separate encode_image/encode_text calls produced
        # unused features (dead work) and have been removed.
        logits_per_image, logits_per_text = model(image_input, text_inputs)
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()

    print("\n图像与文本的匹配概率:")
    print("-" * 50)

    # Rank captions from most to least probable.
    sorted_indices = np.argsort(probs[0])[::-1]

    for i, idx in enumerate(sorted_indices):
        probability = probs[0][idx] * 100
        print(f"{i+1}. {text_candidates[idx]:<30} {probability:.2f}%")

    return probs

def clip_zero_shot_classification(model, preprocess, device):
    """Demonstrate CLIP zero-shot classification on a synthetic shape.

    Draws a yellow star-like shape (two stacked triangles) on a dark
    blue background, then asks CLIP which of several shape labels best
    describes it — no training on these classes required.

    Args:
        model: Loaded CLIP model.
        preprocess: CLIP image preprocessing transform.
        device: Torch device string ("cuda" or "cpu") for the inputs.
    """
    print("\n=== CLIP 零样本分类示例 ===")

    # Synthesize the test image: dark blue background (blue channel only).
    canvas = np.zeros((224, 224, 3), dtype=np.uint8)
    canvas[:, :, 2] = 100

    cx, cy = 112, 112
    extent = 40

    # Upper triangle: each row widens by one pixel as it descends.
    for row in range(extent):
        yy = cy - extent // 2 + row
        for col in range(row):
            xx = cx - row // 2 + col
            if yy < 224 and xx >= 0 and xx < 224:
                canvas[yy, xx] = [255, 255, 0]  # yellow

    # Lower triangle: same widening rows, starting at the vertical center.
    for row in range(extent):
        yy = cy + row
        for col in range(row):
            xx = cx - row // 2 + col
            if yy < 224 and xx >= 0 and xx < 224:
                canvas[yy, xx] = [255, 255, 0]

    image = Image.fromarray(canvas)
    image_input = preprocess(image).unsqueeze(0).to(device)

    # Candidate class names for zero-shot classification.
    class_labels = [
        "star", "circle", "square", "triangle",
        "heart", "diamond", "cross", "arrow"
    ]

    # Wrap each label in a caption-style prompt; CLIP was trained on
    # natural-language captions, so sentences work better than bare words.
    text_inputs = clip.tokenize(
        [f"a photo of a {label}" for label in class_labels]
    ).to(device)

    with torch.no_grad():
        logits_per_image, logits_per_text = model(image_input, text_inputs)
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()

    print("\n零样本分类结果:")
    print("-" * 40)

    ranking = np.argsort(probs[0])[::-1]

    # Show only the five best-matching labels.
    for rank, label_idx in enumerate(ranking[:5]):
        probability = probs[0][label_idx] * 100
        print(f"{rank+1}. {class_labels[label_idx]:<15} {probability:.2f}%")

def demonstrate_clip_features(model, preprocess, device):
    """Show raw CLIP embeddings and their cosine similarity.

    Encodes one locally generated image and one short text, prints the
    embedding shapes, their cosine similarity, and the norms after
    L2-normalization.

    Args:
        model: Loaded CLIP model.
        preprocess: CLIP image preprocessing transform.
        device: Torch device string ("cuda" or "cpu") for the inputs.
    """
    print("\n=== CLIP 特征提取示例 ===")

    # Encode the deterministic sample image (red square, white circle).
    image_input = preprocess(create_sample_image()).unsqueeze(0).to(device)

    text = "a red circle"
    text_input = clip.tokenize([text]).to(device)

    with torch.no_grad():
        img_emb = model.encode_image(image_input)
        txt_emb = model.encode_text(text_input)

        # Cosine similarity between the two embedding vectors.
        cos_sim = torch.cosine_similarity(img_emb, txt_emb)

        print(f"图像特征维度: {img_emb.shape}")
        print(f"文本特征维度: {txt_emb.shape}")
        print(f"图像与文本 '{text}' 的相似度: {cos_sim.item():.4f}")

        # L2-normalize each embedding to unit length.
        img_emb_norm = img_emb / img_emb.norm(dim=-1, keepdim=True)
        txt_emb_norm = txt_emb / txt_emb.norm(dim=-1, keepdim=True)

        print(f"归一化后的图像特征范数: {img_emb_norm.norm().item():.4f}")
        print(f"归一化后的文本特征范数: {txt_emb_norm.norm().item():.4f}")

def main():
    """Entry point: load CLIP once, then run each demonstration in turn."""
    print("CLIP模型示例程序")
    print("=" * 50)

    try:
        model, preprocess, device = load_clip_model()

        # Run every demo against the shared model/preprocessor/device.
        for demo in (
            clip_image_text_matching,
            clip_zero_shot_classification,
            demonstrate_clip_features,
        ):
            demo(model, preprocess, device)

        print("\n=== CLIP模型说明 ===")
        print("CLIP (Contrastive Language-Image Pre-training) 是一个多模态模型，具有以下特点:")
        print("1. 可以理解图像和文本之间的关系")
        print("2. 支持零样本分类（无需训练即可分类新类别）")
        print("3. 可以进行图像-文本检索")
        print("4. 提取的特征可用于下游任务")
        print("5. 模型在大规模图像-文本对上进行对比学习训练")

        print("\n程序执行完成！")

    except ImportError as e:
        # Missing third-party packages: tell the user how to install them.
        print(f"导入错误: {e}")
        print("请安装必要的依赖:")
        print("pip install torch torchvision")
        print("pip install git+https://github.com/openai/CLIP.git")
        print("pip install Pillow requests numpy")
    except Exception as e:
        # Top-level boundary: report any other failure instead of crashing.
        print(f"执行错误: {e}")

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()