"""
CLIP模型基础使用演示脚本
快速展示CLIP模型的核心功能
"""

import torch
import torch.nn.functional as F
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os

from model import CLIPWrapper
from config import CLIP_CONFIG

# Configure matplotlib for CJK text: prefer SimHei so the Chinese axis
# labels/titles below render, and keep the minus sign displayable while
# a CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

def demo_image_text_similarity():
    """Demo: score one image against several captions with CLIP and plot the result.

    Loads a fixed test image, computes a softmax distribution over five
    candidate captions, prints it, and saves 'image_text_similarity.png'.
    """
    print("=== 图像-文本相似度演示 ===")

    # Load the pretrained CLIP wrapper and switch it to inference mode.
    clip = CLIPWrapper(CLIP_CONFIG['model_name'])
    clip.eval()
    print("✓ 模型加载成功")

    # Fixed test image for the demo.
    img_path = "data/images/animal_cat.jpg"

    # Candidate captions to score against the image.
    captions = [
        "a photo of a cat",
        "a photo of a dog",
        "a photo of a car",
        "a photo of a tree",
        "a photo of a building"
    ]

    img = Image.open(img_path).convert('RGB')

    # Tokenize the captions and preprocess the image in one processor call.
    batch = clip.processor(
        text=captions,
        images=img,
        return_tensors="pt",
        padding=True
    )

    # Encode both modalities without tracking gradients.
    with torch.no_grad():
        img_emb = clip.encode_image(batch['pixel_values'])
        txt_emb = clip.encode_text(batch['input_ids'])

    sims = clip.compute_similarity(img_emb, txt_emb)

    # Softmax over captions turns similarity logits into a distribution.
    probs = F.softmax(sims, dim=-1).squeeze().numpy()

    print(f"图像: {img_path}")
    print("\n文本相似度结果:")
    for caption, p in zip(captions, probs):
        print(f"  '{caption}': {p:.4f}")

    plt.figure(figsize=(10, 6))

    # Left panel: the query image.
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    plt.axis('off')
    plt.title('输入图像')

    # Right panel: horizontal bars of the per-caption probabilities.
    plt.subplot(1, 2, 2)
    positions = np.arange(len(captions))
    plt.barh(positions, probs, color='skyblue')
    plt.yticks(positions, captions)
    plt.xlabel('相似度概率')
    plt.title('图像-文本相似度')
    plt.xlim(0, 1)

    # Annotate each bar with its numeric probability.
    for row, p in enumerate(probs):
        plt.text(p + 0.01, row, f'{p:.3f}', va='center')

    plt.tight_layout()
    plt.savefig('image_text_similarity.png', dpi=300, bbox_inches='tight')
    plt.show()

    print("\n✓ 可视化结果已保存为 'image_text_similarity.png'")

def demo_text_text_similarity():
    """Demo: rank candidate captions by semantic similarity to a query sentence.

    Encodes the query and five candidates in one batch, softmaxes the
    dot-product scores, prints them, and saves 'text_text_similarity.png'.
    """
    print("\n=== 文本-文本相似度演示 ===")

    # Load the pretrained CLIP wrapper and switch it to inference mode.
    clip = CLIPWrapper(CLIP_CONFIG['model_name'])
    clip.eval()

    query = "a cute cat playing with a ball"

    candidates = [
        "a photo of a cat",
        "a photo of a dog",
        "a photo of a car",
        "a photo of a ball",
        "a photo of a toy"
    ]

    # Encode the query and candidates in one padded batch; row 0 is the query.
    batch = clip.processor(
        text=[query] + candidates,
        return_tensors="pt",
        padding=True
    )
    with torch.no_grad():
        embeddings = clip.encode_text(batch['input_ids'])

    # Split the query row from the candidate rows.
    query_emb, cand_emb = embeddings[0:1], embeddings[1:]

    # Dot-product scores of the query against every candidate,
    # softmax-normalized into a distribution over candidates.
    scores = torch.matmul(query_emb, cand_emb.T)
    probs = F.softmax(scores, dim=-1).squeeze().numpy()

    print(f"查询文本: '{query}'")
    print("\n文本相似度结果:")
    for caption, p in zip(candidates, probs):
        print(f"  '{caption}': {p:.4f}")

    plt.figure(figsize=(10, 6))

    # Left panel: render the query sentence as text.
    plt.subplot(1, 2, 1)
    plt.text(0.5, 0.5, f'查询文本:\n{query}', 
             ha='center', va='center', fontsize=12, wrap=True)
    plt.axis('off')
    plt.title('查询文本')

    # Right panel: one probability bar per candidate.
    plt.subplot(1, 2, 2)
    positions = np.arange(len(candidates))
    plt.barh(positions, probs, color='lightgreen')
    plt.yticks(positions, candidates)
    plt.xlabel('相似度概率')
    plt.title('文本-文本相似度')
    plt.xlim(0, 1)

    # Annotate each bar with its numeric probability.
    for row, p in enumerate(probs):
        plt.text(p + 0.01, row, f'{p:.3f}', va='center')

    plt.tight_layout()
    plt.savefig('text_text_similarity.png', dpi=300, bbox_inches='tight')
    plt.show()

    print("\n✓ 可视化结果已保存为 'text_text_similarity.png'")

def demo_zero_shot_classification():
    """Demo: zero-shot image classification by prompting CLIP with class captions.

    Builds an "a photo of a {class}" prompt per label, picks the label
    with the highest softmax probability, prints all probabilities, and
    saves 'zero_shot_classification.png'.
    """
    print("\n=== 零样本分类演示 ===")

    # Load the pretrained CLIP wrapper and switch it to inference mode.
    clip = CLIPWrapper(CLIP_CONFIG['model_name'])
    clip.eval()

    img_path = "data/images/animal_cat.jpg"

    # Candidate labels and their prompt templates.
    class_names = ['cat', 'dog', 'car', 'tree', 'building', 'person', 'bird']
    prompts = [f"a photo of a {name}" for name in class_names]

    img = Image.open(img_path).convert('RGB')

    # Tokenize the prompts and preprocess the image together.
    batch = clip.processor(
        text=prompts,
        images=img,
        return_tensors="pt",
        padding=True
    )

    with torch.no_grad():
        img_emb = clip.encode_image(batch['pixel_values'])
        txt_emb = clip.encode_text(batch['input_ids'])

    # Similarity logits -> class probability distribution.
    logits = clip.compute_similarity(img_emb, txt_emb)
    probs = F.softmax(logits, dim=-1).squeeze().numpy()

    # The highest-probability prompt wins.
    best = int(np.argmax(probs))
    predicted_class = class_names[best]
    confidence = probs[best]

    print(f"图像: {img_path}")
    print(f"预测类别: {predicted_class}")
    print(f"置信度: {confidence:.4f}")

    print("\n所有类别概率:")
    for name, p in zip(class_names, probs):
        print(f"  {name}: {p:.4f}")

    plt.figure(figsize=(12, 5))

    # Left panel: the classified image.
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    plt.axis('off')
    plt.title('输入图像')

    # Right panel: per-class probabilities with the winner highlighted.
    plt.subplot(1, 2, 2)
    positions = np.arange(len(class_names))
    bar_colors = ['lightcoral' if i == best else 'lightblue'
                  for i in range(len(class_names))]
    plt.barh(positions, probs, color=bar_colors)
    plt.yticks(positions, class_names)
    plt.xlabel('概率')
    plt.title(f'零样本分类结果\n预测: {predicted_class} (置信度: {confidence:.3f})')
    plt.xlim(0, 1)

    # Annotate each bar with its numeric probability.
    for row, p in enumerate(probs):
        plt.text(p + 0.01, row, f'{p:.3f}', va='center')

    plt.tight_layout()
    plt.savefig('zero_shot_classification.png', dpi=300, bbox_inches='tight')
    plt.show()

    print("\n✓ 可视化结果已保存为 'zero_shot_classification.png'")

def demo_image_retrieval():
    """Demo: text-to-image retrieval over a local image library.

    Encodes the query text once, scores every image under ``data/images``
    with a dot product against the query embedding, prints the top-5
    ranking, and saves the top-3 matches to ``image_retrieval.png``.
    """
    print("\n=== 图像检索演示 ===")

    # Load the pretrained CLIP wrapper and switch it to inference mode.
    model = CLIPWrapper(CLIP_CONFIG['model_name'])
    model.eval()

    # Query text
    query_text = "a photo of a cat"

    # Image library. Match common extensions case-insensitively (the
    # original '.jpg'-only check missed '.JPG'/'.jpeg'/'.png'), and sort
    # the listing so the ranking is deterministic across runs
    # (os.listdir returns entries in arbitrary order).
    image_dir = "data/images"
    image_files = sorted(
        f for f in os.listdir(image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    )
    image_paths = [os.path.join(image_dir, f) for f in image_files]

    print(f"查询文本: '{query_text}'")
    print(f"图像库: {len(image_paths)} 张图像")

    # Guard: with an empty library, the subplot indexing below would
    # crash (plt.subplots(1, 1) returns a bare Axes, not an array).
    if not image_paths:
        print("图像库为空,跳过检索演示")
        return

    # Encode the query text once, outside the per-image loop.
    text_inputs = model.processor(
        text=[query_text],
        return_tensors="pt",
        padding=True
    )
    with torch.no_grad():
        text_features = model.encode_text(text_inputs['input_ids'])

    # Encode and score every library image. NOTE(review): the dot product
    # equals cosine similarity only if encode_text/encode_image return
    # L2-normalized features — confirm in CLIPWrapper.
    similarities = []
    for image_path in image_paths:
        try:
            image = Image.open(image_path).convert('RGB')
            image_inputs = model.processor(
                images=image,
                return_tensors="pt"
            )

            with torch.no_grad():
                image_features = model.encode_image(image_inputs['pixel_values'])

            similarity = torch.matmul(text_features, image_features.T)
            similarities.append(similarity.item())
        except Exception as e:
            # Best-effort: one unreadable file should not abort the demo;
            # score it 0.0 so it sinks to the bottom of the ranking.
            print(f"处理图像 {image_path} 失败: {e}")
            similarities.append(0.0)

    # Rank indices by descending similarity.
    sorted_indices = np.argsort(similarities)[::-1]

    print("\n图像检索结果:")
    for i, idx in enumerate(sorted_indices[:5]):  # show the top 5
        image_name = os.path.basename(image_paths[idx])
        similarity = similarities[idx]
        print(f"  {i+1}. {image_name}: {similarity:.4f}")

    # Visualize the query plus the top-k retrieved images.
    # top_k >= 1 here (the empty-library case returned above), so
    # plt.subplots always yields an indexable 1-D axes array.
    top_k = min(3, len(image_paths))
    fig, axes = plt.subplots(1, top_k + 1, figsize=(15, 4))

    # Panel 0: the query text.
    axes[0].text(0.5, 0.5, f'查询文本:\n{query_text}', 
                ha='center', va='center', fontsize=10, wrap=True)
    axes[0].axis('off')
    axes[0].set_title('查询')

    # Panels 1..k: the retrieved images with their scores.
    for i, idx in enumerate(sorted_indices[:top_k]):
        try:
            image = Image.open(image_paths[idx])
            axes[i+1].imshow(image)
            axes[i+1].axis('off')
            axes[i+1].set_title(f'第{i+1}名\n相似度: {similarities[idx]:.3f}')
        except Exception as e:
            # Show a placeholder panel if the image can no longer be read.
            axes[i+1].text(0.5, 0.5, f'加载失败\n{os.path.basename(image_paths[idx])}', 
                          ha='center', va='center', fontsize=8)
            axes[i+1].axis('off')

    plt.tight_layout()
    plt.savefig('image_retrieval.png', dpi=300, bbox_inches='tight')
    plt.show()

    print("\n✓ 可视化结果已保存为 'image_retrieval.png'")

def main():
    """Run every CLIP demo in sequence, then report completion."""
    print("CLIP模型基础使用演示")
    print("=" * 50)

    # Fixed order: similarity demos first, then classification, then retrieval.
    demos = (
        demo_image_text_similarity,
        demo_text_text_similarity,
        demo_zero_shot_classification,
        demo_image_retrieval,
    )
    for demo in demos:
        demo()

    print("\n" + "=" * 50)
    print("演示完成!")
    print("所有可视化图表已保存为PNG文件")
    print("=" * 50)

if __name__ == "__main__":
    main()