"""
CLIP模型基础使用实验脚本
包含完整的实验过程、结果分析和可视化
"""

import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
import os
import tempfile
from datetime import datetime

from model import CLIPWrapper
from config import CLIP_CONFIG

# Configure matplotlib fonts — fixes garbled (mojibake) CJK rendering in figures
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: render to files, no display required

# Font fallback chain: the first installed family that covers the needed glyphs wins
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial Unicode MS', 'SimHei', 'Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign renderable under CJK fonts

# Global default font sizes for all figures
plt.rcParams['font.size'] = 10
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['axes.labelsize'] = 11
class CLIPExperiment:
    """End-to-end harness for basic CLIP usage experiments.

    Pipeline: zero-shot classification on CIFAR-10 (experiment 1), then a
    similarity analysis (experiment 2) and a confidence analysis
    (experiment 3) derived from experiment 1's predictions, followed by
    visualization and a Markdown report. Intermediate results are kept in
    ``self.results`` keyed ``'experiment1'``..``'experiment3'``.
    """

    def __init__(self):
        # Model is lazily created by setup_model().
        self.model = None
        self.device = CLIP_CONFIG['device']
        # Per-experiment result dicts, filled in by the experiment_* methods.
        self.results = {}
        # Timestamp used to namespace output directories and report files.
        self.experiment_time = datetime.now().strftime("%Y%m%d_%H%M%S")

    def setup_model(self):
        """Instantiate the CLIP wrapper and put it in eval mode."""
        print("=== 模型设置 ===")
        self.model = CLIPWrapper(CLIP_CONFIG['model_name'])
        self.model.eval()
        print(f"✓ 模型加载成功")
        print(f"✓ 设备: {self.device}")
        print(f"✓ 模型名称: {CLIP_CONFIG['model_name']}")

    def load_cifar10_data(self):
        """Load the CIFAR-10 test split (must already be on disk).

        Returns:
            The test ``Dataset`` (tensors normalized to [-1, 1]), or ``None``
            if loading fails (e.g. the data has not been downloaded).
        """
        print("\n=== 数据加载 ===")

        # Resize to CLIP's input size; normalization maps pixels to [-1, 1]
        # (experiment_1 un-does this before saving images — see there).
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        try:
            # download=False: fail fast instead of fetching silently.
            test_dataset = datasets.CIFAR10(
                root='./data/cifar10',
                train=False,
                download=False,
                transform=transform
            )

            print(f"✓ CIFAR-10测试集加载成功")
            print(f"✓ 样本数量: {len(test_dataset)}")

            return test_dataset

        except Exception as e:
            # Best-effort: report and let the caller decide how to proceed.
            print(f"❌ CIFAR-10数据集加载失败: {e}")
            return None

    def zero_shot_classification(self, image_path, class_names):
        """Zero-shot classify one image against text prompts for each class.

        Args:
            image_path: Path to an image file readable by PIL.
            class_names: Candidate class names (one prompt is built per name).

        Returns:
            Tuple ``(predicted_class, confidence, probs)`` where ``probs`` is
            a 1-D numpy array of softmax probabilities over ``class_names``.
        """
        # One natural-language prompt per candidate class.
        texts = [f"a photo of a {name}" for name in class_names]

        image = Image.open(image_path).convert('RGB')

        # The processor tokenizes the prompts and preprocesses the image.
        inputs = self.model.processor(
            text=texts,
            images=image,
            return_tensors="pt",
            padding=True
        )

        # Inference only — no gradients needed.
        with torch.no_grad():
            image_features = self.model.encode_image(inputs['pixel_values'])
            text_features = self.model.encode_text(inputs['input_ids'])

        similarity = self.model.compute_similarity(image_features, text_features)

        # Softmax over classes -> probabilities. Move to CPU before .numpy():
        # the original crashed when CLIP_CONFIG['device'] was a GPU. squeeze(0)
        # (not squeeze()) so a single candidate class keeps a 1-D shape.
        probs = F.softmax(similarity, dim=-1)
        probs = probs.squeeze(0).cpu().numpy()

        predicted_class_idx = np.argmax(probs)
        predicted_class = class_names[predicted_class_idx]
        confidence = probs[predicted_class_idx]

        return predicted_class, confidence, probs

    def experiment_1_zero_shot_classification(self, num_samples=50):
        """Experiment 1: zero-shot classification over CIFAR-10 samples.

        Args:
            num_samples: Upper bound on the number of test samples to use.

        Returns:
            Overall accuracy as a float, or ``None`` if the dataset could
            not be loaded.
        """
        print("\n=== 实验1: 零样本分类 ===")

        # CIFAR-10 class names, index-aligned with the dataset labels.
        class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                       'dog', 'frog', 'horse', 'ship', 'truck']

        test_dataset = self.load_cifar10_data()
        if test_dataset is None:
            print("❌ 实验1无法进行，数据集加载失败")
            return

        # Scratch directory for the sample images fed to CLIP.
        temp_dir = tempfile.mkdtemp(prefix='clip_exp1_')

        samples_to_test = min(num_samples, len(test_dataset))
        demo_samples = []

        for i in range(samples_to_test):
            image, label = test_dataset[i]
            # The dataset transform normalized pixels to [-1, 1]; undo that
            # before converting to PIL, otherwise ToPILImage clamps the
            # negative half and CLIP would classify corrupted images.
            image_pil = transforms.ToPILImage()((image * 0.5 + 0.5).clamp(0, 1))

            image_path = os.path.join(temp_dir, f"sample_{i}.jpg")
            image_pil.save(image_path)

            demo_samples.append((image_path, class_names[label], label))

        print(f"✓ 创建 {len(demo_samples)} 个测试样本")

        # Classify every sample and accumulate predictions/confidences.
        predictions = []
        true_labels = []
        confidences = []
        all_probs = []

        for i, (image_path, true_label, label_idx) in enumerate(demo_samples):
            predicted_class, confidence, probs = self.zero_shot_classification(
                image_path, class_names
            )

            predictions.append(predicted_class)
            true_labels.append(true_label)
            confidences.append(confidence)
            all_probs.append(probs)

            if i % 10 == 0:
                print(f"  已完成 {i+1}/{len(demo_samples)} 个样本")

        correct_predictions = sum([1 for pred, true in zip(predictions, true_labels) if pred == true])
        accuracy = correct_predictions / len(demo_samples)

        # Stash everything experiments 2/3 and the report need.
        self.results['experiment1'] = {
            'accuracy': accuracy,
            'correct_predictions': correct_predictions,
            'total_samples': len(demo_samples),
            'predictions': predictions,
            'true_labels': true_labels,
            'confidences': confidences,
            'all_probs': all_probs,
            'class_names': class_names,
            'temp_dir': temp_dir
        }

        print(f"✓ 实验1完成")
        print(f"✓ 准确率: {accuracy:.4f} ({correct_predictions}/{len(demo_samples)})")

        return accuracy

    def experiment_2_similarity_analysis(self):
        """Experiment 2: per-class similarity analysis of experiment 1's probs.

        Returns:
            ``class_similarity`` matrix where row ``i``, column ``j`` holds
            the mean probability assigned to class ``j`` over all samples
            whose true class is ``i`` (``None`` if experiment 1 has not run).
        """
        print("\n=== 实验2: 相似度分析 ===")

        if 'experiment1' not in self.results:
            print("❌ 请先运行实验1")
            return

        exp1_results = self.results['experiment1']
        class_names = exp1_results['class_names']

        all_probs = np.array(exp1_results['all_probs'])
        # Mean probability per class over all samples.
        avg_similarity = np.mean(all_probs, axis=0)

        # Row i = mean probability vector over samples of true class i.
        # Grouping each class's samples once replaces the original
        # O(C^2 * N) double loop with identical results.
        class_similarity = np.zeros((len(class_names), len(class_names)))
        for i, name in enumerate(class_names):
            rows = [k for k, true_label in enumerate(exp1_results['true_labels'])
                    if true_label == name]
            if rows:
                class_similarity[i] = all_probs[rows].mean(axis=0)

        self.results['experiment2'] = {
            'avg_similarity': avg_similarity,
            'class_similarity': class_similarity,
            'class_names': class_names
        }

        print("✓ 实验2完成")
        print("✓ 相似度分析完成")

        return class_similarity

    def experiment_3_confidence_analysis(self):
        """Experiment 3: compare confidence of correct vs. wrong predictions.

        Returns:
            The experiment-3 result dict (confidence lists and their means),
            or ``None`` if experiment 1 has not run.
        """
        print("\n=== 实验3: 置信度分析 ===")

        if 'experiment1' not in self.results:
            print("❌ 请先运行实验1")
            return

        exp1_results = self.results['experiment1']

        # Split confidences by prediction correctness.
        correct_confidences = []
        wrong_confidences = []

        for pred, true, conf in zip(
            exp1_results['predictions'],
            exp1_results['true_labels'],
            exp1_results['confidences']
        ):
            if pred == true:
                correct_confidences.append(conf)
            else:
                wrong_confidences.append(conf)

        self.results['experiment3'] = {
            'correct_confidences': correct_confidences,
            'wrong_confidences': wrong_confidences,
            # Guard against empty groups (all-correct or all-wrong runs).
            'avg_correct_confidence': np.mean(correct_confidences) if correct_confidences else 0,
            'avg_wrong_confidence': np.mean(wrong_confidences) if wrong_confidences else 0
        }

        print("✓ 实验3完成")
        print("✓ 置信度分析完成")

        return self.results['experiment3']

    def visualize_results(self):
        """Render all available experiment results to PNG files.

        Returns:
            The directory path the figures were written to.
        """
        print("\n=== 数据可视化 ===")

        vis_dir = f"visualizations_{self.experiment_time}"
        os.makedirs(vis_dir, exist_ok=True)

        # Only plot experiments that actually produced results.
        if 'experiment1' in self.results:
            self._visualize_experiment1(vis_dir)

        if 'experiment2' in self.results:
            self._visualize_experiment2(vis_dir)

        if 'experiment3' in self.results:
            self._visualize_experiment3(vis_dir)

        print(f"✓ 可视化结果保存在: {vis_dir}")

        return vis_dir

    def _visualize_experiment1(self, vis_dir):
        """Plot accuracy pie, confusion matrix, per-class accuracy and
        confidence histogram for experiment 1."""
        exp1 = self.results['experiment1']
        class_names = exp1['class_names']

        fig = plt.figure(figsize=(12, 10))

        # 1. Accuracy pie chart (English labels avoid font fallback issues).
        ax1 = plt.subplot(2, 2, 1)
        correct = exp1['correct_predictions']
        wrong = exp1['total_samples'] - correct
        labels = ['Correct Predictions', 'Wrong Predictions']
        sizes = [correct, wrong]
        colors = ['lightgreen', 'lightcoral']

        ax1.pie(sizes, labels=labels, colors=colors,
                autopct='%1.1f%%', startangle=90)
        ax1.set_title(f'Zero-Shot Classification Accuracy\nOverall: {exp1["accuracy"]:.4f}',
                     fontsize=12, fontweight='bold')

        # 2. Confusion matrix (labels fixes row/column order to class_names).
        ax2 = plt.subplot(2, 2, 2)
        cm = confusion_matrix(exp1['true_labels'], exp1['predictions'], labels=class_names)
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=class_names, yticklabels=class_names,
                    ax=ax2)
        ax2.set_title('Confusion Matrix', fontsize=12, fontweight='bold')
        ax2.set_xlabel('Predicted Class', fontsize=11)
        ax2.set_ylabel('True Class', fontsize=11)
        plt.setp(ax2.get_xticklabels(), rotation=45, ha='right')
        plt.setp(ax2.get_yticklabels(), rotation=0)

        # 3. Per-class accuracy bar chart.
        ax3 = plt.subplot(2, 2, 3)
        class_accuracies = []
        for class_name in class_names:
            class_true = [1 for true, pred in zip(exp1['true_labels'], exp1['predictions'])
                         if true == class_name and pred == class_name]
            class_total = sum([1 for true in exp1['true_labels'] if true == class_name])
            # Guard: a class may have no samples in a small subset.
            class_acc = len(class_true) / class_total if class_total > 0 else 0
            class_accuracies.append(class_acc)

        bars = ax3.bar(range(len(class_names)), class_accuracies, color='skyblue', alpha=0.8)
        ax3.set_xticks(range(len(class_names)))
        ax3.set_xticklabels(class_names, rotation=45, ha='right')
        ax3.set_title('Accuracy by Class', fontsize=12, fontweight='bold')
        ax3.set_ylabel('Accuracy', fontsize=11)
        ax3.set_ylim(0, 1)
        ax3.grid(axis='y', alpha=0.3)

        # Value labels above each bar.
        for bar, acc in zip(bars, class_accuracies):
            height = bar.get_height()
            ax3.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                    f'{acc:.2f}', ha='center', va='bottom', fontsize=9)

        # 4. Confidence distribution histogram.
        ax4 = plt.subplot(2, 2, 4)
        ax4.hist(exp1['confidences'], bins=20, alpha=0.7, color='lightblue',
                edgecolor='black', density=True)
        ax4.set_title('Confidence Distribution', fontsize=12, fontweight='bold')
        ax4.set_xlabel('Confidence Score', fontsize=11)
        ax4.set_ylabel('Density', fontsize=11)
        ax4.grid(alpha=0.3)

        # Vertical line marking the mean confidence.
        mean_confidence = np.mean(exp1['confidences'])
        ax4.axvline(mean_confidence, color='red', linestyle='--',
                   label=f'Mean: {mean_confidence:.3f}')
        ax4.legend()

        plt.tight_layout()
        plt.savefig(f'{vis_dir}/experiment1_results.png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        plt.close(fig)

    def _visualize_experiment2(self, vis_dir):
        """Plot the class-similarity heatmap and average-similarity bars."""
        exp2 = self.results['experiment2']
        class_names = exp2['class_names']

        fig = plt.figure(figsize=(14, 6))

        # 1. Class-to-class similarity heatmap.
        ax1 = plt.subplot(1, 2, 1)
        sns.heatmap(exp2['class_similarity'],
                   annot=True, fmt='.3f', cmap='YlOrRd',
                   xticklabels=class_names, yticklabels=class_names,
                   ax=ax1, cbar_kws={'label': 'Similarity Score'})
        ax1.set_title('Class Similarity Matrix', fontsize=12, fontweight='bold')
        ax1.set_xlabel('Target Class', fontsize=11)
        ax1.set_ylabel('Source Class', fontsize=11)
        plt.setp(ax1.get_xticklabels(), rotation=45, ha='right')
        plt.setp(ax1.get_yticklabels(), rotation=0)

        # 2. Average similarity per class as a bar chart.
        ax2 = plt.subplot(1, 2, 2)
        avg_similarity = exp2['avg_similarity']
        bars = ax2.bar(range(len(class_names)), avg_similarity,
                      color='lightgreen', alpha=0.8, edgecolor='darkgreen')
        ax2.set_xticks(range(len(class_names)))
        ax2.set_xticklabels(class_names, rotation=45, ha='right')
        ax2.set_title('Average Similarity by Class', fontsize=12, fontweight='bold')
        ax2.set_ylabel('Average Similarity', fontsize=11)
        ax2.set_ylim(0, 1)
        ax2.grid(axis='y', alpha=0.3)

        # Value labels above each bar.
        for i, v in enumerate(avg_similarity):
            ax2.text(i, v + 0.01, f'{v:.3f}', ha='center', va='bottom', fontsize=9)

        plt.tight_layout()
        plt.savefig(f'{vis_dir}/experiment2_similarity_analysis.png', dpi=300,
                   bbox_inches='tight', facecolor='white', edgecolor='none')
        plt.close(fig)

    def _visualize_experiment3(self, vis_dir):
        """Plot confidence distributions and mean-confidence comparison."""
        exp3 = self.results['experiment3']

        fig = plt.figure(figsize=(12, 6))

        # 1. Overlaid confidence histograms for correct vs. wrong predictions.
        ax1 = plt.subplot(1, 2, 1)
        if exp3['correct_confidences']:
            ax1.hist(exp3['correct_confidences'], bins=15, alpha=0.7,
                    label='Correct Predictions', color='lightgreen',
                    edgecolor='darkgreen', density=True)
        if exp3['wrong_confidences']:
            ax1.hist(exp3['wrong_confidences'], bins=15, alpha=0.7,
                    label='Wrong Predictions', color='lightcoral',
                    edgecolor='darkred', density=True)

        ax1.set_title('Confidence Distribution: Correct vs Wrong',
                     fontsize=12, fontweight='bold')
        ax1.set_xlabel('Confidence Score', fontsize=11)
        ax1.set_ylabel('Density', fontsize=11)
        ax1.legend()
        ax1.grid(alpha=0.3)

        # Mean-confidence marker lines (only for non-empty groups).
        if exp3['correct_confidences']:
            correct_mean = exp3['avg_correct_confidence']
            ax1.axvline(correct_mean, color='green',
                       linestyle='--', alpha=0.8,
                       label=f'Correct Mean: {correct_mean:.3f}')
        if exp3['wrong_confidences']:
            wrong_mean = exp3['avg_wrong_confidence']
            ax1.axvline(wrong_mean, color='red',
                       linestyle='--', alpha=0.8,
                       label=f'Wrong Mean: {wrong_mean:.3f}')
        ax1.legend()

        # 2. Bar chart comparing the two mean confidences.
        ax2 = plt.subplot(1, 2, 2)
        categories = ['Correct Predictions', 'Wrong Predictions']
        avg_confidences = [exp3['avg_correct_confidence'], exp3['avg_wrong_confidence']]
        colors = ['lightgreen', 'lightcoral']

        bars = ax2.bar(categories, avg_confidences, color=colors, alpha=0.8,
                      edgecolor=['darkgreen', 'darkred'])
        ax2.set_title('Average Confidence Comparison', fontsize=12, fontweight='bold')
        ax2.set_ylabel('Average Confidence', fontsize=11)
        ax2.set_ylim(0, 1)
        ax2.grid(axis='y', alpha=0.3)

        # Value labels above each bar.
        for bar, conf in zip(bars, avg_confidences):
            ax2.text(bar.get_x() + bar.get_width()/2, conf + 0.01,
                    f'{conf:.3f}', ha='center', va='bottom', fontweight='bold')

        # Annotate the gap between the two means.
        diff = exp3['avg_correct_confidence'] - exp3['avg_wrong_confidence']
        ax2.text(0.5, 0.95, f'Difference: {diff:.3f}',
                transform=ax2.transAxes, ha='center', va='top',
                bbox=dict(boxstyle='round,pad=0.3', facecolor='yellow', alpha=0.7))

        plt.tight_layout()
        plt.savefig(f'{vis_dir}/experiment3_confidence_analysis.png', dpi=300,
                   bbox_inches='tight', facecolor='white', edgecolor='none')
        plt.close(fig)

    def generate_report(self):
        """Write the Markdown experiment report to a timestamped directory.

        Returns:
            The path of the report file that was written.
        """
        print("\n=== 生成实验报告 ===")

        report_dir = f"reports_{self.experiment_time}"
        os.makedirs(report_dir, exist_ok=True)

        report_content = self._create_markdown_report()

        report_file = f"{report_dir}/clip_experiment_report_{self.experiment_time}.md"
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write(report_content)

        print(f"✓ 实验报告已生成: {report_file}")

        return report_file

    def _create_markdown_report(self):
        """Assemble and return the Markdown report from ``self.results``."""

        report = f"""# CLIP模型基础使用实验报告

## 实验概述
- **实验时间**: {self.experiment_time}
- **实验环境**: {self.device}
- **模型名称**: {CLIP_CONFIG['model_name']}
- **数据集**: CIFAR-10测试集

## 实验参数
- 图像尺寸: {CLIP_CONFIG['image_size']}x{CLIP_CONFIG['image_size']}
- 文本最大长度: {CLIP_CONFIG['text_max_length']}
- 测试样本数: {self.results.get('experiment1', {}).get('total_samples', 0)}

## 实验结果
"""

        # Experiment 1 section.
        if 'experiment1' in self.results:
            exp1 = self.results['experiment1']
            # Heading fixed: the block below is sklearn's classification
            # report (precision/recall/F1), not a confusion matrix.
            report += f"""
### 实验1: 零样本分类

**准确率**: {exp1['accuracy']:.4f} ({exp1['correct_predictions']}/{exp1['total_samples']})

**分类报告**:
```
{classification_report(exp1['true_labels'], exp1['predictions'])}
```

**各类别准确率**:
"""

            # Per-class accuracy lines.
            for class_name in exp1['class_names']:
                class_true = sum([1 for true, pred in zip(exp1['true_labels'], exp1['predictions'])
                                if true == class_name and pred == class_name])
                class_total = sum([1 for true in exp1['true_labels'] if true == class_name])
                class_acc = class_true / class_total if class_total > 0 else 0
                report += f"- {class_name}: {class_acc:.4f} ({class_true}/{class_total})\n"

        # Experiment 2 section.
        if 'experiment2' in self.results:
            exp2 = self.results['experiment2']
            report += f"""
### 实验2: 相似度分析

**类别间相似度矩阵**:
- 对角线元素表示同类别的平均相似度
- 非对角线元素表示不同类别间的混淆程度

**最难区分的类别对**:
"""

            # Find the off-diagonal maxima (ties collected together).
            class_similarity = exp2['class_similarity']
            class_names = exp2['class_names']

            max_confusion = 0
            confusing_pairs = []

            for i in range(len(class_names)):
                for j in range(len(class_names)):
                    if i != j and class_similarity[i, j] > max_confusion:
                        max_confusion = class_similarity[i, j]
                        confusing_pairs = [(class_names[i], class_names[j], class_similarity[i, j])]
                    elif i != j and class_similarity[i, j] == max_confusion:
                        confusing_pairs.append((class_names[i], class_names[j], class_similarity[i, j]))

            for pair in confusing_pairs[:3]:  # show at most the first 3 pairs
                report += f"- {pair[0]} → {pair[1]}: {pair[2]:.4f}\n"

        # Experiment 3 section.
        if 'experiment3' in self.results:
            exp3 = self.results['experiment3']
            report += f"""
### 实验3: 置信度分析

**平均置信度**:
- 正确预测: {exp3['avg_correct_confidence']:.4f}
- 错误预测: {exp3['avg_wrong_confidence']:.4f}
- 差异: {exp3['avg_correct_confidence'] - exp3['avg_wrong_confidence']:.4f}

**置信度分析**:
- 正确预测的平均置信度 {'高于' if exp3['avg_correct_confidence'] > exp3['avg_wrong_confidence'] else '低于'} 错误预测
- 这表明模型在正确预测时通常更加自信
"""

        report += f"""
## 实验分析

### 主要发现
1. CLIP模型在CIFAR-10数据集上展现了良好的零样本分类能力
2. 模型对于某些视觉上相似的类别存在混淆
3. 正确预测的置信度通常高于错误预测

### 局限性
1. 实验基于CIFAR-10的低分辨率图像
2. 样本数量有限，可能影响统计显著性
3. 未进行超参数调优

### 改进建议
1. 使用更高分辨率的图像数据集
2. 增加样本数量以提高统计可靠性
3. 尝试不同的文本提示模板
4. 进行超参数优化

## 可视化结果
所有可视化图表已保存在 `visualizations_{self.experiment_time}` 目录中。

---
*实验报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*
"""

        return report

    def run_all_experiments(self, num_samples=50):
        """Run the whole pipeline: model setup, experiments 1-3, plots, report.

        Args:
            num_samples: Number of CIFAR-10 test samples for experiment 1.

        Returns:
            ``self.results`` (possibly partial if the dataset is missing).
        """
        print("开始CLIP模型基础使用实验")
        print("=" * 50)

        self.setup_model()

        accuracy = self.experiment_1_zero_shot_classification(num_samples)
        # Bail out early when the dataset could not be loaded; the original
        # crashed below on f"{None:.4f}".
        if accuracy is None:
            print("❌ 实验中止：实验1未能完成")
            return self.results

        similarity_matrix = self.experiment_2_similarity_analysis()
        confidence_results = self.experiment_3_confidence_analysis()

        vis_dir = self.visualize_results()

        report_file = self.generate_report()

        print("\n" + "=" * 50)
        print("实验完成!")
        print(f"总体准确率: {accuracy:.4f}")
        print(f"可视化结果: {vis_dir}")
        print(f"实验报告: {report_file}")
        print("=" * 50)

        return self.results

def main():
    """Entry point: run the complete CLIP experiment suite on 50 samples."""
    experiment = CLIPExperiment()
    return experiment.run_all_experiments(num_samples=50)


if __name__ == "__main__":
    main()