import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
import time
import logging

# Configure logging: timestamped INFO-level messages on the root handler
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

logger.info("开始执行FashionMNIST随机森林分类任务")

# Record wall-clock start time so total runtime can be reported at the end
start_time = time.time()

# Load the complete FashionMNIST dataset
logger.info("准备数据转换...")
transform = transforms.Compose([transforms.ToTensor()])

logger.info("开始下载并加载FashionMNIST训练数据集...")
train_dataset = torchvision.datasets.FashionMNIST(
    root='/workspace/FashionMNIST', train=True, transform=transform, download=True)
logger.info(f"训练数据集加载完成，共 {len(train_dataset)} 个样本")

logger.info("开始下载并加载FashionMNIST测试数据集...")
test_dataset = torchvision.datasets.FashionMNIST(
    root='/workspace/FashionMNIST', train=False, transform=transform, download=True)
logger.info(f"测试数据集加载完成，共 {len(test_dataset)} 个样本")

logger.info("创建数据加载器...")
# Single-batch loaders: batch_size equals the dataset length, so one
# iteration yields every sample at once (entire set must fit in memory).
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=len(train_dataset), shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)

# Fetch the single training batch directly instead of the original
# one-iteration for-loop, which leaked X_train/y_train out of loop scope
# and obscured that exactly one batch is ever produced.
logger.info("开始处理训练数据...")
images, labels = next(iter(train_loader))
logger.info(f"训练数据形状: {images.shape}")
X_train = images.view(images.shape[0], -1).numpy()  # flatten 1x28x28 -> 784 features
y_train = labels.numpy()
logger.info(f"处理后训练数据维度: {X_train.shape}")
logger.info(f"训练数据标签分布: {np.bincount(y_train)}")

# Same single-batch extraction for the test split
logger.info("开始处理测试数据...")
images, labels = next(iter(test_loader))
logger.info(f"测试数据形状: {images.shape}")
X_test = images.view(images.shape[0], -1).numpy()
y_test = labels.numpy()
logger.info(f"处理后测试数据维度: {X_test.shape}")
logger.info(f"测试数据标签分布: {np.bincount(y_test)}")

# Configure the random forest classifier.
# NOTE(review): the original code built a custom class-weight dict (weight 2.5
# for index 6, "Shirt") but never passed it to the model — class_weight below
# is 'balanced', so the dict was dead code and has been removed. To actually
# up-weight Shirt, pass such a dict as the class_weight argument instead.
logger.info("配置随机森林分类器...")
rf = RandomForestClassifier(
    criterion='gini',       # Gini is usually faster than entropy with similar quality
    max_depth=15,           # cap tree depth to limit overfitting
    n_estimators=150,       # more trees -> more stable, more accurate ensemble
    min_samples_split=10,   # require >=10 samples to split an internal node
    min_samples_leaf=4,     # every leaf keeps at least 4 samples
    max_features='sqrt',    # each split considers sqrt(784) ~= 28 features
    bootstrap=True,         # bootstrap-sample the training set per tree
    oob_score=True,         # out-of-bag estimate of generalization accuracy
    random_state=42,        # fixed seed for reproducibility
    n_jobs=-1,              # train trees on all CPU cores
    class_weight='balanced' # reweight classes inversely to their frequency
)
logger.info(f"随机森林参数: {rf.get_params()}")

logger.info("开始训练随机森林分类器...")
train_start = time.time()
rf.fit(X_train, y_train)
train_end = time.time()
logger.info(f"随机森林训练完成，耗时: {train_end - train_start:.2f} 秒")

# Predict on the held-out test split
logger.info("开始在测试集上进行预测...")
pred_start = time.time()
y_pred = rf.predict(X_test)
pred_end = time.time()
logger.info(f"预测完成，耗时: {pred_end - pred_start:.2f} 秒")

# Overall accuracy plus per-class precision/recall/F1
accuracy = accuracy_score(y_test, y_pred)
logger.info(f"完整测试集准确率: {accuracy:.4f}")
logger.info("详细分类报告:")
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
report = classification_report(y_test, y_pred, target_names=class_names)
print(report)

# Top-10 most important pixels (features) by Gini importance
logger.info("特征重要性前10位:")
feature_importances = rf.feature_importances_
# argsort ascending, keep last 10, reverse -> descending importance
top_indices = np.argsort(feature_importances)[-10:][::-1]
# (original wrapped this in enumerate() but never used the index)
for idx in top_indices:
    logger.info(f"特征 {idx}: {feature_importances[idx]:.6f}")

def visualize_predictions(X_test, y_test, y_pred, num_samples=10):
    """
    Visualize randomly chosen test samples with their predicted labels
    and save the resulting figure to disk.

    Parameters:
    - X_test: flattened test images, one 784-element row per sample
    - y_test: ground-truth labels for the test set
    - y_pred: labels predicted by the model
    - num_samples: number of samples to display
    """
    logger.info(f"开始可视化 {num_samples} 个测试样本的预测结果...")

    # Output directory under the FashionMNIST data root
    save_dir = '/workspace/FashionMNIST/output'

    # FashionMNIST class names (list index == label value)
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

    # Pick num_samples distinct indices; fixed seed keeps the choice reproducible
    np.random.seed(42)
    indices = np.random.choice(len(y_test), num_samples, replace=False)
    logger.info(f"选择的样本索引: {indices}")

    # Accuracy over just the displayed subset
    correct_count = sum(y_pred[idx] == y_test[idx] for idx in indices)
    logger.info(f"选中样本中正确预测的数量: {correct_count}/{num_samples}, 正确率: {correct_count/num_samples:.2f}")

    fig = plt.figure(figsize=(12, 8))
    fig.suptitle("Random Forest Model Predictions on FashionMNIST Test Set", fontsize=16)

    # Two rows; ceil-divide for the column count so odd num_samples still
    # fits (the original num_samples//2 raised ValueError for odd counts,
    # e.g. num_samples=5 -> subplot(2, 2, 5)).
    ncols = (num_samples + 1) // 2

    for i, idx in enumerate(indices):
        plt.subplot(2, ncols, i + 1)

        # Reshape the flat 784-vector back into a 28x28 grayscale image
        img = X_test[idx].reshape(28, 28)
        plt.imshow(img, cmap='gray')

        # Green title for a correct prediction, red for a miss
        title_color = 'green' if y_pred[idx] == y_test[idx] else 'red'
        title = f'Pred: {class_names[y_pred[idx]]}\nTrue: {class_names[y_test[idx]]}'
        plt.title(title, color=title_color)

        logger.info(f"样本 {idx}: 预测={class_names[y_pred[idx]]}, 真实={class_names[y_test[idx]]}, " +
                   f"正确={y_pred[idx] == y_test[idx]}")

        plt.axis('off')

    plt.tight_layout()
    plt.subplots_adjust(top=0.9)  # leave room for the suptitle

    # Create the output directory on first use
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
        logger.info(f"创建输出目录: {save_dir}")

    file_path = os.path.join(save_dir, "rf_fashion_mnist_predictions.png")

    plt.savefig(file_path, dpi=300, bbox_inches='tight')
    logger.info(f"图像已保存到: {file_path}")

    plt.show()

# Generate and save the prediction grid for 10 random test samples.
visualize_predictions(X_test, y_test, y_pred, num_samples=10)

# Needed by visualize_pca_results below. (The duplicate matplotlib/numpy
# imports that originally followed were removed — both are already
# imported at the top of the file.)
from sklearn.decomposition import PCA

# PCA dimensionality reduction and 2D visualization of the classification results
def visualize_pca_results(X_train, X_test, y_train, y_test, y_pred):
    """
    Project the 784-dimensional features onto the first two principal
    components and visualize the classifier's test-set results in 2D.

    Parameters:
    - X_train: training features used to fit the PCA basis
    - X_test: test features to project and plot
    - y_train: training labels (unused here; kept for interface symmetry)
    - y_test: ground-truth test labels
    - y_pred: model-predicted test labels
    """
    logger.info("开始PCA降维分析...")

    # Two components — all the plot uses. The original fitted three, which
    # contradicted its own comments/logs and inflated the "cumulative
    # explained variance" figure logged below.
    pca = PCA(n_components=2)

    # Fit the PCA basis on training data only
    logger.info("使用训练数据拟合PCA模型...")
    pca.fit(X_train)

    # Log the variance captured by each retained component
    explained_variance = pca.explained_variance_ratio_
    logger.info(f"两个主成分解释的方差比例: {explained_variance[0]:.4f}, {explained_variance[1]:.4f}")
    logger.info(f"累计解释的方差比例: {sum(explained_variance):.4f}")

    # Project the test data into the 2D PCA space
    logger.info("对测试数据进行PCA降维...")
    X_test_pca = pca.transform(X_test)

    # Create the output directory on first use
    save_dir = '/workspace/FashionMNIST/output'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
        logger.info(f"创建输出目录: {save_dir}")

    logger.info("Drawing PCA dimensionality reduction scatter plot...")

    # FashionMNIST class names (list index == label value)
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

    # Subsample up to 2000 test points so the scatter plot stays readable
    np.random.seed(42)
    sample_indices = np.random.choice(len(y_test), min(2000, len(y_test)), replace=False)

    plt.figure(figsize=(14, 10))

    # One marker shape and one color per class
    markers = ['o', 'v', '^', '<', '>', 's', 'p', 'h', 'D', '*']
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', 
              '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']

    # Scatter each class separately so each gets its own legend entry
    for i, class_name in enumerate(class_names):
        # Positions (within the subsample) of points truly belonging to class i
        class_indices = np.where(y_test[sample_indices] == i)[0]
        if len(class_indices) > 0:
            # Map back to indices into the full test arrays
            actual_indices = sample_indices[class_indices]

            # Split this class's points by prediction correctness
            correct_mask = y_pred[actual_indices] == i
            correct_points = actual_indices[correct_mask]
            incorrect_points = actual_indices[~correct_mask]

            if len(correct_points) > 0:
                plt.scatter(X_test_pca[correct_points, 0], X_test_pca[correct_points, 1], 
                          marker=markers[i], color=colors[i], s=50, alpha=0.6, 
                          label=f'{class_name}', edgecolors='black', linewidth=0.5)

            # Misclassified points are drawn as red crosses regardless of class
            if len(incorrect_points) > 0:
                plt.scatter(X_test_pca[incorrect_points, 0], X_test_pca[incorrect_points, 1], 
                          marker='x', color='red', s=40, alpha=0.8)

    # Mark each class centroid (computed over the FULL test set) and label it
    for i, class_name in enumerate(class_names):
        class_indices = np.where(y_test == i)[0]
        if len(class_indices) > 0:
            center_x = np.mean(X_test_pca[class_indices, 0])
            center_y = np.mean(X_test_pca[class_indices, 1])

            # Larger black-edged marker at the class center
            plt.scatter(center_x, center_y, marker=markers[i], s=200, 
                      color=colors[i], edgecolor='black', linewidth=2.0)

            # Class-name label next to the centroid
            plt.annotate(class_name, (center_x, center_y), 
                       xytext=(8, 8), textcoords='offset points', 
                       fontsize=12, fontweight='bold', 
                       bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="gray", alpha=0.7))

    # Title and axis labels, annotated with explained-variance percentages
    plt.title('FashionMNIST Dataset PCA Visualization (Random Forest Classification)', fontsize=16)
    plt.xlabel(f'First Principal Component (Explained Variance: {explained_variance[0]:.2%})', fontsize=12)
    plt.ylabel(f'Second Principal Component (Explained Variance: {explained_variance[1]:.2%})', fontsize=12)

    # Overall accuracy shown in the lower-left corner of the axes
    accuracy = accuracy_score(y_test, y_pred)
    plt.annotate(f'Model Accuracy: {accuracy:.4f}', 
                xy=(0.02, 0.02), xycoords='axes fraction', fontsize=12, 
                bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="gray", alpha=0.8))

    # Empty scatter adds a legend entry for the red "incorrect" crosses
    plt.scatter([], [], marker='x', color='red', s=40, label='Incorrect Predictions')

    # Multi-column legend below the axes to save space
    plt.legend(loc='upper center', fontsize=10, bbox_to_anchor=(0.5, -0.05), 
              ncol=5, frameon=True, fancybox=True, shadow=True)

    plt.grid(alpha=0.3)
    plt.tight_layout()

    # Save, then display, the figure
    file_path = os.path.join(save_dir, "rf_fashion_mnist_pca_visualization.png")
    plt.savefig(file_path, dpi=300, bbox_inches='tight')
    logger.info(f"PCA visualization image saved to: {file_path}")

    plt.show()

    # Extra analysis: per-class accuracy plus spread (mean std) in PCA space
    logger.info("PCA distribution by class:")
    for i, class_name in enumerate(class_names):
        class_indices = np.where(y_test == i)[0]
        if len(class_indices) > 0:
            correct_class = np.sum(y_pred[class_indices] == i)
            class_accuracy = correct_class / len(class_indices)

            # Mean per-axis standard deviation as a dispersion measure
            class_std = np.std(X_test_pca[class_indices], axis=0)
            class_dispersion = np.mean(class_std)

            logger.info(f"Class {class_name}: Accuracy={class_accuracy:.4f}, PCA Dispersion={class_dispersion:.4f}")

# Run the PCA visualization after training and evaluation have completed.
visualize_pca_results(X_train, X_test, y_train, y_test, y_pred)

# Report total wall-clock runtime for the whole script.
end_time = time.time()
total_time = end_time - start_time
logger.info(f"任务完成，总共耗时: {total_time:.2f} 秒")

# Final summary of the key random-forest hyperparameters.
logger.info(f"随机森林参数摘要:")
for summary_line in (
    f"- 树的数量: {rf.n_estimators}",
    f"- 最大深度: {rf.max_depth}",
    f"- 决策树使用的特征数量: {rf.max_features}",
    f"- 叶节点的最小样本数: {rf.min_samples_leaf}",
):
    logger.info(summary_line)
