import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import models, transforms
import numpy as np
from pathlib import Path
import os
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import seaborn as sns
import pandas as pd
from tqdm import tqdm
import re
# Configure matplotlib so Chinese axis/title text renders correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei provides CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs readable with a CJK font
# Module note (translated): "Pet recognition based on a PyTorch ResNet50 model."
'''我的是基于PyTorch框架的ResNet50模型的宠物识别，'''
# Device configuration: prefer CUDA when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# print(f"Using device: {device}")
# 数据集定义 - 从单个文件夹加载所有预处理图像
# Dataset definition - loads all preprocessed images from a single folder
class PetDataset(Dataset):
    """Dataset of preprocessed pet images stored as .npy files in one directory.

    File names are expected to look like "<class_name>_<digits>.npy"; the class
    name is everything before the trailing "_<digits>.npy" suffix.
    """

    # One compiled pattern shared by class discovery and per-item lookup, so
    # the two code paths cannot drift apart (previously the regex was written
    # twice and recompiled on every __getitem__ call).
    _FNAME_RE = re.compile(r"(.+?)_\d+\.npy$")

    def __init__(self, data_dir, transform=None):
        """Index all .npy files under data_dir and build class mappings.

        Args:
            data_dir: directory containing the preprocessed .npy images.
            transform: optional callable applied to each image tensor.

        Raises:
            FileNotFoundError: no .npy files found in data_dir.
            ValueError: no file name matched the expected pattern.
        """
        self.data_dir = Path(data_dir)
        self.transform = transform
        self.image_paths = list(self.data_dir.glob("*.npy"))  # all preprocessed images
        # Derive the class vocabulary from the file names.
        self.class_names = self._extract_class_names()
        # Sort for a deterministic class -> index assignment.
        self.class_to_idx = {cls_name: idx for idx, cls_name in enumerate(sorted(self.class_names))}
        self.idx_to_class = {idx: cls_name for cls_name, idx in self.class_to_idx.items()}
        # Fail fast on an empty or malformed data directory.
        if len(self.image_paths) == 0:
            raise FileNotFoundError(f"在{data_dir}没有找到 .npy 文件 ")
        if len(self.class_names) == 0:
            raise ValueError("No valid classes found in file names")
        print(f"共找到 {len(self.image_paths)} 张照片，来自{len(self.class_names)}个类别")

    def _extract_class_names(self):
        """Return the unique class names parsed from the indexed file names."""
        class_names = set()
        for path in self.image_paths:
            match = self._FNAME_RE.match(path.name)
            if match:
                class_names.add(match.group(1))
        return list(class_names)

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Return (image tensor (C, H, W), class index, file name) for idx."""
        img_path = self.image_paths[idx]
        # assumes the saved arrays are (H, W, C) — permute below fixes the rank
        img_array = np.load(img_path)
        img_tensor = torch.from_numpy(img_array).permute(2, 0, 1).float()
        # Recover the class label from the file name.
        match = self._FNAME_RE.search(img_path.name)
        if not match:
            raise ValueError(f"Invalid file name format: {img_path.name}")
        label = self.class_to_idx[match.group(1)]
        if self.transform:
            img_tensor = self.transform(img_tensor)
        return img_tensor, label, img_path.name
# 创建ResNet50模型
# Build the ResNet50 transfer-learning model
def create_model(num_classes):
    """Build a transfer-learning ResNet50: frozen backbone, new trainable head.

    Args:
        num_classes: number of output classes for the final linear layer.

    Returns:
        The model moved to the module-global `device`.
    """
    # `pretrained=True` is deprecated since torchvision 0.13; IMAGENET1K_V1 is
    # the exact weight set the legacy flag used to load.
    model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
    # Freeze the whole backbone so only the new head receives gradients.
    for param in model.parameters():
        param.requires_grad = False
    # Replace the final fully-connected layer with a small trainable head.
    num_ftrs = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Linear(num_ftrs, 2048),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(2048, num_classes),
    )
    return model.to(device)
# 评估函数
# Evaluation helper
def evaluate_model(model, criterion, data_loader):
    """Run one no-grad evaluation pass over `data_loader`.

    Returns:
        (mean loss, accuracy, true labels, predicted labels, file names),
        where the label/prediction lists are aligned per sample.
    """
    model.eval()
    total_loss = 0.0
    total_correct = 0
    true_labels, pred_labels, sample_names = [], [], []
    with torch.no_grad():
        for batch_inputs, batch_labels, batch_names in tqdm(data_loader, desc="Evaluating"):
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            logits = model(batch_inputs)
            predictions = logits.argmax(dim=1)
            batch_loss = criterion(logits, batch_labels)
            # Weight by batch size so the final mean is exact even when the
            # last batch is smaller.
            total_loss += batch_loss.item() * batch_inputs.size(0)
            total_correct += torch.sum(predictions == batch_labels.data)
            true_labels.extend(batch_labels.cpu().numpy())
            pred_labels.extend(predictions.cpu().numpy())
            sample_names.extend(batch_names)
    dataset_size = len(data_loader.dataset)
    mean_loss = total_loss / dataset_size
    accuracy = total_correct.double() / dataset_size
    return mean_loss, accuracy, true_labels, pred_labels, sample_names
# 训练函数
# Training loop
def train_model(model, criterion, optimizer, scheduler, train_loader, val_loader,
                num_epochs=25, checkpoint_path='best_model20.pth'):
    """Train `model`, validating every epoch and checkpointing the best weights.

    Args:
        model: network to optimize (already on the module-global `device`).
        criterion: loss function.
        optimizer: optimizer over the trainable parameters.
        scheduler: optional LR scheduler stepped with validation accuracy
            (e.g. ReduceLROnPlateau(mode='max')); pass None to disable.
        train_loader / val_loader: loaders yielding (inputs, labels, filenames).
        num_epochs: number of passes over the training data.
        checkpoint_path: where to save the best-validation weights
            (generalized from the previously hard-coded 'best_model20.pth';
            the default preserves the old behavior).

    Returns:
        History dict of per-epoch train/val loss and accuracy lists (keys are
        the Chinese labels consumed by the plotting helpers).
    """
    best_acc = 0.0
    history = {'训练损失': [], '验证损失': [], '训练准确率': [], '验证准确率': []}
    for epoch in range(num_epochs):
        print(f'Epoch {epoch + 1}/{num_epochs}')
        print('-' * 10)
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_corrects = 0
        for inputs, labels, _ in tqdm(train_loader, desc="Training"):
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Weight batch loss by batch size so the epoch mean is exact.
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_acc = running_corrects.double() / len(train_loader.dataset)
        history['训练损失'].append(epoch_loss)
        history['训练准确率'].append(epoch_acc)
        print(f'训练损失: {epoch_loss:.4f} 准确率: {epoch_acc:.4f}')
        # --- validation phase ---
        val_loss, val_acc, _, _, _ = evaluate_model(model, criterion, val_loader)
        history['验证损失'].append(val_loss)
        history['验证准确率'].append(val_acc)
        print(f'验证损失: {val_loss:.4f} 准确率: {val_acc:.4f}')
        # Step the scheduler on validation accuracy (expects mode='max').
        if scheduler:
            scheduler.step(val_acc)
        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), checkpoint_path)
            print('最佳模型已保存!')
    print(f'训练完成. 最佳验证准确率: {best_acc:.4f}')
    return history
# 绘制混淆矩阵
# Plot the confusion matrix and per-class accuracy
def plot_confusion_matrix(labels, preds, classes, filename='confusion_matrix20.png'):
    """Plot/save the confusion matrix heatmap and a per-class accuracy chart.

    Args:
        labels: true class indices.
        preds: predicted class indices (aligned with labels).
        classes: class names ordered by index.
        filename: output path for the heatmap image.

    Returns:
        Dict mapping class name -> accuracy (0 for classes with no samples).
    """
    # Pin the label set so the matrix is always len(classes) x len(classes);
    # previously a class absent from the test split shrank the matrix and
    # misaligned it with the tick labels.
    cm = confusion_matrix(labels, preds, labels=list(range(len(classes))))
    plt.figure(figsize=(15, 15))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=classes, yticklabels=classes,
                annot_kws={"size": 8})
    plt.title('混淆矩阵', fontsize=16, y=0.99)
    plt.xlabel('预测标签', fontsize=14)
    plt.ylabel('真实标签', fontsize=14)
    plt.xticks(rotation=90, fontsize=8)
    plt.yticks(fontsize=8)
    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.show()
    # Per-class accuracy straight from the matrix: row i holds the counts for
    # true class i, so accuracy = diagonal / row total. This is O(k) instead
    # of rescanning the label arrays once per class.
    row_totals = cm.sum(axis=1)
    class_accuracy = {
        class_name: (cm[i, i] / row_totals[i] if row_totals[i] > 0 else 0)
        for i, class_name in enumerate(classes)
    }
    # Bar chart of per-class accuracy.
    plt.figure(figsize=(12, 6))
    plt.bar(range(len(class_accuracy)), list(class_accuracy.values()),
            tick_label=list(class_accuracy.keys()))
    plt.title('各类别准确率')
    plt.xticks(rotation=90)
    plt.ylabel('准确率')
    plt.tight_layout()
    plt.savefig('class_accuracy20.png', dpi=150, bbox_inches='tight')
    plt.show()
    return class_accuracy
# 可视化训练历史
# Visualize training history
def plot_training_history(history, filename='training_history20.png'):
    """Plot loss and accuracy curves from a train_model history dict.

    Args:
        history: dict with the four Chinese keys written by train_model.
        filename: output path for the figure.
    """
    plt.figure(figsize=(12, 5))
    # Loss curves (stored as plain floats).
    plt.subplot(1, 2, 1)
    plt.plot(history['训练损失'], label='训练损失')
    plt.plot(history['验证损失'], label='验证损失')
    plt.title('训练和验证损失')
    plt.xlabel('训练轮次')
    plt.ylabel('损失')
    plt.legend()
    # Accuracy curves. float() handles 0-dim tensors (CPU or CUDA) as well as
    # plain numbers; the previous acc.cpu().numpy() crashed if an entry was
    # ever a plain float.
    plt.subplot(1, 2, 2)
    plt.plot([float(acc) for acc in history['训练准确率']], label='训练准确率')
    plt.plot([float(acc) for acc in history['验证准确率']], label='验证准确率')
    plt.title('训练和验证准确率')
    plt.xlabel('训练轮次')
    plt.ylabel('准确率')
    plt.legend()
    plt.tight_layout()  # auto-adjust subplot spacing
    plt.savefig(filename, dpi=150)
    plt.show()
# 保存错误预测示例
# Save misclassified examples
def save_error_examples(filenames, true_labels, pred_labels, dataset):
    """Save a grid of misclassified images plus a CSV listing every mistake.

    Args:
        filenames: file name per evaluated sample (aligned with the labels).
        true_labels / pred_labels: integer class indices per sample.
        dataset: PetDataset providing data_dir and idx_to_class.
    """
    error_dir = Path("error_examples")
    error_dir.mkdir(exist_ok=True)
    # ImageNet statistics used to undo normalization for display.
    # NOTE(review): assumes the .npy files were normalized with these stats
    # during preprocessing — confirm against the preprocessing script.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # Collect every misclassified sample.
    errors = [
        {
            "filename": filenames[i],
            "true_label": dataset.idx_to_class[true],
            "pred_label": dataset.idx_to_class[pred],
        }
        for i, (true, pred) in enumerate(zip(true_labels, pred_labels))
        if true != pred
    ]
    print(f"\n发现 {len(errors)} 个分类错误的示例. 正在保存样本...")
    # Only render the grid when there is something to show; previously an
    # empty blank figure was saved when the model made no mistakes.
    if errors:
        # Show at most the first 25 mistakes in a 5x5 grid.
        plt.figure(figsize=(15, 20))
        for i, error in enumerate(errors[:25]):
            img_path = dataset.data_dir / error["filename"]
            img_array = np.load(img_path)
            # Undo normalization and clamp into the displayable [0, 1] range.
            img = np.clip(img_array * std + mean, 0, 1)
            ax = plt.subplot(5, 5, i + 1)
            ax.imshow(img)
            ax.set_title(f"真实: {error['true_label']}\n预测: {error['pred_label']}", fontsize=9)
            ax.axis('off')
        plt.tight_layout()
        plt.savefig("error_examples20.png", dpi=150, bbox_inches='tight')
        plt.close()
    # Full error list as CSV (written even when empty, matching prior output).
    error_df = pd.DataFrame(errors)
    error_df.to_csv("error_predictions20.csv", index=False)
    print("错误示例已保存到 'error_examples20.png' 和 'error_predictions20.csv'")
# 主函数
class _AugmentedSubset(Dataset):
    """Applies a transform on top of a subset without touching the shared base dataset.

    Module-level (not nested in main) so DataLoader worker processes can
    pickle it under the 'spawn' start method.
    """

    def __init__(self, subset, transform):
        self.subset = subset
        self.transform = transform

    def __len__(self):
        return len(self.subset)

    def __getitem__(self, idx):
        img, label, name = self.subset[idx]
        return self.transform(img), label, name


def main():
    """End-to-end pipeline: load data, split, train, evaluate, and report."""
    # Dataset location: ../../preprocessed_images relative to this script.
    current_dir = Path(__file__).parent
    data_dir = current_dir.parent.parent / "preprocessed_images"
    # Build the dataset; abort with a message if the directory is unusable.
    try:
        full_dataset = PetDataset(data_dir)
    except Exception as e:
        print(f"创建数据集时出错: {e}")
        return
    print(f"总图片数: {len(full_dataset)}")
    print(f"类别: {list(full_dataset.class_to_idx.keys())}")
    # Split 80% train / 10% val / 10% test; fixed seed for reproducibility.
    train_size = int(0.8 * len(full_dataset))
    val_size = int(0.1 * len(full_dataset))
    test_size = len(full_dataset) - train_size - val_size
    train_dataset, val_dataset, test_dataset = random_split(
        full_dataset, [train_size, val_size, test_size],
        generator=torch.Generator().manual_seed(42)
    )
    print(f"训练样本数: {len(train_dataset)}")
    print(f"验证样本数: {len(val_dataset)}")
    print(f"测试样本数: {len(test_dataset)}")
    # Augmentation intended for the training set only.
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    ])
    # BUG FIX: the three random_split subsets share the same underlying
    # full_dataset, so the old code's
    #     train_dataset.dataset.transform = train_transform
    #     val_dataset.dataset.transform = None
    #     test_dataset.dataset.transform = None
    # first leaked augmentation into val/test and then immediately erased it
    # for training as well (the last assignment won — augmentation was never
    # applied). Wrap only the training subset instead.
    batch_size = 32
    train_loader = DataLoader(_AugmentedSubset(train_dataset, train_transform),
                              batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
    # Model: frozen ResNet50 backbone with a trainable head.
    num_classes = len(full_dataset.class_names)
    model = create_model(num_classes)
    # Training setup: cross-entropy loss; optimize only the new head; halve
    # the LR after 3 epochs without validation-accuracy improvement ('max'
    # mode because train_model steps the scheduler with accuracy). The
    # deprecated `verbose=True` kwarg was dropped for newer torch versions.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.fc.parameters(), lr=0.001, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3)
    # Train the model and plot the learning curves.
    history = train_model(model, criterion, optimizer, scheduler, train_loader, val_loader, num_epochs=20)
    plot_training_history(history)
    # Reload the best checkpoint before the final evaluation.
    model.load_state_dict(torch.load('best_model20.pth'))
    # Evaluate on the held-out test split.
    test_loss, test_acc, test_labels, test_preds, test_filenames = evaluate_model(
        model, criterion, test_loader)
    print(f"\n{'=' * 50}")
    print(f"测试准确率: {test_acc:.4f}")
    print(f"测试损失: {test_loss:.4f}")
    print(f"{'=' * 50}\n")
    # Classification report (console + CSV).
    class_names = [full_dataset.idx_to_class[i] for i in range(num_classes)]
    print("分类报告:")
    print(classification_report(test_labels, test_preds, target_names=class_names, digits=4))
    report = classification_report(test_labels, test_preds, target_names=class_names, output_dict=True, digits=4)
    report_df = pd.DataFrame(report).transpose()
    report_df.to_csv("classification_report20.csv", index=True)
    # Confusion matrix and per-class accuracy.
    class_accuracy = plot_confusion_matrix(test_labels, test_preds, class_names)
    print("\n各类别准确率:")
    for class_name, acc in sorted(class_accuracy.items(), key=lambda x: x[1], reverse=True):
        print(f"{class_name}: {acc:.4f}")
    # Misclassified examples (grid image + CSV).
    save_error_examples(test_filenames, test_labels, test_preds, full_dataset)
    # Persist the class-index mapping for inference-time use.
    class_mapping = {
        'idx_to_class': full_dataset.idx_to_class,
        'class_to_idx': full_dataset.class_to_idx
    }
    torch.save(class_mapping, 'class_mapping20.pth')
    print("\n类别映射已保存到 'class_mapping20.pth'")
# Script entry point: run the full training/evaluation pipeline.
if __name__ == "__main__":
    main()