import os
import time
import copy
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import CosineAnnealingLR, OneCycleLR, StepLR
from colorama import Fore, Style, init
from tabulate import tabulate
from datetime import datetime
import psutil
import platform

from model import get_model

# Initialize colorama (autoreset restores default colors after each print)
init(autoreset=True)

# Configure matplotlib for Chinese text in chart labels
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render Chinese labels
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with this font

# 设置随机种子以确保可重复性
# Seed all RNGs so runs are reproducible
def set_seed(seed=42):
    """Seed numpy and torch RNGs for reproducible training runs.

    Args:
        seed: base seed applied to numpy, the torch CPU generator, and
            every visible CUDA device.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark=True lets cuDNN auto-tune its algorithm per input size, which
    # is non-deterministic and contradicts deterministic=True above, so it
    # must be disabled for reproducibility (the original enabled it).
    torch.backends.cudnn.benchmark = False

# 获取数据集的类别
def get_classes(data_dir):
    all_data = datasets.ImageFolder(data_dir)
    return all_data.classes

# 创建数据加载器
# 针对200类鸟类分类任务优化数据增强策略
def get_data_loaders(data_dir, batch_size, train=False):
    """
    获取数据加载器
    """
    if train:
        # 训练集数据增强
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.RandomApply(torch.nn.ModuleList([transforms.ColorJitter()]), p=0.1),
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        all_data = datasets.ImageFolder(data_dir, transform=transform)
        train_data_len = int(len(all_data)*0.75)
        valid_data_len = int((len(all_data) - train_data_len)/2)
        test_data_len = int(len(all_data) - train_data_len - valid_data_len)
        train_data, val_data, test_data = random_split(all_data, [train_data_len, valid_data_len, test_data_len], 
                                                      generator=torch.Generator().manual_seed(42))
        train_loader = DataLoader(
            train_data, 
            batch_size=batch_size, 
            shuffle=True, 
            num_workers=8,
            pin_memory=True,
            prefetch_factor=2
        )
        return train_loader, train_data_len
    
    else:
        # 验证集和测试集只进行基本变换
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        all_data = datasets.ImageFolder(data_dir, transform=transform)
        train_data_len = int(len(all_data)*0.70)
        valid_data_len = int((len(all_data) - train_data_len)/2)
        test_data_len = int(len(all_data) - train_data_len - valid_data_len)
        train_data, val_data, test_data = random_split(all_data, [train_data_len, valid_data_len, test_data_len],
                                                      generator=torch.Generator().manual_seed(42))
        val_loader = DataLoader(
            val_data, 
            batch_size=batch_size, 
            shuffle=True, 
            num_workers=8,
            pin_memory=True
        )
        test_loader = DataLoader(
            test_data, 
            batch_size=batch_size, 
            shuffle=True, 
            num_workers=8,
            pin_memory=True
        )
        return (val_loader, test_loader, valid_data_len, test_data_len)

# 训练模型
def train_model(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, device, num_epochs=25):
    """
    训练模型并返回训练好的模型和训练历史
    """
    since = time.time()
    
    # 创建保存模型的目录
    save_dir = os.path.join(os.getcwd(), 'model')
    os.makedirs(save_dir, exist_ok=True)
    
    # 创建保存运行结果的目录
    runs_dir = os.path.join(os.getcwd(), 'runs')
    os.makedirs(runs_dir, exist_ok=True)
    
    # 初始化最佳模型权重和准确率
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    
    # 初始化指标历史记录
    metrics_history = {
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'lr': [],
        'epoch_time': []
    }
    
    # 打印训练开始的标题
    print("\n" + "="*100)
    print(f"{Fore.CYAN}{Style.BRIGHT}✨ 鸟类分类模型训练任务 ✨{Style.RESET_ALL}".center(110))
    print(f"{Fore.YELLOW}模型: {Style.BRIGHT}ResNet101{Style.RESET_ALL}".center(100))
    print(f"{Fore.YELLOW}时间: {Style.BRIGHT}{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}{Style.RESET_ALL}".center(110))
    print(f"{Fore.YELLOW}保存路径: {Style.BRIGHT}{save_dir}{Style.RESET_ALL}".center(110))
    print("="*100 + "\n")
    
    # 记录系统信息
    system_info = {
        "操作系统": f"{platform.system()} {platform.release()}",
        "Python版本": platform.python_version(),
        "PyTorch版本": torch.__version__,
        "CPU型号": platform.processor(),
        "CPU使用率": f"{psutil.cpu_percent()}%",
        "CPU核心数": f"{psutil.cpu_count(logical=False)}核心 ({psutil.cpu_count(logical=True)}线程)",
        "内存使用率": f"{psutil.virtual_memory().percent}%",
        "可用内存": f"{psutil.virtual_memory().available / (1024**3):.2f} GB",
        "总内存": f"{psutil.virtual_memory().total / (1024**3):.2f} GB"
    }
    
    if torch.cuda.is_available():
        system_info["CUDA版本"] = torch.version.cuda
        system_info["GPU设备"] = torch.cuda.get_device_name(0)
        system_info["GPU显存总量"] = f"{torch.cuda.get_device_properties(0).total_memory / (1024**3):.2f} GB"
        system_info["GPU显存使用"] = f"{torch.cuda.memory_allocated(0) / (1024**3):.2f} GB"
        system_info["GPU显存占用率"] = f"{torch.cuda.memory_allocated(0) / torch.cuda.get_device_properties(0).total_memory * 100:.2f}%"
    
    print(f"\n{Fore.CYAN}{Style.BRIGHT}🖥️ 系统资源信息 🖥️{Style.RESET_ALL}\n")
    system_table = [[k, v] for k, v in system_info.items()]
    print(tabulate(system_table, headers=["指标", "数值"], tablefmt="fancy_grid"))
    
    # 打印模型信息
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    
    model_info = [
        ["模型类型", "ResNet101"],
        ["总参数量", f"{total_params:,}"],
        ["可训练参数", f"{trainable_params:,} ({trainable_params/total_params*100:.2f}%)"],
        ["非训练参数", f"{total_params - trainable_params:,} ({(total_params-trainable_params)/total_params*100:.2f}%)"],
        ["优化器", f"{type(optimizer).__name__} (weight_decay={optimizer.param_groups[0]['weight_decay']})"],
        ["学习率", f"{optimizer.param_groups[0]['lr']:.6f}"],
        ["学习率调度器", f"{type(scheduler).__name__} (step_size={scheduler.step_size}, gamma={scheduler.gamma})"],
        ["损失函数", "CrossEntropyLoss(label_smoothing=0.1)"],
        ["训练轮数", f"{num_epochs}"],
        ["批次大小", f"训练: {next(iter(dataloaders['train']))[0].shape[0]}, 验证: {next(iter(dataloaders['val']))[0].shape[0]}"]
    ]
    
    print(f"\n{Fore.MAGENTA}{Style.BRIGHT}🏗️ 模型架构摘要 🏗️{Style.RESET_ALL}\n")
    print(tabulate(model_info, headers=["属性", "值"], tablefmt="fancy_grid"))
    
    # 打印训练配置
    train_config = [
        ["训练集大小", f"{dataset_sizes['train']}"],
        ["验证集大小", f"{dataset_sizes['val']}"],
        ["测试集大小", f"{dataset_sizes['test']}"],
        ["训练批次数", f"{len(dataloaders['train'])}"],
        ["验证批次数", f"{len(dataloaders['val'])}"],
        ["测试批次数", f"{len(dataloaders['test'])}"],
        ["设备", f"{device}"],
        ["梯度裁剪", "max_norm=1.0"],
        ["早停策略", "patience=10, min_delta=0.001"]
    ]
    
    print(f"\n{Fore.BLUE}{Style.BRIGHT}⚙️ 训练配置 ⚙️{Style.RESET_ALL}\n")
    print(tabulate(train_config, headers=["配置项", "值"], tablefmt="fancy_grid"))
    
    # 打印训练开始信息
    print(f"\n{Fore.GREEN}{Style.BRIGHT}🚀 开始训练 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} 🚀{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}{'='*100}{Style.RESET_ALL}")
    
    # 用于早停的计数器
    early_stop_counter = 0
    early_stop_patience = 10
    min_delta = 0.001
    
    for epoch in range(num_epochs):
        epoch_start_time = time.time()
        
        # 记录当前学习率
        current_lr = optimizer.param_groups[0]['lr']
        metrics_history['lr'].append(current_lr)
        
        print(f"\n{Fore.MAGENTA}{Style.BRIGHT}🔄 第 {epoch+1}/{num_epochs} 轮训练开始 - {datetime.now().strftime('%H:%M:%S')} 🔄{Style.RESET_ALL}")
        print(f"{Fore.CYAN}当前学习率: {current_lr:.6f}{Style.RESET_ALL}")
        
        # 记录内存使用情况
        if torch.cuda.is_available():
            mem_allocated = torch.cuda.memory_allocated(0) / (1024**3)
            mem_reserved = torch.cuda.memory_reserved(0) / (1024**3)
            print(f"{Fore.WHITE}GPU内存: 已分配 {mem_allocated:.2f} GB, 已保留 {mem_reserved:.2f} GB, 占用率 {mem_allocated/torch.cuda.get_device_properties(0).total_memory*1024**3*100:.2f}%{Style.RESET_ALL}")
        
        # 每个epoch有训练和验证阶段
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # 设置模型为训练模式
            else:
                model.eval()   # 设置模型为评估模式
            
            running_loss = 0.0
            running_corrects = 0
            
            # 遍历数据
            phase_color = Fore.GREEN if phase == 'train' else Fore.BLUE
            desc = f"{phase_color}{Style.BRIGHT}{phase.lower()}{Style.RESET_ALL}"
            progress_bar = tqdm(
                dataloaders[phase], 
                desc=desc,
                bar_format="{desc}: {percentage:3.0f}%|{bar:30}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] {postfix}",
                ncols=120
            )
            
            batch_losses = []
            batch_accs = []
            
            for inputs, labels in progress_bar:
                inputs = inputs.to(device)
                labels = labels.to(device)
                
                # 清零参数梯度
                optimizer.zero_grad()
                
                # 前向传播
                # 只在训练阶段跟踪历史
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    
                    # 只在训练阶段进行反向传播和优化
                    if phase == 'train':
                        loss.backward()
                        # 添加梯度裁剪，防止梯度爆炸
                        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                        optimizer.step()
                
                # 统计
                batch_loss = loss.item()
                batch_acc = torch.sum(preds == labels.data).double() / inputs.size(0)
                running_loss += batch_loss * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                
                # 记录每个批次的损失和准确率
                batch_losses.append(batch_loss)
                batch_accs.append(batch_acc.item())
                
                # 更新进度条，只显示当前批次的损失和准确率
                metrics_str = (
                    f"Loss={Fore.YELLOW}{batch_loss:.4f}{Style.RESET_ALL}, "
                    f"Acc={Fore.CYAN}{batch_acc*100:.2f}%{Style.RESET_ALL}"
                )
                progress_bar.set_postfix_str(metrics_str)
            
            # 在训练阶段更新学习率
            if phase == 'train':
                scheduler.step()
            
            # 计算epoch的损失和准确率
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            
            # 计算批次级别的统计信息
            batch_loss_mean = np.mean(batch_losses)
            batch_loss_std = np.std(batch_losses)
            batch_acc_mean = np.mean(batch_accs)
            batch_acc_std = np.std(batch_accs)
            
            # 记录训练和验证的损失和准确率
            if phase == 'train':
                metrics_history['train_loss'].append(float(epoch_loss))
                metrics_history['train_acc'].append(float(epoch_acc))
            else:
                metrics_history['val_loss'].append(float(epoch_loss))
                metrics_history['val_acc'].append(float(epoch_acc))
            
            # 打印epoch的结果
            phase_emoji = "🔥" if phase == 'train' else "🔍"
            print(f"\n{phase_color}{Style.BRIGHT}{phase_emoji} {phase.upper()} 结果:{Style.RESET_ALL} Loss: {Fore.YELLOW}{epoch_loss:.4f}{Style.RESET_ALL}, Acc: {Fore.CYAN}{epoch_acc*100:.2f}%{Style.RESET_ALL}")
            print(f"{phase_color}批次统计:{Style.RESET_ALL} Loss(μ±σ): {Fore.YELLOW}{batch_loss_mean:.4f}±{batch_loss_std:.4f}{Style.RESET_ALL}, Acc(μ±σ): {Fore.CYAN}{batch_acc_mean*100:.2f}%±{batch_acc_std*100:.2f}%{Style.RESET_ALL}")
            
            # 如果是验证阶段且准确率提高，保存最佳模型
            if phase == 'val' and epoch_acc > best_acc + min_delta:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                # 保存最佳模型到文件
                best_model_path = os.path.join(save_dir, 'best_model.pth')
                torch.save(model.state_dict(), best_model_path)
                print(f"{Fore.GREEN}✓ 发现更好的模型! 已保存到: {best_model_path}{Style.RESET_ALL}")
                early_stop_counter = 0
            elif phase == 'val':
                early_stop_counter += 1
                if early_stop_counter >= early_stop_patience:
                    print(f"{Fore.RED}{Style.BRIGHT}⚠ 早停: {early_stop_patience} 轮未改善，停止训练{Style.RESET_ALL}")
                    # 提前结束训练循环
                    break
        
        # 记录本轮训练时间
        epoch_time = time.time() - epoch_start_time
        metrics_history['epoch_time'].append(epoch_time)
        
        # 打印本轮训练摘要
        epoch_summary = [
            ["训练损失", f"{Fore.YELLOW}{metrics_history['train_loss'][-1]:.4f}{Style.RESET_ALL}"],
            ["训练准确率", f"{Fore.YELLOW}{metrics_history['train_acc'][-1]*100:.2f}%{Style.RESET_ALL}"],
            ["验证损失", f"{Fore.YELLOW}{metrics_history['val_loss'][-1]:.4f}{Style.RESET_ALL}"],
            ["验证准确率", f"{Fore.YELLOW}{metrics_history['val_acc'][-1]*100:.2f}%{Style.RESET_ALL}"],
            ["学习率", f"{Fore.MAGENTA}{current_lr:.6f}{Style.RESET_ALL}"],
            ["轮次用时", f"{epoch_time//60:.0f}分 {epoch_time%60:.0f}秒"],
            ["早停计数", f"{early_stop_counter}/{early_stop_patience}"]
        ]
        
        print(f"\n{Fore.GREEN}{Style.BRIGHT}📈 第 {epoch+1} 轮指标摘要 📈{Style.RESET_ALL}")
        print(tabulate(epoch_summary, headers=["指标", "数值"], tablefmt="fancy_grid"))
        print(f"{Fore.YELLOW}最佳验证准确率: {best_acc*100:.2f}% (轮次 {metrics_history['val_acc'].index(max(metrics_history['val_acc']))+1}){Style.RESET_ALL}")
        
        # 如果早停触发，跳出训练循环
        if early_stop_counter >= early_stop_patience:
            print(f"{Fore.RED}{Style.BRIGHT}⚠ 训练提前终止 - 早停条件已满足{Style.RESET_ALL}")
            break
        
        # 每5轮或最后一轮绘制并保存训练历史图表
        if (epoch + 1) % 5 == 0 or epoch == num_epochs - 1 or early_stop_counter >= early_stop_patience:
            plt.figure(figsize=(15, 10))
            
            # 损失曲线
            plt.subplot(2, 2, 1)
            plt.plot(metrics_history['train_loss'], label='训练损失', color='#2ecc71', marker='o')
            plt.plot(metrics_history['val_loss'], label='验证损失', color='#e74c3c', marker='s')
            plt.title('损失曲线')
            plt.xlabel('轮次')
            plt.ylabel('损失')
            plt.legend()
            plt.grid(True, linestyle='--', alpha=0.7)
            
            # 准确率曲线
            plt.subplot(2, 2, 2)
            plt.plot([x*100 for x in metrics_history['train_acc']], label='训练准确率', color='#2ecc71', marker='o')
            plt.plot([x*100 for x in metrics_history['val_acc']], label='验证准确率', color='#e74c3c', marker='s')
            plt.title('准确率曲线')
            plt.xlabel('轮次')
            plt.ylabel('准确率 (%)')
            plt.legend()
            plt.grid(True, linestyle='--', alpha=0.7)
            
            # 学习率曲线
            plt.subplot(2, 2, 3)
            plt.plot(metrics_history['lr'], label='学习率', color='#9b59b6', marker='d')
            plt.title('学习率变化')
            plt.xlabel('轮次')
            plt.ylabel('学习率')
            plt.grid(True, linestyle='--', alpha=0.7)
            
            # 训练时间曲线
            plt.subplot(2, 2, 4)
            plt.plot(metrics_history['epoch_time'], label='训练时间', color='#3498db', marker='x')
            plt.title('每轮训练时间')
            plt.xlabel('轮次')
            plt.ylabel('时间 (秒)')
            plt.grid(True, linestyle='--', alpha=0.7)
            
            plt.tight_layout()
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            save_path = os.path.join(runs_dir, f'training_history_{epoch+1}.png')
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            plt.close()
            
            print(f"\n{Fore.GREEN}✓ 训练历史图表已保存到: {save_path}{Style.RESET_ALL}")
            
            # 保存训练指标到JSON文件
            metrics_json = {
                'train_loss': [float(x) for x in metrics_history['train_loss']],
                'val_loss': [float(x) for x in metrics_history['val_loss']],
                'train_acc': [float(x) for x in metrics_history['train_acc']],
                'val_acc': [float(x) for x in metrics_history['val_acc']],
                'learning_rate': [float(x) for x in metrics_history['lr']],
                'epoch_time': [float(x) for x in metrics_history['epoch_time']],
                'best_epoch': metrics_history['val_acc'].index(max(metrics_history['val_acc'])),
                'best_acc': float(best_acc)
            }
            
            import json
            with open(os.path.join(runs_dir, f'metrics_history_{epoch+1}.json'), 'w') as f:
                json.dump(metrics_json, f, indent=4)
            
            print(f"{Fore.GREEN}✓ 训练指标已保存到: {os.path.join(runs_dir, f'metrics_history_{epoch+1}.json')}{Style.RESET_ALL}")
    
    # 计算训练时间
    time_elapsed = time.time() - since
    print(f"\n{Fore.GREEN}{Style.BRIGHT}✅ 训练完成! ✅{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}训练用时: {time_elapsed // 3600:.0f}小时 {(time_elapsed % 3600) // 60:.0f}分 {time_elapsed % 60:.0f}秒{Style.RESET_ALL}")
    print(f"{Fore.CYAN}最佳验证准确率: {best_acc*100:.2f}%{Style.RESET_ALL}")
    
    # 加载最佳模型权重
    model.load_state_dict(best_model_wts)
    
    return model, metrics_history

def main():
    """Run the full pipeline: data loading, training, and test evaluation.

    Side effects: writes model checkpoints, training charts, a confusion
    matrix heatmap, per-class result charts and JSON reports under the
    current working directory.
    """
    # Seed RNGs for reproducibility
    set_seed(42)

    # cuDNN benchmarking auto-tunes algorithms per input size, which is
    # non-deterministic.  BUG FIX: the original enabled it right after asking
    # for determinism; keep it off so runs are actually reproducible.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # Select GPU when available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"{Fore.YELLOW}使用设备: {Fore.GREEN}{device}{Style.RESET_ALL}")

    # Dataset root (ImageFolder layout)
    data_dir = os.path.join(os.getcwd(), 'images')

    # Number of classes is derived from the directory structure
    classes = get_classes(data_dir)
    num_classes = len(classes)
    print(f"{Fore.YELLOW}分类数量: {Fore.GREEN}{num_classes}{Style.RESET_ALL}")

    # Training batch size — large enough for throughput without exhausting memory
    batch_size = 128

    print(f"\n{Fore.CYAN}{Style.BRIGHT}📊 数据加载 📊{Style.RESET_ALL}")
    print(f"{Fore.WHITE}• 批次大小(训练): {Fore.CYAN}{batch_size}{Style.RESET_ALL}")
    print(f"{Fore.WHITE}• 批次大小(验证/测试): {Fore.CYAN}64{Style.RESET_ALL}")
    print(f"{Fore.WHITE}• 数据目录: {Fore.CYAN}{data_dir}{Style.RESET_ALL}")

    # Build the loaders (train branch, then val/test branch)
    print(f"\n{Fore.GREEN}>>> {Style.BRIGHT}数据处理: 训练集{Style.RESET_ALL}")
    train_loader, train_data_len = get_data_loaders(data_dir, batch_size, train=True)
    print(f"{Fore.GREEN}>>> {Style.BRIGHT}数据处理: 验证集和测试集{Style.RESET_ALL}")
    val_loader, test_loader, valid_data_len, test_data_len = get_data_loaders(data_dir, 64)

    # Loader and size dictionaries consumed by train_model
    dataloaders = {
        'train': train_loader,
        'val': val_loader,
        'test': test_loader
    }

    dataset_sizes = {
        'train': train_data_len,
        'val': valid_data_len,
        'test': test_data_len
    }

    # Dataset summary table
    data_info = [
        ["训练集大小", f"{train_data_len}"],
        ["验证集大小", f"{valid_data_len}"],
        ["测试集大小", f"{test_data_len}"],
        ["总样本数", f"{train_data_len + valid_data_len + test_data_len}"],
        ["类别数量", f"{num_classes}"]
    ]

    print(f"\n{Fore.BLUE}{Style.BRIGHT}📊 数据摘要 📊{Style.RESET_ALL}\n")
    print(tabulate(data_info, headers=["指标", "数值"], tablefmt="fancy_grid"))

    # Build the model and move it to the selected device
    print(f"\n{Fore.MAGENTA}{Style.BRIGHT}🔧 创建模型 🔧{Style.RESET_ALL}")
    model = get_model(num_classes)
    model = model.to(device)

    # Loss with label smoothing to soften the 200-way targets
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
    criterion = criterion.to(device)

    optimizer = optim.AdamW(model.parameters(), lr=0.0005, weight_decay=0.01)

    # StepLR: multiply the lr by gamma every step_size epochs
    scheduler = StepLR(
        optimizer,
        step_size=4,
        gamma=0.96
    )

    # Train
    num_epochs = 30
    model, metrics_history = train_model(
        model, criterion, optimizer, scheduler, dataloaders,
        dataset_sizes, device, num_epochs=num_epochs
    )

    # Save the final (best-weights) model
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    final_model_path = f'final_model_{timestamp}.pth'
    torch.save(model.state_dict(), final_model_path)
    print(f"\n{Fore.GREEN}✓ 最终模型已保存为: {final_model_path}{Style.RESET_ALL}")

    # Release cached GPU memory before evaluation
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Evaluate on the test set
    print(f"\n{Fore.CYAN}{Style.BRIGHT}🧪 在测试集上评估模型 🧪{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}{'='*100}{Style.RESET_ALL}")
    print(f"{Fore.WHITE}开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}{Style.RESET_ALL}")

    test_loss = 0.0
    # Per-class correct/total counters
    class_correct = [0.0] * num_classes
    class_total = [0.0] * num_classes

    # Confusion matrix: rows = true class, columns = predicted class
    confusion_matrix = torch.zeros(num_classes, num_classes)

    model.eval()

    # Progress bar showing live loss/accuracy
    test_pbar = tqdm(
        dataloaders['test'],
        desc=f"{Fore.BLUE}{Style.BRIGHT}测试进度{Style.RESET_ALL}",
        bar_format="{desc}: {percentage:3.0f}%|{bar:30}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] {postfix}",
        ncols=120
    )

    total_correct = 0
    total_samples = 0

    # Per-batch metric traces
    batch_losses = []
    batch_accs = []

    with torch.no_grad():
        for inputs, labels in test_pbar:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # Accumulate test loss (sample-weighted)
            batch_loss = loss.item()
            test_loss += batch_loss * inputs.size(0)

            # Batch accuracy
            _, preds = torch.max(outputs, 1)
            correct = (preds == labels).sum().item()
            total_correct += correct
            total_samples += labels.size(0)

            # Record per-batch metrics
            batch_acc = correct / inputs.size(0)
            batch_losses.append(batch_loss)
            batch_accs.append(batch_acc)

            # Update the confusion matrix
            for t, p in zip(labels.view(-1), preds.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1

            # Update per-class accuracy counters.
            # BUG FIX: the original called .squeeze() here, which collapses a
            # batch of size 1 to a 0-dim tensor and makes c[i] raise.
            c = (preds == labels)
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

            # Refresh the progress bar
            current_acc = total_correct / total_samples
            current_loss = test_loss / total_samples

            metrics_str = (
                f"Loss={Fore.YELLOW}{batch_loss:.4f}{Style.RESET_ALL}, "
                f"Acc={Fore.CYAN}{batch_acc*100:.2f}%{Style.RESET_ALL}, "
                f"Avg Loss={Fore.YELLOW}{current_loss:.4f}{Style.RESET_ALL}, "
                f"Avg Acc={Fore.CYAN}{current_acc*100:.2f}%{Style.RESET_ALL}"
            )
            test_pbar.set_postfix_str(metrics_str)

    # Overall test loss/accuracy
    test_loss = test_loss / dataset_sizes['test']
    test_acc = total_correct / dataset_sizes['test']

    # Batch-level mean/std statistics
    batch_loss_mean = np.mean(batch_losses)
    batch_loss_std = np.std(batch_losses)
    batch_acc_mean = np.mean(batch_accs)
    batch_acc_std = np.std(batch_accs)

    # Test summary table
    test_summary = [
        ["测试集大小", f"{dataset_sizes['test']}"],
        ["总体损失", f"{Fore.YELLOW}{test_loss:.4f}{Style.RESET_ALL}"],
        ["总体准确率", f"{Fore.CYAN}{test_acc*100:.2f}%{Style.RESET_ALL}"],
        ["批次平均损失", f"{Fore.YELLOW}{batch_loss_mean:.4f} ± {batch_loss_std:.4f}{Style.RESET_ALL}"],
        ["批次平均准确率", f"{Fore.CYAN}{batch_acc_mean*100:.2f}% ± {batch_acc_std*100:.2f}%{Style.RESET_ALL}"],
        ["测试用时", f"{test_pbar.format_dict['elapsed']:.2f}秒"],
        ["每批次平均用时", f"{test_pbar.format_dict['elapsed']/len(dataloaders['test']):.4f}秒/批"]
    ]

    print(f"\n{Fore.GREEN}{Style.BRIGHT}📊 测试结果摘要 📊{Style.RESET_ALL}\n")
    print(tabulate(test_summary, headers=["指标", "数值"], tablefmt="fancy_grid"))

    # Per-class accuracy (top and bottom 10 are shown)
    print(f"\n{Fore.MAGENTA}{Style.BRIGHT}📊 各类别准确率 📊{Style.RESET_ALL}")

    class_acc = []
    for i in range(num_classes):
        if class_total[i] > 0:
            accuracy = class_correct[i] / class_total[i]
            class_acc.append((i, accuracy, class_total[i], classes[i] if i < len(classes) else f"类别{i}"))

    # Sort by accuracy, best first
    class_acc.sort(key=lambda x: x[1], reverse=True)

    top_classes = class_acc[:10]  # 10 best classes
    bottom_classes = class_acc[-10:]  # 10 worst classes

    # Table for the 10 best classes
    top_table = []
    for idx, acc, total, class_name in top_classes:
        top_table.append([
            idx,
            class_name,
            f"{Fore.GREEN}{acc*100:.2f}%{Style.RESET_ALL}",
            total,
            f"{int(total * acc)}/{total}"
        ])

    print(f"\n{Fore.GREEN}{Style.BRIGHT}准确率最高的10个类别:{Style.RESET_ALL}")
    print(tabulate(
        top_table,
        headers=["类别ID", "类别名称", "准确率", "样本数", "正确/总数"],
        tablefmt="fancy_grid"
    ))

    # Table for the 10 worst classes
    bottom_table = []
    for idx, acc, total, class_name in bottom_classes:
        bottom_table.append([
            idx,
            class_name,
            f"{Fore.RED}{acc*100:.2f}%{Style.RESET_ALL}",
            total,
            f"{int(total * acc)}/{total}"
        ])

    print(f"\n{Fore.RED}{Style.BRIGHT}准确率最低的10个类别:{Style.RESET_ALL}")
    print(tabulate(
        bottom_table,
        headers=["类别ID", "类别名称", "准确率", "样本数", "正确/总数"],
        tablefmt="fancy_grid"
    ))

    # Precision/recall/F1 from the confusion matrix
    print(f"\n{Fore.BLUE}{Style.BRIGHT}📊 混淆矩阵分析 📊{Style.RESET_ALL}")

    precision = torch.zeros(num_classes)
    recall = torch.zeros(num_classes)
    f1_score = torch.zeros(num_classes)

    for i in range(num_classes):
        # Guard against empty columns/rows to avoid division by zero
        precision[i] = confusion_matrix[i, i] / confusion_matrix[:, i].sum() if confusion_matrix[:, i].sum() > 0 else 0
        recall[i] = confusion_matrix[i, i] / confusion_matrix[i, :].sum() if confusion_matrix[i, :].sum() > 0 else 0
        f1_score[i] = 2 * precision[i] * recall[i] / (precision[i] + recall[i]) if (precision[i] + recall[i]) > 0 else 0

    # Macro averages (unweighted mean over classes)
    macro_precision = precision.mean().item()
    macro_recall = recall.mean().item()
    macro_f1 = f1_score.mean().item()

    # Micro averages (for single-label multiclass these all equal accuracy)
    micro_precision = total_correct / total_samples
    micro_recall = total_correct / total_samples
    micro_f1 = 2 * micro_precision * micro_recall / (micro_precision + micro_recall) if (micro_precision + micro_recall) > 0 else 0

    confusion_metrics = [
        ["宏平均精确率", f"{Fore.CYAN}{macro_precision*100:.2f}%{Style.RESET_ALL}"],
        ["宏平均召回率", f"{Fore.CYAN}{macro_recall*100:.2f}%{Style.RESET_ALL}"],
        ["宏平均F1分数", f"{Fore.CYAN}{macro_f1*100:.2f}%{Style.RESET_ALL}"],
        ["微平均精确率", f"{Fore.YELLOW}{micro_precision*100:.2f}%{Style.RESET_ALL}"],
        ["微平均召回率", f"{Fore.YELLOW}{micro_recall*100:.2f}%{Style.RESET_ALL}"],
        ["微平均F1分数", f"{Fore.YELLOW}{micro_f1*100:.2f}%{Style.RESET_ALL}"]
    ]

    print(tabulate(confusion_metrics, headers=["指标", "数值"], tablefmt="fancy_grid"))

    # Save the confusion matrix as a heatmap
    plt.figure(figsize=(12, 10))
    plt.imshow(confusion_matrix.numpy(), cmap='Blues')
    plt.colorbar()
    plt.title('混淆矩阵热力图')
    plt.xlabel('预测类别')
    plt.ylabel('真实类别')
    plt.tight_layout()

    confusion_matrix_path = os.path.join(os.getcwd(), 'runs', f"confusion_matrix_{timestamp}.png")
    plt.savefig(confusion_matrix_path, dpi=300, bbox_inches='tight')
    plt.close()

    print(f"\n{Fore.GREEN}✓ 混淆矩阵热力图已保存到: {confusion_matrix_path}{Style.RESET_ALL}")

    # Bar charts for the best and worst 20 classes
    plt.figure(figsize=(15, 10))

    # Top-20 accuracies
    plt.subplot(2, 1, 1)
    top20_classes = class_acc[:20]
    class_ids = [x[0] for x in top20_classes]
    class_accs = [x[1] * 100 for x in top20_classes]
    class_names = [x[3] for x in top20_classes]

    bars = plt.bar(range(len(class_ids)), class_accs, color='skyblue')
    plt.xticks(range(len(class_ids)), class_ids, rotation=45)
    plt.title('前20个类别的准确率')
    plt.xlabel('类别ID')
    plt.ylabel('准确率 (%)')
    plt.ylim(0, 100)

    # Accuracy labels above each bar
    for i, bar in enumerate(bars):
        plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 1,
                f'{class_accs[i]:.1f}%', ha='center', va='bottom', rotation=0, fontsize=8)

    # Bottom-20 accuracies
    plt.subplot(2, 1, 2)
    bottom20_classes = class_acc[-20:]
    class_ids = [x[0] for x in bottom20_classes]
    class_accs = [x[1] * 100 for x in bottom20_classes]
    class_names = [x[3] for x in bottom20_classes]

    bars = plt.bar(range(len(class_ids)), class_accs, color='salmon')
    plt.xticks(range(len(class_ids)), class_ids, rotation=45)
    plt.title('后20个类别的准确率')
    plt.xlabel('类别ID')
    plt.ylabel('准确率 (%)')
    plt.ylim(0, 100)

    # Accuracy labels above each bar
    for i, bar in enumerate(bars):
        plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 1,
                f'{class_accs[i]:.1f}%', ha='center', va='bottom', rotation=0, fontsize=8)

    plt.tight_layout()
    test_results_path = os.path.join(os.getcwd(), 'runs', f"test_results_{timestamp}.png")
    plt.savefig(test_results_path, dpi=300, bbox_inches='tight')
    plt.close()

    print(f"{Fore.GREEN}✓ 测试结果图表已保存到: {test_results_path}{Style.RESET_ALL}")

    # Detailed JSON test report.
    # BUG FIX: class_accuracies was keyed by the enumeration index of the
    # accuracy-sorted list, not by the actual class id — key by idx instead.
    test_report = {
        "timestamp": timestamp,
        "test_accuracy": float(test_acc),
        "test_loss": float(test_loss),
        "class_accuracies": {int(idx): float(acc) for idx, acc, _, _ in class_acc},
        "top_classes": [{"id": idx, "name": name, "accuracy": float(acc), "samples": int(total)}
                       for idx, acc, total, name in top_classes],
        "bottom_classes": [{"id": idx, "name": name, "accuracy": float(acc), "samples": int(total)}
                          for idx, acc, total, name in bottom_classes],
        "macro_precision": float(macro_precision),
        "macro_recall": float(macro_recall),
        "macro_f1": float(macro_f1),
        "micro_precision": float(micro_precision),
        "micro_recall": float(micro_recall),
        "micro_f1": float(micro_f1)
    }

    import json
    test_report_path = os.path.join(os.getcwd(), 'runs', f"test_report_{timestamp}.json")
    with open(test_report_path, 'w') as f:
        json.dump(test_report, f, indent=4)

    print(f"{Fore.GREEN}✓ 详细测试报告已保存到: {test_report_path}{Style.RESET_ALL}")

    # Announce evaluation completion
    print(f"\n{Fore.CYAN}{Style.BRIGHT}✅ 测试评估完成! ✅{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}{'='*100}{Style.RESET_ALL}")
    print(f"{Fore.WHITE}结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}{Style.RESET_ALL}")

    # Final performance summary
    print(f"\n{Fore.MAGENTA}{Style.BRIGHT}🏆 模型性能总结 🏆{Style.RESET_ALL}")
    # Best validation accuracy from the recorded history
    best_acc = max(metrics_history['val_acc'])
    # BUG FIX: the original referenced time_elapsed, a local of train_model,
    # raising NameError here.  Reconstruct the total from the per-epoch times.
    time_elapsed = sum(metrics_history['epoch_time'])

    summary_table = [
        ["训练集准确率", f"{Fore.CYAN}{metrics_history['train_acc'][-1]*100:.2f}%{Style.RESET_ALL}"],
        ["验证集准确率", f"{Fore.CYAN}{metrics_history['val_acc'][-1]*100:.2f}%{Style.RESET_ALL}"],
        ["测试集准确率", f"{Fore.CYAN}{test_acc*100:.2f}%{Style.RESET_ALL}"],
        ["最佳验证准确率", f"{Fore.GREEN}{best_acc*100:.2f}%{Style.RESET_ALL}"],
        ["最佳验证轮次", f"{metrics_history['val_acc'].index(max(metrics_history['val_acc']))+1}"],
        ["总训练轮数", f"{len(metrics_history['train_loss'])}"],
        ["总训练时间", f"{time_elapsed // 3600:.0f}小时 {(time_elapsed % 3600) // 60:.0f}分 {time_elapsed % 60:.0f}秒"],
        ["模型保存路径", f"{final_model_path}"],
        ["最佳模型保存路径", f"{os.path.join(os.getcwd(), 'model', 'best_model.pth')}"]
    ]

    print(tabulate(summary_table, headers=["指标", "数值"], tablefmt="fancy_grid"))

    print(f"\n{Fore.GREEN}{Style.BRIGHT}🎉 任务完成! 🎉{Style.RESET_ALL}")

if __name__ == "__main__":
    main()