import os
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
from model import DogClassifier
from tqdm import tqdm
import matplotlib.pyplot as plt
from PIL import Image
import shutil
import time
from datetime import datetime
from colorama import Fore, Style, init
from tabulate import tabulate
import psutil

# Initialize colorama (autoreset restores default terminal style after each print)
init(autoreset=True)

def train_model(model, train_loader, val_loader, criterion, optimizer, scheduler, num_epochs=30, device='cuda', patience=10):
    """Fine-tune ``model`` with per-batch LR scheduling and early stopping.

    Args:
        model: classifier exposing ``unfreeze_features(num_layers)`` (the
            project's ``DogClassifier``); moved to ``device`` by the caller.
        train_loader: DataLoader yielding ``(inputs, labels)`` training batches.
        val_loader: DataLoader yielding ``(inputs, labels)`` validation batches.
        criterion: loss function (e.g. ``nn.CrossEntropyLoss``).
        optimizer: optimizer over ``model.parameters()``.
        scheduler: per-batch LR scheduler. The caller passes ``OneCycleLR``
            built with ``steps_per_epoch=len(train_loader)``, which must be
            stepped once per optimizer step (i.e. per batch), as done below.
        num_epochs: maximum number of training epochs.
        device: device identifier the batches are moved to.
        patience: epochs without validation-accuracy improvement before
            training stops early.

    Returns:
        Tuple ``(train_losses, train_accs, val_losses, val_accs)`` of
        per-epoch history lists.

    Side effects:
        Saves the best checkpoint to ``model/best_model.pth`` (relative to the
        current working directory) and prints progress tables to stdout.
    """
    best_acc = 0.0
    train_losses = []
    val_losses = []
    train_accs = []
    val_accs = []
    start_time = time.time()

    # Early-stopping state: epochs elapsed since the last improvement.
    counter = 0
    early_stop = False

    # Unfreeze the last 3 convolutional blocks for fine-tuning.
    print(f"{Fore.CYAN}{Style.BRIGHT}解冻最后3个卷积块进行微调...{Style.RESET_ALL}")
    model.unfreeze_features(num_layers=3)

    # Make sure the checkpoint folder exists (relative to CWD).
    os.makedirs('model', exist_ok=True)

    # Banner describing the run configuration.
    print("\n" + "="*80)
    print(f"{Fore.CYAN}{Style.BRIGHT}✨ 狗品种识别训练任务 ✨{Style.RESET_ALL}".center(90))
    print(f"{Fore.YELLOW}模型: {Style.BRIGHT}ResNet101{Style.RESET_ALL}".center(80))
    print(f"{Fore.YELLOW}总轮次: {Style.BRIGHT}{num_epochs}{Style.RESET_ALL}".center(80))
    print(f"{Fore.YELLOW}早停耐心值: {Style.BRIGHT}{patience}{Style.RESET_ALL}".center(80))
    print("="*80 + "\n")

    # Snapshot of host resources (CPU/RAM, and GPU if present).
    system_info = {
        "CPU使用率": f"{psutil.cpu_percent()}%",
        "内存使用率": f"{psutil.virtual_memory().percent}%",
        "可用内存": f"{psutil.virtual_memory().available / (1024**3):.2f} GB",
        "总内存": f"{psutil.virtual_memory().total / (1024**3):.2f} GB"
    }

    if torch.cuda.is_available():
        system_info["GPU设备"] = torch.cuda.get_device_name(0)
        system_info["GPU显存总量"] = f"{torch.cuda.get_device_properties(0).total_memory / (1024**3):.2f} GB"
        system_info["GPU显存使用"] = f"{torch.cuda.memory_allocated(0) / (1024**3):.2f} GB"
        system_info["GPU显存占用率"] = f"{torch.cuda.memory_allocated(0) / torch.cuda.get_device_properties(0).total_memory * 100:.2f}%"

    print(f"\n{Fore.CYAN}{Style.BRIGHT}🖥️ 系统资源信息 🖥️{Style.RESET_ALL}\n")
    system_table = [[k, v] for k, v in system_info.items()]
    print(tabulate(system_table, headers=["指标", "数值"], tablefmt="fancy_grid"))
    print("\n🚀 开始训练 🚀\n")

    for epoch in range(num_epochs):
        epoch_start_time = time.time()
        # ---- Training phase ----
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        train_bar = tqdm(train_loader, desc=f'{Fore.CYAN}Epoch [{epoch+1}/{num_epochs}] 训练{Style.RESET_ALL}')
        for inputs, labels in train_bar:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping guards against exploding gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()

            # BUG FIX: OneCycleLR is built with steps_per_epoch=len(train_loader)
            # and must be stepped once per batch. The original code stepped it
            # once per epoch, so the LR never moved through its one-cycle
            # schedule (it stayed near the initial warm-up value).
            scheduler.step()

            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()

            train_bar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100.*correct/total:.2f}%',
                'LR': f'{scheduler.get_last_lr()[0]:.6f}'
            })

        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100. * correct / total
        train_losses.append(epoch_loss)
        train_accs.append(epoch_acc)

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        correct = 0
        total = 0

        with torch.no_grad():
            val_bar = tqdm(val_loader, desc=f'{Fore.YELLOW}Epoch [{epoch+1}/{num_epochs}] 验证{Style.RESET_ALL}')
            for inputs, labels in val_bar:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)

                val_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()

                val_bar.set_postfix({
                    'Loss': f'{loss.item():.4f}',
                    'Acc': f'{100.*correct/total:.2f}%'
                })

        val_loss = val_loss / len(val_loader)
        val_acc = 100. * correct / total
        val_losses.append(val_loss)
        val_accs.append(val_acc)

        # Wall-clock time of this epoch.
        epoch_time = time.time() - epoch_start_time

        # Per-epoch summary, rendered as a table.
        epoch_info = [
            ["训练损失", f"{Fore.CYAN}{epoch_loss:.4f}{Style.RESET_ALL}"],
            ["训练准确率", f"{Fore.CYAN}{epoch_acc:.2f}%{Style.RESET_ALL}"],
            ["验证损失", f"{Fore.YELLOW}{val_loss:.4f}{Style.RESET_ALL}"],
            ["验证准确率", f"{Fore.YELLOW}{val_acc:.2f}%{Style.RESET_ALL}"],
            ["学习率", f"{scheduler.get_last_lr()[0]:.6f}"],
            ["本轮用时", f"{epoch_time:.2f}秒"]
        ]

        print(f"\n{Fore.GREEN}{Style.BRIGHT}📈 第 {epoch+1}/{num_epochs} 轮结果 📈{Style.RESET_ALL}")
        print(tabulate(epoch_info, headers=["指标", "数值"], tablefmt="fancy_grid"))

        # Save a checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'best_acc': best_acc,
            }, os.path.join('model', 'best_model.pth'))
            print(f"{Fore.GREEN}{Style.BRIGHT}✨ 保存最佳模型 (验证准确率: {best_acc:.2f}%){Style.RESET_ALL}")
            # Reset the early-stopping counter on improvement.
            counter = 0
        else:
            # No improvement this epoch: advance the early-stopping counter.
            counter += 1
            print(f"{Fore.YELLOW}验证准确率未提高，早停计数: {counter}/{patience}{Style.RESET_ALL}")

        # Early-stopping check.
        if counter >= patience:
            print(f"{Fore.RED}{Style.BRIGHT}🛑 早停触发! 验证准确率已连续 {patience} 轮未提高{Style.RESET_ALL}")
            early_stop = True
            break

    # Training finished: print the overall summary.
    total_time = time.time() - start_time
    hours, remainder = divmod(total_time, 3600)
    minutes, seconds = divmod(remainder, 60)

    print("\n" + "="*80)
    print(f"{Fore.GREEN}{Style.BRIGHT}🎉 训练完成! 🎉{Style.RESET_ALL}".center(90))
    if early_stop:
        print(f"{Fore.YELLOW}提前停止于第 {epoch+1}/{num_epochs} 轮{Style.RESET_ALL}".center(85))
    print(f"{Fore.YELLOW}最佳验证准确率: {Style.BRIGHT}{best_acc:.2f}%{Style.RESET_ALL}".center(85))
    print(f"{Fore.YELLOW}总训练时间: {Style.BRIGHT}{int(hours)}小时 {int(minutes)}分钟 {seconds:.2f}秒{Style.RESET_ALL}".center(95))
    print(f"{Fore.YELLOW}模型保存路径: {Style.BRIGHT}{os.path.abspath('model/best_model.pth')}{Style.RESET_ALL}".center(110))
    print("="*80 + "\n")

    return train_losses, train_accs, val_losses, val_accs

def plot_training_history(train_losses, train_accs, val_losses, val_accs):
    """Plot per-epoch loss and accuracy curves side by side and write the
    figure to ``training_history.png`` in the current working directory."""
    # Each panel: (train series, val series, y-axis label, panel title).
    panels = (
        ((train_losses, 'Train Loss'), (val_losses, 'Val Loss'),
         'Loss', 'Training and Validation Loss'),
        ((train_accs, 'Train Acc'), (val_accs, 'Val Acc'),
         'Accuracy (%)', 'Training and Validation Accuracy'),
    )

    plt.figure(figsize=(12, 4))
    for position, (train_series, val_series, y_label, title) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        for values, legend_label in (train_series, val_series):
            plt.plot(values, label=legend_label)
        plt.xlabel('Epoch')
        plt.ylabel(y_label)
        plt.legend()
        plt.title(title)

    plt.tight_layout()
    plt.savefig('training_history.png')
    plt.close()

def main():
    """Run the full training pipeline.

    Prepares the output directories, loads the train/val ``ImageFolder``
    datasets from ``dog_data/``, saves the class-index mapping, builds the
    model/optimizer/scheduler, trains, and plots the training history.

    Raises:
        FileNotFoundError: if the train/val directories or class folders are
            missing.
    """
    # Use the GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'使用设备: {device}')

    # Single source of truth for the epoch budget. OneCycleLR's total step
    # count (epochs * steps_per_epoch) must match what train_model runs, so
    # the same constant feeds both the scheduler and the training call below
    # (previously the literal 50 was duplicated and could drift apart).
    num_epochs = 50

    # Recreate the runs folder from scratch.
    runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')
    if os.path.exists(runs_dir):
        print(f"清理runs文件夹: {runs_dir}")
        shutil.rmtree(runs_dir)
    os.makedirs(runs_dir, exist_ok=True)

    # Make sure the model checkpoint folder exists.
    model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model')
    os.makedirs(model_dir, exist_ok=True)
    print(f"模型将保存在: {model_dir}")

    # Preprocessing and augmentation; normalization uses ImageNet statistics.
    train_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    val_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Locate the dataset next to this script.
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dog_data')
    print(f"数据集路径: {data_dir}")

    train_dir = os.path.join(data_dir, 'train')
    val_dir = os.path.join(data_dir, 'val')

    # Fail fast if the expected directory layout is missing.
    if not os.path.exists(train_dir) or not os.path.exists(val_dir):
        raise FileNotFoundError(f"训练集目录不存在！\n训练集路径: {train_dir}\n验证集路径: {val_dir}".replace("训练集目录不存在", "训练集或验证集目录不存在"))

    # Each class is a sub-folder of the training directory.
    train_classes = [d for d in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, d))]
    if not train_classes:
        raise FileNotFoundError("训练集目录中没有找到任何类别文件夹！")

    print(f"发现的类别数量: {len(train_classes)}")
    print("类别列表:")
    for cls in train_classes:
        print(f"- {cls}")

    train_dataset = datasets.ImageFolder(train_dir, transform=train_transform)
    val_dataset = datasets.ImageFolder(val_dir, transform=val_transform)

    print(f"\n数据集信息:")
    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")
    print(f"类别数量: {len(train_dataset.classes)}")

    # Persist the index -> class-name mapping for inference-time lookup.
    class_names = {i: class_name for i, class_name in enumerate(train_dataset.classes)}
    with open('class_names.json', 'w', encoding='utf-8') as f:
        json.dump(class_names, f, indent=4)

    # Data loaders.
    train_loader = DataLoader(
        train_dataset,
        batch_size=16,
        shuffle=True,
        num_workers=4,
        pin_memory=True
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        pin_memory=True
    )

    # Build the model.
    model = DogClassifier(num_classes=len(train_dataset.classes))
    model = model.to(device)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(
        model.parameters(),
        lr=0.0001,
        weight_decay=0.01,
        amsgrad=True
    )

    # One-cycle LR schedule; total steps = num_epochs * len(train_loader),
    # so it expects one scheduler.step() per training batch.
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=0.001,
        epochs=num_epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3,
        div_factor=10,
        final_div_factor=100
    )

    # Early-stopping patience: stop after this many epochs with no
    # validation-accuracy improvement.
    patience = 10

    # Train.
    train_losses, train_accs, val_losses, val_accs = train_model(
        model, train_loader, val_loader, criterion, optimizer, scheduler,
        num_epochs=num_epochs, device=device, patience=patience
    )

    # Plot the training history.
    plot_training_history(train_losses, train_accs, val_losses, val_accs)

# Entry point: run the full training pipeline when executed as a script.
if __name__ == '__main__':
    main()