import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from model import FruitClassifier
import torch.cuda.amp as amp
from trainer import PrettyTrainer
from collections import Counter
from tqdm import tqdm
from colorama import Fore, Style
from tabulate import tabulate

# Data preprocessing: training-time augmentation pipeline.
# Normalization uses the standard ImageNet mean/std, matching the
# pretrained ResNet50 backbone used by FruitClassifier.
train_transform = transforms.Compose([
    transforms.Resize((224, 224)),  # standard ResNet50 input size
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(15),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Validation/test transform: deterministic resize + normalization only
# (no augmentation), so evaluation metrics are reproducible.
val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# 数据加载
# Data loading
def load_data(data_dir, batch_size=64, is_train=True, num_workers=2):
    """Build an ``ImageFolder`` dataset for *data_dir* and wrap it in a DataLoader.

    Args:
        data_dir: directory laid out with one sub-folder per class
            (the ``ImageFolder`` convention).
        batch_size: samples per batch.
        is_train: when True, applies the augmenting ``train_transform`` and
            shuffles; otherwise applies the deterministic ``val_transform``
            with no shuffling.
        num_workers: DataLoader worker processes. Defaults to 2, the value
            previously hard-coded here, so existing callers are unaffected.

    Returns:
        A ``(DataLoader, ImageFolder)`` tuple — the dataset is returned too
        so callers can inspect ``classes`` / ``targets``.
    """
    dataset = datasets.ImageFolder(
        root=data_dir,
        transform=train_transform if is_train else val_transform
    )
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=is_train,
        num_workers=num_workers,
        pin_memory=True  # speeds up host-to-GPU transfer when CUDA is used
    )
    return loader, dataset

def get_class_distribution(dataset):
    """Return a mapping from class name to number of samples in *dataset*.

    Expects an ``ImageFolder``-like object exposing ``targets`` (one label
    index per sample) and ``classes`` (index -> class-name list).
    """
    label_counts = Counter(dataset.targets)
    names = dataset.classes
    return {names[label]: count for label, count in label_counts.items()}

def train_with_amp(model, train_loader, val_loader, criterion, optimizer, scheduler, num_epochs, device, classes, trainer):
    """Train *model* with automatic mixed precision (AMP) and early stopping.

    For up to ``num_epochs`` epochs: runs one training pass under
    ``amp.autocast`` with gradient scaling, then a full validation pass,
    steps the LR scheduler, checkpoints through *trainer* whenever
    validation accuracy improves, and stops early after 10 epochs with no
    improvement. Progress is rendered with tqdm bars and tabulate tables.

    Args:
        model: network to train; the caller must have moved it to *device*.
        train_loader: DataLoader yielding (inputs, labels) training batches.
        val_loader: DataLoader yielding (inputs, labels) validation batches.
        criterion: loss function applied to model outputs.
        optimizer: optimizer; its first param group's LR is shown in logs.
        scheduler: LR scheduler. ``ReduceLROnPlateau`` is stepped with the
            validation accuracy; any other scheduler is stepped once per epoch.
        num_epochs: maximum number of epochs to run.
        device: torch device that inputs/labels are moved to.
        classes: class-name list. NOTE(review): unused in this function body.
        trainer: PrettyTrainer used for epoch headers, checkpointing,
            metric history, and curve plotting.

    Returns:
        The best validation accuracy (percent) observed.
    """
    scaler = amp.GradScaler()
    best_val_acc = 0

    # Early-stopping parameters
    early_stop_patience = 10  # stop after 10 consecutive epochs without val-acc improvement
    early_stop_counter = 0

    try:
        for epoch in range(1, num_epochs + 1):
            # Trainer prints the epoch header (replaces manual printing)
            trainer.start_epoch(epoch, optimizer.param_groups[0]['lr'])

            # --- Training phase ---
            model.train()
            train_loss = 0
            train_correct = 0
            train_total = 0

            # tqdm progress bar with a custom, prettier bar format
            train_pbar = tqdm(train_loader, 
                            desc=f"{Fore.GREEN}训练进度{Style.RESET_ALL}",
                            bar_format='{desc}: {percentage:3.0f}%|{bar:50}{r_bar}',
                            leave=True)

            for inputs, labels in train_pbar:
                inputs, labels = inputs.to(device), labels.to(device)

                # Clear accumulated gradients
                optimizer.zero_grad()

                # Forward pass under autocast for mixed-precision compute
                with amp.autocast():
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)

                # Scaled backward pass + optimizer step via GradScaler
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()

                # Accumulate running statistics
                train_loss += loss.item()
                _, predicted = outputs.max(1)
                train_total += labels.size(0)
                train_correct += predicted.eq(labels).sum().item()

                # Update the progress-bar postfix.
                # NOTE(review): the loss shown here divides by sample count,
                # while the epoch summary below divides by batch count — the
                # two displayed loss values are on different scales.
                train_acc = 100. * train_correct / train_total
                train_pbar.set_postfix_str(f"L={train_loss/train_total:.4f} acc={train_acc:.2f}% LR={optimizer.param_groups[0]['lr']:.1e}")

            train_pbar.close()

            # Report GPU memory usage on its own line (after the bar closes)
            if torch.cuda.is_available():
                allocated = torch.cuda.memory_allocated() / 1024**3
                reserved = torch.cuda.memory_reserved() / 1024**3
                print(f"\nGPU内存: 已分配 {allocated:.2f} GB, 已保留 {reserved:.2f} GB")

            # Epoch-level training metrics (mean loss per batch)
            train_loss = train_loss / len(train_loader)
            train_acc = 100. * train_correct / train_total

            # --- Validation phase ---
            model.eval()
            val_loss = 0
            val_correct = 0
            val_total = 0

            # Matching tqdm progress bar for validation
            val_pbar = tqdm(val_loader, 
                          desc=f"{Fore.BLUE}验证进度{Style.RESET_ALL}",
                          bar_format='{desc}: {percentage:3.0f}%|{bar:50}{r_bar}',
                          leave=True)

            with torch.no_grad():
                with amp.autocast():
                    for inputs, labels in val_pbar:
                        inputs, labels = inputs.to(device), labels.to(device)
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                        val_loss += loss.item()
                        _, predicted = outputs.max(1)
                        val_total += labels.size(0)
                        val_correct += predicted.eq(labels).sum().item()

                        # Update the progress-bar postfix
                        val_acc = 100. * val_correct / val_total
                        val_pbar.set_postfix_str(f"L={val_loss/val_total:.4f} acc={val_acc:.2f}%")

            val_pbar.close()

            # Epoch-level validation metrics (mean loss per batch)
            val_loss = val_loss / len(val_loader)
            val_acc = 100. * val_correct / val_total

            # Step the LR scheduler; ReduceLROnPlateau needs the metric
            if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(val_acc)
            else:
                scheduler.step()

            # Save a checkpoint whenever validation accuracy improves
            is_best = val_acc > best_val_acc
            if is_best:
                best_val_acc = val_acc
                trainer.best_val_acc = val_acc
                trainer.best_epoch = epoch
                trainer.save_checkpoint(model, optimizer, scheduler, is_best=True, val_acc=val_acc)
                print(f"\n{Fore.YELLOW}🌟 新的最佳模型！准确率: {val_acc:.2f}%{Style.RESET_ALL}")
                # Reset the early-stopping counter
                early_stop_counter = 0
            else:
                # No improvement: advance the early-stopping counter
                early_stop_counter += 1
                print(f"{Fore.YELLOW}验证准确率未提升，早停计数: {early_stop_counter}/{early_stop_patience}{Style.RESET_ALL}")

            # Stop early once patience is exhausted
            if early_stop_counter >= early_stop_patience:
                print(f"\n{Fore.YELLOW}⚠️ 早停触发！连续{early_stop_patience}轮验证准确率未提升{Style.RESET_ALL}")
                print(f"{Fore.GREEN}✓ 训练提前结束，使用最佳模型（第{trainer.best_epoch}轮，准确率: {best_val_acc:.2f}%）{Style.RESET_ALL}")
                break

            # Print this epoch's results as a table
            print(f"\n{Fore.GREEN}📊 训练结果 📊{Style.RESET_ALL}")
            metrics = {
                'train_loss': train_loss,
                'train_acc': train_acc,
                'val_loss': val_loss,
                'val_acc': val_acc,
                'best_acc': best_val_acc
            }
            table = [
                ["训练损失", f"{train_loss:.4f}"],
                ["训练准确率", f"{train_acc:.2f}%"],
                ["验证损失", f"{val_loss:.4f}"],
                ["验证准确率", f"{val_acc:.2f}%"],
                ["最佳准确率", f"{best_val_acc:.2f}%"]
            ]
            print(tabulate(table, headers=["指标", "数值"], tablefmt="fancy_grid"))

            # Record metrics on the trainer so curves can be plotted later
            if not hasattr(trainer, 'history'):
                trainer.history = {k: [] for k in metrics.keys()}

            for k, v in metrics.items():
                trainer.history[k].append(v)

            # Plot training curves every `plot_interval` epochs
            if epoch % trainer.plot_interval == 0:
                trainer.plot_training_curves()

            # Release cached GPU memory between epochs
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    except KeyboardInterrupt:
        trainer.log_warning("训练被用户中断")
        # NOTE(review): if interrupted before the first validation pass
        # completes, `val_acc` is unbound here and this raises NameError.
        trainer.save_checkpoint(model, optimizer, scheduler, is_best=False, val_acc=val_acc, interrupted=True)
    except Exception as e:
        trainer.log_error(f"训练过程出错: {str(e)}")
        raise

    return best_val_acc

def main():
    """Two-stage training pipeline entry point.

    Stage 1 trains the classifier head of a ResNet50-based FruitClassifier;
    stage 2 reloads the best checkpoint, unfreezes the last two backbone
    layers, and fine-tunes with per-group learning rates. Finally the best
    checkpoint is evaluated on the test set under AMP autocast.
    """
    # Fix random seeds (and make cuDNN deterministic) for reproducibility
    torch.manual_seed(42)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(42)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Select compute device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Dataset directories (ImageFolder layout: one sub-folder per class)
    train_dir = 'plant_data/Train_Set_Folder'
    val_dir = 'plant_data/Validation_Set_Folder'
    test_dir = 'plant_data/Test_Set_Folder'

    # Trainer handles logging, checkpoints, and plots
    trainer = PrettyTrainer(model_name='FruitClassifier')

    # Build the three DataLoaders (augmented transform for training only)
    train_loader, train_dataset = load_data(train_dir, is_train=True)
    val_loader, val_dataset = load_data(val_dir, is_train=False)
    test_loader, test_dataset = load_data(test_dir, is_train=False)

    # Split sizes and proportions
    train_size = len(train_dataset)
    val_size = len(val_dataset)
    test_size = len(test_dataset)
    total_size = train_size + val_size + test_size

    # Per-class sample counts for the training split
    class_distribution = get_class_distribution(train_dataset)

    # Summarize the dataset through the trainer
    data_info = {
        "总图片数量": total_size,
        "类别数量": len(train_dataset.classes),
        "训练集比例": f"{train_size/total_size*100:.1f}%",
        "验证集比例": f"{val_size/total_size*100:.1f}%",
        "测试集比例": f"{test_size/total_size*100:.1f}%",
        "class_distribution": class_distribution
    }
    trainer.data_summary(data_info)

    # Build the model and move it to the compute device
    model = FruitClassifier(model_name='resnet50')
    model = model.to(device)

    # Cross-entropy with label smoothing to soften hard targets
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # AdamW: Adam with decoupled weight decay
    optimizer = optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)

    # Cosine-annealing schedule with warm restarts
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, 
        T_0=5,  # epochs until the first restart
        T_mult=2,  # period multiplier after each restart
        eta_min=1e-6  # floor learning rate
    )

    # Summarize the model through the trainer
    model_info = {
        "model_type": "FruitClassifier (基于ResNet50)",
        "total_params": sum(p.numel() for p in model.parameters()),
        "trainable_params": sum(p.numel() for p in model.parameters() if p.requires_grad),
        "optimizer": "AdamW (weight_decay=1e-2)",
        "learning_rate": "1e-3",
        "loss_function": "CrossEntropyLoss (label_smoothing=0.1)"
    }
    trainer.model_summary(model_info)

    # Maximum epochs per stage (early stopping may end a stage sooner)
    num_epochs = 50

    try:
        # Stage 1: train the classifier head
        trainer.log("\n开始第一阶段：训练分类器层...")
        best_acc = train_with_amp(
            model, train_loader, val_loader, criterion, optimizer, scheduler,
            num_epochs, device, train_dataset.classes, trainer
        )

        # Stage 2: fine-tune the feature extractor
        trainer.log("\n开始第二阶段：微调特征提取器...")

        # Reload the best stage-1 checkpoint before fine-tuning
        checkpoint = torch.load(os.path.join(trainer.model_dir, 'best_model.pth'))
        model.load_state_dict(checkpoint['model_state_dict'])

        # Unfreeze the last two backbone layers
        model.unfreeze_features(unfreeze_last_n_layers=2)

        # Fresh optimizer: smaller LR for the backbone, larger for the head
        optimizer = optim.AdamW([
            {'params': model.base_model.parameters(), 'lr': 1e-4},
            {'params': model.classifier.parameters(), 'lr': 1e-3}
        ], weight_decay=1e-2)

        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=5, T_mult=2, eta_min=1e-6
        )

        # Fine-tuning run
        best_acc = train_with_amp(
            model, train_loader, val_loader, criterion, optimizer, scheduler,
            num_epochs, device, train_dataset.classes, trainer
        )

        # Final evaluation: reload the overall best checkpoint
        trainer.log("\n开始最终测试...")
        checkpoint = torch.load(os.path.join(trainer.model_dir, 'best_model.pth'))
        model.load_state_dict(checkpoint['model_state_dict'])

        # Evaluate on the test set under AMP autocast
        model.eval()
        test_loss = 0
        test_correct = 0
        test_total = 0

        with torch.no_grad():
            with amp.autocast():
                for inputs, labels in test_loader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)

                    test_loss += loss.item()
                    _, predicted = outputs.max(1)
                    test_total += labels.size(0)
                    test_correct += predicted.eq(labels).sum().item()

        # Mean loss per batch and overall accuracy
        test_loss = test_loss / len(test_loader)
        test_acc = 100. * test_correct / test_total

        trainer.log(f"\n最终测试结果:")
        trainer.log(f"测试损失: {test_loss:.4f}")
        trainer.log(f"测试准确率: {test_acc:.2f}%")

    except KeyboardInterrupt:
        trainer.log_warning("训练被用户中断")
    except Exception as e:
        trainer.log_error(f"训练过程出错: {str(e)}")
    finally:
        # Always let the trainer finalize logs/plots, even on failure
        trainer.finish_training()

# Script entry point: run the full two-stage training pipeline.
if __name__ == '__main__':
    main()