import torch
import torch.nn as nn
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
import time
import gc

from split_atte import trainloader, testloader, device  # data loaders and compute device are needed here



# Training function — runs on the GPU with mixed-precision (AMP) training
def train_model(model, epochs=100, lr=0.001):
    """Train ``model`` with mixed precision, evaluate each epoch, and
    checkpoint the best-accuracy weights.

    Uses the module-level ``trainloader``/``testloader``/``device`` from
    ``split_atte``. AdamW + cosine annealing over the whole run, gradient
    clipping at norm 1.0, and AMP via ``GradScaler``/``autocast``.

    Parameters
    ----------
    model : nn.Module
        Network to train; moved to ``device`` in place.
    epochs : int
        Number of epochs (also the cosine-annealing period ``T_max``).
    lr : float
        Initial learning rate for AdamW.

    Returns
    -------
    nn.Module
        The model with its last-epoch weights. The best test-accuracy
        state dict is additionally saved to ``resnest_best.pth``.
    """
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=1e-4)
    # One scheduler step per epoch (called after evaluation below).
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

    # Mixed-precision training.
    # NOTE(review): ``torch.cuda.amp`` is deprecated in favor of ``torch.amp``
    # in recent PyTorch releases; kept as-is to match this file's imports.
    scaler = GradScaler()

    # Best test accuracy (percent) seen so far, for checkpointing.
    best_acc = 0.0

    for epoch in range(epochs):
        start_time = time.time()

        # ---- training phase ----
        model.train()
        train_loss = 0.0
        correct = 0
        total = 0

        pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
        for batch_idx, (inputs, targets) in enumerate(pbar):
            inputs, targets = inputs.to(device), targets.to(device)

            optimizer.zero_grad()

            # Forward pass under autocast for mixed precision.
            with autocast():
                outputs = model(inputs)
                loss = criterion(outputs, targets)

            # Backward pass with loss scaling.
            scaler.scale(loss).backward()

            # Unscale first so the clip threshold applies to the true
            # gradients, then clip to guard against gradient explosion.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            scaler.step(optimizer)
            scaler.update()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # BUG FIX: the running loss previously divided the sum of
            # per-batch *mean* losses by the *sample* count, disagreeing with
            # the per-epoch print (which divides by the batch count). Divide
            # by batches processed so both report the same quantity.
            pbar.set_postfix({
                'loss': f'{train_loss/(batch_idx+1):.4f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

            # Drop per-batch tensors promptly to reduce peak GPU memory.
            del inputs, targets, outputs, loss, predicted

        train_acc = 100. * correct / total

        # ---- evaluation phase ----
        model.eval()
        test_loss = 0.0
        correct = 0
        total = 0

        with torch.no_grad():
            pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
            for batch_idx, (inputs, targets) in enumerate(pbar):
                inputs, targets = inputs.to(device), targets.to(device)

                with autocast():
                    outputs = model(inputs)
                    loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                # Same fix as the training bar: mean loss per batch seen.
                pbar.set_postfix({
                    'loss': f'{test_loss/(batch_idx+1):.4f}',
                    'acc': f'{100.*correct/total:.2f}%'
                })

                del inputs, targets, outputs, loss, predicted

        test_acc = 100. * correct / total

        # Checkpoint whenever the test accuracy improves.
        if test_acc > best_acc:
            print(f'保存最佳模型，测试精度: {test_acc:.2f}%')
            best_acc = test_acc
            torch.save(model.state_dict(), 'resnest_best.pth')

        scheduler.step()

        # Per-epoch summary (losses averaged over batches).
        epoch_time = time.time() - start_time
        print(f'Epoch: {epoch+1}/{epochs}, '
              f'Train Loss: {train_loss/len(trainloader):.4f}, Train Acc: {train_acc:.2f}%, '
              f'Test Loss: {test_loss/len(testloader):.4f}, Test Acc: {test_acc:.2f}%, '
              f'Time: {epoch_time:.2f}s')

        # Release Python garbage and cached GPU memory between epochs.
        gc.collect()
        torch.cuda.empty_cache()

    return model