import torch
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
from torch import nn
import torchvision.transforms as transforms

from model_vit_radimageNet.dataset_SUV_max_center import MRIDataset  
from vit import ViT

def train_vgraph(config):
    """Train the ViT classifier on the MRI dataset with mixed precision.

    Args:
        config: dict with keys
            'bs'     -- batch size for both loaders (defaults to 2 if absent),
            'lr'     -- AdamW learning rate,
            'epochs' -- number of training epochs.

    Side effects:
        Saves the weights with the best validation accuracy to
        "best_vgraph.pth" in the current working directory.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    model = ViT(image_size=224, patch_size=16, num_classes=2, dim=768,
                depth=12, heads=12, mlp_dim=3072).to(device)
    print(f"Model created with {sum(p.numel() for p in model.parameters()):,} parameters")

    train_dataset = MRIDataset(
        root_dir='D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/train',
        augment=True,
        downsample_ratio=0.5)

    # Validation data must NOT be augmented, otherwise the reported metrics
    # are noisy and not comparable between epochs (original code had
    # augment=True here).
    val_dataset = MRIDataset(
        root_dir='D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/val',
        augment=False,
        downsample_ratio=0.5)

    # BUG FIX: batch size was hard-coded to 2 while config['bs'] was silently
    # ignored. Fall back to 2 for callers that omit the key.
    batch_size = config.get('bs', 2)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # Evaluation order does not matter for the metrics; keep it deterministic.
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    print(f"Training on {len(train_dataset)} samples, validating on {len(val_dataset)} samples")

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'])
    # Halve the LR when validation accuracy plateaus for `patience` epochs.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='max',
        factor=0.5,
        patience=3,
        verbose=True
    )
    scaler = GradScaler()

    # Training loop: track the best validation accuracy for checkpointing.
    best_val_acc = 0.0
    for epoch in range(config['epochs']):
        # Re-sample the class-1 examples each epoch (class-0 stays fixed);
        # this improves class balance across epochs.
        train_dataset.reset_epoch()
        print(f"\n{'=' * 40}")
        print(f"Epoch {epoch+1}/{config['epochs']}")
        print(f"{'=' * 40}")

        # ---- training phase ----
        model.train()
        running_loss = 0.0
        train_correct = 0
        train_total = 0

        for i, (images, rois, labels) in enumerate(train_loader):
            images = images.to(device)
            rois = rois.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            # Mixed-precision forward pass; scaler handles the backward pass
            # so small fp16 gradients do not underflow.
            with autocast():
                outputs = model(images, rois)
                loss = criterion(outputs, labels)

            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            # Running training accuracy.
            _, predicted = torch.max(outputs.data, 1)
            train_total += labels.size(0)
            train_correct += (predicted == labels).sum().item()

            running_loss += loss.item()

            # Progress report every 50 batches.
            if i % 50 == 49:
                batch_acc = (predicted == labels).float().mean().item()
                print(f"  Batch {i+1}/{len(train_loader)} | "
                    f"Batch Loss: {loss.item():.4f} | "
                    f"Batch Acc: {batch_acc:.4f}")

        # Epoch-level training metrics.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = train_correct / train_total

        val_acc, val_loss = evaluate(model, val_loader, criterion, device)
        # ReduceLROnPlateau in mode='max' steps on the monitored metric.
        scheduler.step(val_acc)

        print(f"\nEpoch Summary:")
        print(f"  Train Loss: {avg_train_loss:.4f} | Train Acc: {train_acc:.4f}")
        print(f"  Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")
        print(f"  Learning Rate: {optimizer.param_groups[0]['lr']:.2e}")

        # Checkpoint the best model by validation accuracy.
        if val_acc > best_val_acc:
            torch.save(model.state_dict(), "best_vgraph.pth")
            best_val_acc = val_acc
            print(f"  New best model saved with validation accuracy: {val_acc:.4f}")

    print(f"\n{'=' * 40}")
    print(f"Training completed! Best validation accuracy: {best_val_acc:.4f}")

def evaluate(model, loader, criterion, device):
    """Run one full pass over `loader` and return (accuracy, mean batch loss).

    Puts the model into eval mode and disables gradient tracking for the
    whole pass; the caller is responsible for switching back to train().
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for images, rois, labels in loader:
            images, rois, labels = (t.to(device) for t in (images, rois, labels))

            logits = model(images, rois)
            loss_sum += criterion(logits, labels).item()

            preds = logits.argmax(dim=1)
            n_seen += labels.size(0)
            n_correct += (preds == labels).sum().item()

    return n_correct / n_seen, loss_sum / len(loader)

# Example configuration / script entry point.
if __name__ == "__main__":
    hyperparams = dict(
        bs=4,
        lr=3e-5,
        epochs=10,
    )
    train_vgraph(hyperparams)