import os
import time

import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from tqdm import tqdm

from dataset_SUVmax import MRIDataset
from multimodal_efficientnet import MultiModalEfficientNet

# ================== Configuration ==================
BATCH_SIZE = 4  # batch size
EPOCHS = 30  # number of training epochs
LR = 1e-3  # learning rate
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # device selection
LOG_INTERVAL = 5  # how often (in batches) validation predictions are printed

print(" ****** loading dataset ****** ")
data_transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Resize((224, 224)),
                transforms.Normalize([0.5], [0.5]),
                # ToTensor adds a leading channel dim; drop it so the dataset
                # can stack slices into (M, D, H, W) itself.
                transforms.Lambda(lambda x: x.squeeze(0)),
                ])

# Data must be pre-split into train / val / test directories.
train_dataset = MRIDataset(root_dir='./datasets/pT/three_label_data/train', transform=data_transform)
val_dataset = MRIDataset(root_dir='./datasets/pT/three_label_data/val', transform=data_transform)

train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, drop_last=True)

# ================== Model initialization ==================
print(" ****** create model ****** ")
model = MultiModalEfficientNet(in_channels=3, num_classes=2).to(DEVICE)
criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
# Fix: use the LR constant instead of a hard-coded 0.001, so editing LR at the
# top of the file actually takes effect (both values are currently 1e-3).
# NOTE(review): weight_decay=0.9 is far above typical AdamW values (1e-4..1e-2)
# and will aggressively shrink all weights every step — confirm this is intentional.
optimizer = torch.optim.AdamW(model.parameters(), lr=LR, weight_decay=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# ================== 训练函数 ==================
def train_epoch(model, loader, epoch):
    """Run one training epoch.

    Args:
        model: network being trained (switched to train mode here).
        loader: DataLoader yielding (imgs, roi, labels, pid) tuples; imgs and
            roi are (B, M=3, D, H, W) tensors — roi and pid are unused here.
        epoch: zero-based epoch index, used only in the progress-bar label.

    Returns:
        Tuple of (mean per-batch loss, accuracy over the samples actually
        processed this epoch).
    """
    model.train()

    total_loss = 0.0
    correct = 0
    total_processed = 0

    progress_bar = tqdm(loader, desc=f"训练 Epoch {epoch+1}", leave=False)

    for batch_idx, (imgs, roi, labels, pid) in enumerate(progress_bar):
        imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)

        optimizer.zero_grad()
        logits = model(imgs)
        loss = criterion(logits, labels)

        loss.backward()  # backpropagate the loss
        optimizer.step()

        total_loss += loss.item()
        # argmax of logits equals argmax of softmax(logits), so the explicit
        # softmax is unnecessary for predictions (and matches validate()).
        preds = logits.argmax(dim=1)
        correct += (preds == labels).sum().item()

        total_processed += labels.size(0)
        progress_bar.set_postfix({
            'total_loss': f"{total_loss/(batch_idx+1):.8f}",
            'acc': f"{(correct/total_processed):.3%}"
        })

    # Bug fix: with drop_last=True the trailing partial batch is never seen,
    # so dividing by len(loader.dataset) misstates accuracy. Use the number of
    # samples actually processed; max(..., 1) guards an empty loader.
    return total_loss / max(len(loader), 1), correct / max(total_processed, 1)

# ================== 验证函数 ==================
def validate(model, loader, epoch):
    """Run one validation pass (no gradient updates).

    Args:
        model: network being evaluated (switched to eval mode here).
        loader: DataLoader yielding (imgs, roi, labels, pid) tuples; roi and
            pid are unused here.
        epoch: zero-based epoch index, used only in the progress-bar label.

    Returns:
        Tuple of (mean per-batch loss, accuracy over the samples actually
        processed this pass).
    """
    model.eval()

    total_loss = 0.0
    correct = 0
    total_processed = 0

    progress_bar = tqdm(loader, desc=f"验证 Epoch {epoch+1}", leave=False)

    with torch.no_grad():
        for batch_idx, (imgs, roi, labels, pid) in enumerate(progress_bar):
            # Move data to the target device
            imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)

            # Forward pass
            logits = model(imgs)
            loss = criterion(logits, labels)

            # Accumulate metrics
            total_loss += loss.item()

            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum().item()

            # Update the progress bar
            total_processed += labels.size(0)

            progress_bar.set_postfix({
                'total_loss': f"{total_loss/(batch_idx+1):.8f}",
                'acc': f"{(correct/total_processed):.3%}"
            })
            # Periodically print predictions vs. ground truth
            if batch_idx % LOG_INTERVAL == 0:
                progress_bar.write(
                    f"Batch {batch_idx}: 预测 {preds.cpu().numpy()} | 真实 {labels.cpu().numpy()}"
                )

    # Bug fix: with drop_last=True the trailing partial batch is skipped, so
    # dividing by len(loader.dataset) misstates accuracy. Use the number of
    # samples actually processed; max(..., 1) guards an empty loader.
    return total_loss / max(len(loader), 1), correct / max(total_processed, 1)

# ================== 主训练循环 ==================
def main():
    """Top-level training loop: train, validate, and checkpoint the best model
    (by validation accuracy) each epoch."""
    best_val_acc = 0.0
    start_time = time.time()

    # Bug fix: torch.save raises FileNotFoundError if the target directory
    # does not exist — create it up front.
    os.makedirs("./model_efficientNet_send", exist_ok=True)

    for epoch in range(EPOCHS):
        epoch_start = time.time()

        # Training phase
        train_loss, train_acc = train_epoch(model, train_loader, epoch)

        # Validation phase
        val_loss, val_acc = validate(model, val_loader, epoch)

        scheduler.step()
        # Save the best model so far (by validation accuracy)
        if val_acc > best_val_acc:
            print(f"新的最佳模型保存于 epoch {epoch+1}")
            best_val_acc = val_acc
            torch.save(model.state_dict(), "./model_efficientNet_send/best_model_three.pth")

        # Per-epoch statistics
        epoch_time = time.time() - epoch_start
        total_time = time.time() - start_time
        tqdm.write("\n" + "=" * 60)
        tqdm.write(f"Epoch {epoch+1}/{EPOCHS} 完成 [耗时 {epoch_time:.1f}s]")
        tqdm.write(f"训练损失: {train_loss:.8f}  准确率: {train_acc:.3%}")
        tqdm.write(f"验证损失: {val_loss:.8f}  准确率: {val_acc:.3%}")
        tqdm.write(f"累计时间: {total_time//3600:.0f}h {total_time%3600//60:.0f}m")
        tqdm.write("=" * 60 + "\n")

    # Final summary
    tqdm.write(f"\n训练完成！最佳验证准确率: {best_val_acc:.1%}")

if __name__ == "__main__":
    main()