from model_plus import UNet_ViT
from data_process import Data_Loader
from torch import optim
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
import time  # used to time the training and evaluation phases
import os

# Cap the CUDA caching allocator's split size to reduce memory fragmentation.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'

# Pick the compute device: first GPU when available, otherwise CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)

# Build the hybrid UNet/ViT segmentation network and move it to the device.
net = UNet_ViT(
    in_channels=1,
    num_classes=1,
    image_size=584,
    patch_size=128,
    vit_embed_dim=768,
    vit_depth=12,
    vit_heads=12,
)
net.to(device)

# Training data: single-sample batches, reshuffled every epoch.
trainset = Data_Loader("processed_data/train/images")
train_loader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True)
len_train = len(trainset)

# Test data: fixed order, batch size 1.
testset = Data_Loader("processed_data/test/images")
test_loader = torch.utils.data.DataLoader(testset, batch_size=1)
len_test = len(testset)

# RMSprop optimizer and binary cross-entropy on raw logits.
optimizer = optim.RMSprop(net.parameters(), lr=1e-5, weight_decay=1e-8, momentum=0.9)
criterion = nn.BCEWithLogitsLoss()

# Checkpointing: only the weights of the best-scoring epoch are kept.
save_path = 'UNet_ViT_128.pth'
best_acc = 0.0  # best test accuracy seen so far

# Loss scaler for mixed-precision (AMP) training.
scaler = GradScaler()

# Training loop: one pass over the training set per epoch, then a full
# evaluation pass; weights with the best test accuracy are checkpointed.
for epoch in range(200):
    start_train_time = time.time()  # training phase start

    net.train()
    running_loss = 0.0
    num_batches = 0  # batches that actually contributed an optimizer step

    for image, label in train_loader:
        image, label = image.to(device), label.to(device)

        # Forward pass under autocast for mixed-precision speed/memory savings.
        with autocast():
            pred = net(image)
            loss = criterion(pred, label)

        # Skip the update entirely when the loss is NaN/inf (unstable batch);
        # no backward has run, so no gradient state needs cleanup.
        if not torch.isfinite(loss):
            print(f"Loss is {loss}, skipping backward pass")
            continue

        # Clear stale gradients before backward; set_to_none=True frees memory
        # instead of filling grads with zeros.
        optimizer.zero_grad(set_to_none=True)
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        running_loss += loss.item()
        num_batches += 1

    end_train_time = time.time()
    train_time = end_train_time - start_train_time
    print(f'Epoch {epoch + 1} training time: {train_time:.2f} seconds')

    # Return cached allocator blocks before the evaluation pass.
    torch.cuda.empty_cache()

    start_predict_time = time.time()  # evaluation phase start

    net.eval()
    acc = 0.0   # accumulated per-image pixel accuracy
    total = 0   # number of test images seen
    with torch.no_grad():
        for test_image, test_label in test_loader:
            test_image, test_label = test_image.to(device), test_label.to(device)
            outputs = net(test_image)

            # Binarize logits at 0 (equivalent to sigmoid(x) >= 0.5).
            outputs = (outputs >= 0).float()

            # Pixel accuracy: matching pixels / pixels per image, summed over
            # the batch. Assumes NCHW outputs/labels — TODO confirm upstream.
            acc += (outputs == test_label).sum().item() / (test_label.size(2) * test_label.size(3))
            total += test_label.size(0)

    end_predict_time = time.time()
    predict_time = end_predict_time - start_predict_time
    print(f'Epoch {epoch + 1} prediction time: {predict_time:.2f} seconds')

    # Mean loss over batches that actually stepped (skipped batches excluded;
    # dividing by len_train would bias the metric whenever a batch is skipped).
    accurate = acc / total  # mean per-image pixel accuracy on the test set
    print('[epoch %d] train_loss: %.3f  test_accuracy: %.3f %%' %
          (epoch + 1, running_loss / max(num_batches, 1), accurate * 100))

    if accurate > best_acc:  # keep only the best-performing weights
        best_acc = accurate
        torch.save(net.state_dict(), save_path)