import os
import torch
import logging
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from transformers import optimization
from tqdm import tqdm

from dataloader_my import build_dataloader
from model_my_fpn import ResNetConvLSTM
from log_manage import logger_init


def train(
    *,
    experiment_name="name1",
    num_classes=2,
    frame_len=15,
    batch_size=4,
    epochs=50,
    lr=3e-4,
    num_warmup_steps=200,
    image_size=(360, 640),
    num_workers=2,
    checkpoint_interval=5,
    pretrained_path=r"./checkpoint/resnet18.pth",
    train_dataset_dir=r'/home/xian/mzs_project/Convlstm/dataset_fog/train',
    test_dataset_dir=r'/home/xian/mzs_project/Convlstm/dataset_fog/test',
):
    """Train a ResNetConvLSTM video classifier and keep the best checkpoint.

    The former hard-coded hyperparameters are now keyword-only arguments whose
    defaults are the original values, so a plain ``train()`` call behaves
    exactly as before while allowing experiments to override any setting
    without editing the source.

    Args:
        experiment_name: Name used to namespace log/checkpoint/TensorBoard dirs.
        num_classes: Number of output classes for the classifier head.
        frame_len: Number of frames per sample clip fed to the model.
        batch_size: Mini-batch size for both train and validation loaders.
        epochs: Total number of training epochs.
        lr: Initial learning rate for Adam.
        num_warmup_steps: Warmup steps for the cosine LR schedule.
        image_size: (height, width) the frames are resized to.
        num_workers: DataLoader worker processes per loader.
        checkpoint_interval: Save a periodic checkpoint every N epochs.
        pretrained_path: Path to the pretrained ResNet-18 backbone weights.
        train_dataset_dir: Root directory of the training split.
        test_dataset_dir: Root directory of the validation/test split.

    Side effects:
        Creates/writes ``logs/<experiment_name>``,
        ``./save_model/<experiment_name>`` and ``./runs/<experiment_name>``.
    """
    # --- experiment directories -------------------------------------------
    log_dir = os.path.join("logs", experiment_name)
    save_dir = os.path.join("./save_model", experiment_name)
    model_save_path = os.path.join(save_dir, "best_model.pt")
    summary_dir = os.path.join("./runs", experiment_name)
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    logger_init(log_file_name='train_log', log_level=logging.INFO, log_dir=log_dir)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    logging.info(f"Using device: {device}")

    # --- data ---------------------------------------------------------------
    train_loader = build_dataloader(
        root_dir=train_dataset_dir,
        frame_len=frame_len,
        batch_size=batch_size,
        num_workers=num_workers,
        image_size=image_size,
        mode='train'
    )
    val_loader = build_dataloader(
        root_dir=test_dataset_dir,
        frame_len=frame_len,
        batch_size=batch_size,
        num_workers=num_workers,
        image_size=image_size,
        mode='test'
    )

    # --- model / optimization ----------------------------------------------
    model = ResNetConvLSTM(num_classes=num_classes, pretrain_path=pretrained_path).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    total_steps = len(train_loader) * epochs
    # Cosine decay with linear warmup; stepped once per batch, not per epoch.
    scheduler = optimization.get_cosine_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps
    )

    writer = SummaryWriter(summary_dir)
    best_acc = 0.0
    # Guard so the per-epoch averages never divide by zero on an empty loader.
    num_batches = max(len(train_loader), 1)

    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        running_acc = 0.0

        progress = tqdm(train_loader, desc=f"Epoch [{epoch+1}/{epochs}]")

        for x, y in progress:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()

            logits = model(x)
            loss = criterion(logits, y)
            loss.backward()
            optimizer.step()
            scheduler.step()

            # Batch-level accuracy, purely for progress reporting.
            acc = (logits.argmax(1) == y).float().mean().item()
            running_loss += loss.item()
            running_acc += acc
            progress.set_postfix(loss=loss.item(), acc=acc)

        avg_loss = running_loss / num_batches
        avg_acc = running_acc / num_batches
        logging.info(f"[Train] Epoch {epoch+1}: Loss={avg_loss:.4f}, Acc={avg_acc:.4f}")
        writer.add_scalar("Train/Loss", avg_loss, epoch)
        writer.add_scalar("Train/Acc", avg_acc, epoch)

        # Validate after every epoch; evaluate() restores train mode itself.
        val_acc = evaluate(model, val_loader, device)
        logging.info(f"[Val] Epoch {epoch+1}: Acc={val_acc:.4f}")
        writer.add_scalar("Val/Acc", val_acc, epoch)

        # Keep the best-performing weights at a fixed, well-known path.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), model_save_path)
            logging.info(f"保存新最优模型 (Acc={best_acc:.4f})")
        # Periodic checkpoints for resuming / post-hoc analysis.
        if (epoch + 1) % checkpoint_interval == 0:
            checkpoint_path = os.path.join(save_dir, f"epoch_{epoch + 1}.pt")
            torch.save(model.state_dict(), checkpoint_path)
            logging.info(f"保存周期性 checkpoint -> {checkpoint_path}")

    logging.info(f"训练结束，最佳验证准确率: {best_acc:.4f}")
    writer.close()


def evaluate(model, dataloader, device):
    """Return the classification accuracy of *model* over *dataloader*.

    The model is switched to eval mode for the pass (gradients disabled) and
    restored to train mode before returning. Returns 0 when the loader yields
    no samples.
    """
    model.eval()
    num_correct = 0
    num_samples = 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            predictions = model(inputs).argmax(1)
            num_correct += (predictions == targets).sum().item()
            num_samples += targets.size(0)
    model.train()
    if num_samples == 0:
        return 0
    return num_correct / num_samples


# Script entry point: run the full training loop with default settings
# when executed directly (not on import).
if __name__ == "__main__":
    train()
