"""
@FileName：train.py
@Description：Training loop for a ResNet18 regressor — AMP, ReduceLROnPlateau scheduling, early stopping, best-checkpoint saving.
@Author：Lemon
@Time：2025/5/8 15:23
"""

import os
import torch
import torch.nn as nn
from tqdm import tqdm
import logging
from model import ResNet18

logging.basicConfig(level=logging.INFO)


class EarlyStopping:
    """Signal that training should stop when the monitored loss stalls.

    A loss counts as an improvement only when it drops below the best
    value seen so far by more than ``min_delta``. Once ``patience``
    consecutive non-improving observations accumulate, :meth:`step`
    reports that training should halt.
    """

    def __init__(self, patience=10, min_delta=0.0):
        self.patience = patience      # non-improving steps tolerated
        self.min_delta = min_delta    # minimum drop to count as improvement
        self.counter = 0              # consecutive non-improving steps so far
        self.best_loss = float('inf')  # best (lowest) loss observed

    def step(self, current_loss):
        """Record one loss observation; return True iff patience is exhausted."""
        if current_loss < self.best_loss - self.min_delta:
            # Improvement: remember it and reset the stall counter.
            self.best_loss = current_loss
            self.counter = 0
            return False
        self.counter += 1
        return self.counter >= self.patience


def compute_metrics(score, label, loss):
    """Return (loss, RMSE, MAE) between *score* and *label* as Python floats.

    *score* and *label* are tensors of matching shape; *loss* is a scalar
    tensor that is simply unwrapped with ``.item()``.
    """
    diff = score - label
    rmse = diff.pow(2).mean().sqrt().item()
    mae = diff.abs().mean().item()
    return loss.item(), rmse, mae


def validate(model, test_loader, device, criterion, scaler=None):
    """Run one evaluation pass and return averaged metrics.

    Args:
        model: network mapping an image batch to a score tensor.
        test_loader: iterable yielding ``(img, label, _)`` triples.
        device: device the batches are moved to.
        criterion: loss applied as ``criterion(score, label)``.
        scaler: optional GradScaler; when provided (and CUDA is available)
            the forward pass runs under autocast, mirroring training.

    Returns:
        Tuple ``(mean_rmse, mean_mae, mean_loss)`` averaged over batches.

    Raises:
        ValueError: if ``test_loader`` yields no batches (previously a
            bare ZeroDivisionError).
    """
    model.eval()
    # Only enable autocast when AMP is actually usable; the old code enabled
    # it whenever a scaler was passed, even on CPU-only machines.
    use_amp = scaler is not None and torch.cuda.is_available()
    total_rmse, total_mae, total_loss = 0.0, 0.0, 0.0
    steps = 0
    with torch.no_grad():
        for img, label, _ in test_loader:
            img = img.to(device)
            label = label.unsqueeze(1).to(device).to(torch.float32)
            # Modern torch.amp API, consistent with train(); the original
            # used the deprecated torch.cuda.amp.autocast here.
            with torch.amp.autocast('cuda', enabled=use_amp):
                score = model(img)
                loss = criterion(score, label)
            loss_val, rmse, mae = compute_metrics(score, label, loss)
            total_loss += loss_val
            total_rmse += rmse
            total_mae += mae
            steps += 1
    if steps == 0:
        raise ValueError("test_loader yielded no batches")
    return total_rmse / steps, total_mae / steps, total_loss / steps


def train(train_loader, test_loader, writer, epochs, lr, device, model_dict):
    """Train a ResNet18 regressor with AMP, LR scheduling and early stopping.

    Saves a checkpoint to *model_dict* whenever validation loss improves,
    halves the LR after 5 stagnant epochs, and stops after 10.

    Args:
        train_loader: iterable of ``(img, label, _)`` training batches.
        test_loader: validation batches, forwarded to validate().
        writer: metrics sink exposing ``log_train(...)``.
        epochs: maximum number of epochs.
        lr: initial Adam learning rate.
        device: device to train on.
        model_dict: checkpoint directory (created if missing).

    Raises:
        ValueError: if ``train_loader`` yields no batches.
    """
    os.makedirs(model_dict, exist_ok=True)

    model = ResNet18().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    use_amp = torch.cuda.is_available()
    # Pass the device type explicitly: torch.amp.GradScaler(enabled=...)
    # without it emits a FutureWarning on recent PyTorch releases.
    scaler = torch.amp.GradScaler('cuda', enabled=use_amp)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5)
    early_stopper = EarlyStopping(patience=10, min_delta=1e-4)

    best_val_loss = float('inf')

    for epoch in range(epochs):
        model.train()
        total_rmse, total_mae, total_loss = 0., 0., 0.
        step = 0
        loader = tqdm(train_loader, desc=f"Epoch {epoch}")

        for img, label, _ in loader:
            img = img.to(device)
            label = label.unsqueeze(1).to(device).to(torch.float32)

            optimizer.zero_grad()
            with torch.amp.autocast('cuda', enabled=use_amp):
                score = model(img)
                loss = criterion(score, label)

            # With AMP disabled, GradScaler degrades to plain backward/step.
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            loss_val, rmse, mae = compute_metrics(score, label, loss)
            total_loss += loss_val
            total_rmse += rmse
            total_mae += mae
            step += 1

            loader.set_description(f"Epoch {epoch} Step {step} RMSE {rmse:.2f} MAE {mae:.2f}")

        # An empty loader would otherwise surface as a cryptic ZeroDivisionError.
        if step == 0:
            raise ValueError("train_loader yielded no batches")
        avg_rmse = total_rmse / step
        avg_mae = total_mae / step
        avg_loss = total_loss / step

        val_rmse, val_mae, val_loss = validate(model, test_loader, device, criterion, scaler)
        # ReduceLROnPlateau steps on the monitored metric, not per-batch.
        scheduler.step(val_loss)

        writer.log_train(avg_rmse, avg_mae, avg_loss, val_rmse, val_mae, val_loss, epoch)

        print(f"[Epoch {epoch}] Train -> RMSE: {avg_rmse:.2f}, MAE: {avg_mae:.2f}, Loss: {avg_loss:.4f}")
        print(f"[Epoch {epoch}] Valid -> RMSE: {val_rmse:.2f}, MAE: {val_mae:.2f}, Loss: {val_loss:.4f}")
        logging.info(f"Train RMSE: {avg_rmse:.2f} | Val RMSE: {val_rmse:.2f}")

        # Checkpoint only on validation improvement (best-so-far policy).
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            model_path = os.path.join(model_dict, f"ResNet_epoch{epoch}_loss{val_loss:.3f}.pth")
            torch.save({'ResNet': model.state_dict()}, model_path)
            print(f"✅ Model saved: {model_path}")
            logging.info(f"Model saved at epoch {epoch} with val_loss: {val_loss:.4f}")

        if early_stopper.step(val_loss):
            print("⏹️ Early stopping triggered.")
            logging.info("Early stopping.")
            break

