"""
model name : 深度学习
file       : train.py
information:
    author : OuYang
    time   : 2025/1/14
"""
import os
import tqdm

# Get Current Time
from datetime import datetime

from torch.utils.data import random_split
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from dataset import YOLODataset
from model import YOLO
from loss import YOLOLoss
from utils.optimizer import select_optimizer

import torch

# Timestamp used to name this run's log and checkpoint directories.
current_time = datetime.now().strftime('%m-%d-%H-%M')

# ---- Data / model configuration ----
data_root = "/data/coding/datasets/vocDetect"  # VOC-style detection dataset root
image_size = 448                               # fixed network input resolution
num_classes = 20                               # VOC has 20 object classes
backbone = "resnet34"
pretrained = True

# ---- Loader configuration ----
batch_size = 32
shuffle = True
drop_last = True
is_split = True        # if True, keep only a small subset of the data
split_rate = 0.05      # fraction of each set that is kept

# ---- Optimization configuration ----
optim_name = "SGD"
learning_rate = 0.01
momentum = 0.9
decay = 0.0005
step_size = 10         # LR-decay period (epochs); also the checkpoint period
epochs = 300

# Global step counters for TensorBoard scalars.
total_train_step = 0
total_valid_step = 0

# Print the run configuration (implicit literal concatenation).
print(
    f"Data root: {data_root}\n"
    f"num_classes: {num_classes}\n"
    f"batch_size: {batch_size}\n"
    f"backbone: {backbone}\n"
    f"optim_name: {optim_name}\n"
    f"learning_rate: {learning_rate}\n"
    f"epochs: {epochs}\n"
)

# Device
device = torch.device("cpu")
if torch.cuda.is_available():
    print("Using GPU for training")
    device = torch.device("cuda")

# Shared preprocessing pipeline: resize to the fixed network input size,
# convert to tensor, and normalize with ImageNet statistics (the usual
# choice for a pretrained backbone). Defined once instead of duplicating
# the identical Compose for train and valid sets.
transform = transforms.Compose([
    transforms.Resize((image_size, image_size)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Read Data
train_set = YOLODataset(
    root=data_root,
    train=True,
    num_classes=num_classes,
    transform=transform
)
valid_set = YOLODataset(
    root=data_root,
    train=False,
    num_classes=num_classes,
    transform=transform
)

# Data len
train_len = len(train_set)
valid_len = len(valid_set)
print(f"Train set size: {train_len}")
print(f"Valid set size: {valid_len}")

# Optionally keep only a `split_rate` fraction of each set (handy for quick
# smoke runs); the discarded remainder is bound to `_` and never used.
if is_split:
    train_size = int(split_rate * train_len)
    train_set, _ = random_split(train_set, [train_size, train_len - train_size])
    valid_size = int(split_rate * valid_len)
    valid_set, _ = random_split(valid_set, [valid_size, valid_len - valid_size])
    train_len = len(train_set)
    valid_len = len(valid_set)
    print(f"After Split Train set size: {train_len}")
    print(f"After Split Valid set size: {valid_len}")

# Load Data
# NOTE(review): drop_last=True on the validation loader discards the final
# partial batch, so up to batch_size-1 samples never contribute to the
# validation totals — confirm this is intended.
train_loader = torch.utils.data.DataLoader(
    train_set,
    batch_size=batch_size,
    shuffle=shuffle,
    drop_last=drop_last,
)

valid_loader = torch.utils.data.DataLoader(
    valid_set,
    batch_size=batch_size,
    drop_last=drop_last,
)

# Create the detector and move it to the selected device
# (Module.to returns the module itself, so chaining is equivalent).
model = YOLO(
    backbone=backbone,
    num_classes=num_classes,
    pretrained=pretrained
).to(device)

# Loss function (moved to the same device as the model).
loss_fn = YOLOLoss().to(device)

# Optimizer is selected by name so the script can switch algorithms
# through the `optim_name` setting alone.
optimizer = select_optimizer(
    optim_name=optim_name,
    model_parameters=model.parameters(),
    lr=learning_rate,
    momentum=momentum,
    weight_decay=decay,
)

# Multiply the learning rate by 0.9 every `step_size` epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.9)

# TensorBoard writer; one run directory per launch timestamp.
writer = SummaryWriter(log_dir=f"logs/Train-{current_time}")

# Training: for each epoch run a full training pass, log/record losses,
# run a no-grad validation pass, step the LR scheduler, and periodically
# save a checkpoint.
for epoch in range(epochs):
    print(f"{'-' * 10} Epoch {epoch + 1: 3} {'-' * 10}")
    # Only query CUDA memory stats when a GPU is present — on a CPU-only
    # run this diagnostic is meaningless.
    if torch.cuda.is_available():
        print(f"Used GPU memory: {torch.cuda.memory_allocated(0) / 1024 ** 2:.2f}MB")
    print(f"Learning rate  : {optimizer.param_groups[0]['lr']:.8}")

    # ---- Train ----
    model.train()
    total_train_loss = 0
    total_train_cls_loss = 0
    total_train_conf_loss = 0
    total_train_location_loss = 0
    train_loader_len = len(train_loader)
    with tqdm.tqdm(total=train_loader_len) as qbar:
        for data in train_loader:
            inputs, targets = data

            # Move the batch to the training device.
            inputs, targets = inputs.to(device), targets.to(device)

            # Forward pass.
            outputs = model(inputs)

            # Loss: total plus its class / confidence / localization parts.
            loss, cls_loss, conf_loss, location_loss = loss_fn(outputs, targets)

            # Backward pass.
            optimizer.zero_grad()
            loss.backward()

            # Clip gradients to stabilize early training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=2.5)

            # Parameter update.
            optimizer.step()

            # Accumulate running epoch totals (.item() detaches scalars).
            total_train_loss += loss.item()
            total_train_cls_loss += cls_loss.item()
            total_train_conf_loss += conf_loss.item()
            total_train_location_loss += location_loss.item()

            qbar.set_description(
                f"Train Loss: {total_train_loss: .4} Cls_loss: {total_train_cls_loss: .4} Conf_loss: {total_train_conf_loss: .4} Location_loss: {total_train_location_loss:.4}"
            )

            qbar.update(1)

    total_train_step += 1
    print(f"Total Train Cls Loss: {total_train_cls_loss:.8}")
    print(f"Total Train Conf Loss: {total_train_conf_loss:.8}")
    print(f"Total Train Location Loss: {total_train_location_loss:.8}")
    print(f"Total Train Loss: {total_train_loss:.8}")
    writer.add_scalar('train_loss', total_train_loss, total_train_step)

    writer.add_scalars(
        "Train_Cls_Conf_Location",
        {
            "Train_Cls_loss": total_train_cls_loss,
            "Train_Conf_loss": total_train_conf_loss,
            "Train_Location_loss": total_train_location_loss
        },
        total_train_step
    )

    # Record gradient histograms. Skip parameters whose .grad is None
    # (frozen or unused parameters) — add_histogram(None) would raise.
    for name, param in model.named_parameters():
        if param.requires_grad and param.grad is not None:
            writer.add_histogram(f"{name}.grad", param.grad, epoch + 1)

    # ---- Validation (no gradient tracking) ----
    model.eval()
    valid_loader_len = len(valid_loader)
    with tqdm.tqdm(total=valid_loader_len) as qbar:
        with torch.no_grad():
            total_valid_loss = 0
            total_valid_cls_loss = 0
            total_valid_conf_loss = 0
            total_valid_location_loss = 0

            for data in valid_loader:
                inputs, labels = data

                # Move the batch to the training device.
                inputs, labels = inputs.to(device), labels.to(device)

                # Forward pass only.
                outputs = model(inputs)

                # Same loss decomposition as in training.
                loss, cls_loss, conf_loss, location_loss = loss_fn(outputs, labels)

                total_valid_loss += loss.item()
                total_valid_cls_loss += cls_loss.item()
                total_valid_conf_loss += conf_loss.item()
                total_valid_location_loss += location_loss.item()
                qbar.set_description(
                    f"Valid Loss: {total_valid_loss: .4} Cls_loss: {total_valid_cls_loss: .4} Conf_loss: {total_valid_conf_loss: .4} Location_loss: {total_valid_location_loss:.4}"
                )

                qbar.update(1)

    total_valid_step += 1
    print(f"Total Valid Cls Loss: {total_valid_cls_loss:.8}")
    print(f"Total Valid Conf Loss: {total_valid_conf_loss:.8}")
    print(f"Total Valid Location Loss: {total_valid_location_loss:.8}")
    print(f"Total Valid Loss: {total_valid_loss}")
    writer.add_scalar('valid_loss', total_valid_loss, total_valid_step)
    writer.add_scalars(
        "Valid_Cls_Conf_Location",
        {
            "Valid_Cls_loss": total_valid_cls_loss,
            "Valid_Conf_loss": total_valid_conf_loss,
            "Valid_Location_loss": total_valid_location_loss
        },
        total_valid_step
    )

    # Decay the learning rate on schedule.
    scheduler.step()

    # Save a checkpoint every `step_size` epochs. makedirs creates the
    # parent "models/" directory too and is race-free with exist_ok=True
    # (os.mkdir failed when "models/" was missing).
    if (epoch + 1) % step_size == 0:
        save_dir = f"models/Model-{current_time}"
        os.makedirs(save_dir, exist_ok=True)
        torch.save(model.state_dict(), f"{save_dir}/model_{epoch + 1}.pth")

writer.close()
