"""
model name : MY_YOLO
file       : train.py
information:
    author : OuYang
    time   : 2025/1/23
"""
import argparse
import os

import torch
from torch.utils.data import random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from dataset import YOLODataset
from model import YOLO
from loss import YOLOLoss
from utils.utils import select_optimizer

# Run timestamp (MM-DD-HH-MM), captured once at import; used to name the run dir.
from datetime import datetime

current_time = f"{datetime.now():%m-%d-%H-%M}"


def parse_opt(args=None):
    """Parse the command-line options for training.

    Args:
        args: Optional list of argument strings. Defaults to ``None``,
            in which case ``sys.argv[1:]`` is parsed (backward compatible
            with the previous no-argument signature).

    Returns:
        argparse.Namespace holding all training options.
    """

    def str2bool(value):
        # argparse's `type=bool` is a trap: bool("False") is True because any
        # non-empty string is truthy, so flags could never be turned off from
        # the CLI. Parse the common boolean spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("yes", "true", "t", "y", "1"):
            return True
        if lowered in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError(f"Boolean value expected, got {value!r}")

    parser = argparse.ArgumentParser()

    parser.add_argument("--data_root", type=str, default="/data/coding/datasets/vocDetect", help="Data Root")
    parser.add_argument("--num_classes", type=int, default=20, help="Number of classes")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch Size")
    parser.add_argument("--epochs", type=int, default=10, help="Epochs")
    parser.add_argument("--imgsz", type=int, default=448, help="Image size")
    parser.add_argument("--backbone", type=str, default="resnet34", help="Backbone")
    parser.add_argument("--optim", type=str, default="SGD", help="Optimizer name (e.g. SGD, Adam)")
    parser.add_argument("--lr", type=float, default=0.001, help="Learning Rate")
    parser.add_argument("--momentum", type=float, default=0.9, help="Momentum parameter")
    parser.add_argument("--weight_decay", type=float, default=0.0005, help="Weight decay parameter")
    parser.add_argument("--shuffle", type=str2bool, default=True, help="Shuffle")
    parser.add_argument("--step_size", type=int, default=10, help="Step size")
    parser.add_argument("--is_split", type=str2bool, default=True, help="Is split Note: set split rate")
    parser.add_argument("--split_rate", type=float, default=0.1, help="Split Rate")
    parser.add_argument("--drop_last", type=str2bool, default=False, help="Drop last data")
    # Was `type=str, default=True`: the flag is boolean, so parse it as one.
    parser.add_argument("--pretrained", type=str2bool, default=True, help="Pretrained")
    # Timestamp evaluated at parse time (not import time) so the run dir
    # reflects when training actually started.
    parser.add_argument("--save_dir", type=str,
                        default=f"./runs/train/{datetime.now().strftime('%m-%d-%H-%M')}/",
                        help="Save model")
    parser.add_argument("--s", type=int, default=7, help="s")
    parser.add_argument("--b", type=int, default=2, help="b")
    parser.add_argument("--gamma", type=float, default=0.9, help="scheduler gamma")
    parser.add_argument("--clip", type=float, default=2.5, help="Gradient clipping")

    return parser.parse_args(args)


def train(opt):
    """Run the full YOLO training loop described by *opt*.

    Builds the datasets, dataloaders, model, loss, optimizer and LR
    scheduler, then alternates train/validation epochs while logging to
    TensorBoard and saving checkpoints into ``opt.save_dir``.

    Args:
        opt: argparse.Namespace produced by :func:`parse_opt`.
    """
    # Create the run directory tree. exist_ok guards against a partially
    # created tree (previously the subdirs were skipped whenever save_dir
    # already existed).
    os.makedirs(opt.save_dir, exist_ok=True)
    os.makedirs(os.path.join(opt.save_dir, "logs"), exist_ok=True)
    os.makedirs(os.path.join(opt.save_dir, "models"), exist_ok=True)

    # Device: prefer CUDA when available, otherwise CPU.
    device = torch.device("cpu")
    if torch.cuda.is_available():
        print("Using GPU for training")
        device = torch.device("cuda")

    # Persist all run arguments for reproducibility.
    with open(os.path.join(opt.save_dir, "arguments.txt"), "w") as file:
        for key, value in vars(opt).items():
            file.write(f"{key}={value}\n")

    # Datasets
    train_set = YOLODataset(
        root=opt.data_root,
        train=True,
        num_classes=opt.num_classes
    )
    train_len = len(train_set)
    valid_set = YOLODataset(
        root=opt.data_root,
        train=False,
        num_classes=opt.num_classes
    )
    valid_len = len(valid_set)
    print(f"Train set size: {train_len}")
    print(f"Valid set size: {valid_len}")

    # Optionally keep only `split_rate` of each set (useful for quick runs).
    if opt.is_split and opt.split_rate > 0:
        train_size = int(opt.split_rate * train_len)
        train_set, _ = random_split(train_set, [train_size, train_len - train_size])
        valid_size = int(opt.split_rate * valid_len)
        valid_set, _ = random_split(valid_set, [valid_size, valid_len - valid_size])
        train_len = len(train_set)
        valid_len = len(valid_set)
        print(f"After Split Train set size: {train_len}")
        print(f"After Split Valid set size: {valid_len}")

    # Dataloaders
    train_loader = torch.utils.data.DataLoader(
        dataset=train_set,
        batch_size=opt.batch_size,
        shuffle=opt.shuffle,
        drop_last=opt.drop_last
    )

    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_set,
        batch_size=opt.batch_size,
        drop_last=opt.drop_last
    )

    # Model
    model = YOLO(
        backbone=opt.backbone,
        num_classes=opt.num_classes,
        pretrained=opt.pretrained,
    )
    model.to(device)

    # Loss function
    loss_fn = YOLOLoss()
    loss_fn.to(device)

    # Optimizer over trainable parameters only.
    optimizer = select_optimizer(
        optim_name=opt.optim,
        model_parameters=filter(lambda p: p.requires_grad, model.parameters()),
        momentum=opt.momentum,
        lr=opt.lr,
        weight_decay=opt.weight_decay
    )

    # LR decays by `gamma` every `step_size` epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer=optimizer,
        step_size=opt.step_size,
        gamma=opt.gamma
    )

    # TensorBoard writer
    writer = SummaryWriter(log_dir=os.path.join(opt.save_dir, 'logs'))

    for epoch in range(opt.epochs):
        print(f"{'-' * 10} Epoch {epoch + 1: 3} {'-' * 10}")
        # Only query CUDA memory when a GPU is in use:
        # torch.cuda.memory_allocated raises on CPU-only machines.
        if torch.cuda.is_available():
            print(f"Used GPU memory: {torch.cuda.memory_allocated(0) / 1024 ** 2:.2f}MB")
        print(f"Learning rate  : {optimizer.param_groups[0]['lr']:.8}")

        # ---------------- Train ----------------
        model.train()
        train_loader_len = len(train_loader)
        train_loss_sum = 0
        train_cls_loss_sum = 0
        train_cof_loss_sum = 0
        train_loc_loss_sum = 0
        with tqdm(total=train_loader_len) as qbar:
            for inputs, labels in train_loader:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Forward
                output = model(inputs)

                # Total loss plus its class / confidence / localization parts.
                loss, cls_loss, cof_loss, loc_loss = loss_fn(output, labels)

                # Backward
                optimizer.zero_grad()
                loss.backward()

                # Clip the gradient norm to stabilize training.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=opt.clip)

                optimizer.step()

                train_loss_sum += loss.item()
                train_cls_loss_sum += cls_loss.item()
                train_cof_loss_sum += cof_loss.item()
                train_loc_loss_sum += loc_loss.item()

                qbar.set_description(
                    f"Train_loss: {loss.item() / opt.batch_size:.8f}|"
                    f"Cls: {cls_loss.item() / opt.batch_size:.8f}|"
                    f"Cof: {cof_loss.item() / opt.batch_size:.8f}|"
                    f"Loc: {loc_loss.item() / opt.batch_size:.8f}"
                )
                qbar.update(1)

        print(f"Train Loss: {train_loss_sum / (opt.batch_size * train_loader_len):.8f}")
        writer.add_scalar(
            "Train Loss", train_loss_sum / train_len, epoch + 1
        )
        # Log epoch averages; previously only the LAST batch's losses were
        # logged and the per-component accumulators went unused.
        train_denom = opt.batch_size * train_loader_len
        writer.add_scalars(
            "Train item ",
            {
                "loss": train_loss_sum / train_denom,
                "cls": train_cls_loss_sum / train_denom,
                "cof": train_cof_loss_sum / train_denom,
                "loc": train_loc_loss_sum / train_denom
            },
            epoch + 1
        )

        # Record gradient histograms; grad can be None for parameters that
        # received no gradient, which add_histogram cannot handle.
        for name, param in model.named_parameters():
            if param.requires_grad and param.grad is not None:
                writer.add_histogram(f"{name}", param.grad, epoch + 1)

        # ---------------- Valid ----------------
        model.eval()
        valid_loader_len = len(valid_loader)
        valid_loss_sum = 0
        valid_cls_loss_sum = 0
        valid_cof_loss_sum = 0
        valid_loc_loss_sum = 0
        with tqdm(total=valid_loader_len) as qbar:
            with torch.no_grad():
                for inputs, labels in valid_loader:
                    inputs = inputs.to(device)
                    labels = labels.to(device)

                    output = model(inputs)
                    loss, cls_loss, cof_loss, loc_loss = loss_fn(output, labels)

                    valid_loss_sum += loss.item()
                    valid_cls_loss_sum += cls_loss.item()
                    valid_cof_loss_sum += cof_loss.item()
                    valid_loc_loss_sum += loc_loss.item()

                    qbar.set_description(
                        f"Valid_loss: {loss.item() / opt.batch_size:.8f}|"
                        f"Cls: {cls_loss.item() / opt.batch_size:.8f}|"
                        f"Cof: {cof_loss.item() / opt.batch_size:.8f}|"
                        f"Loc: {loc_loss.item() / opt.batch_size:.8f}"
                    )
                    qbar.update(1)

        print(f"Valid Loss: {valid_loss_sum / (opt.batch_size * valid_loader_len):.8f}")
        writer.add_scalar("Valid Loss", valid_loss_sum / valid_len, epoch + 1)
        # global_step was missing here, collapsing all epochs onto step 0.
        valid_denom = opt.batch_size * valid_loader_len
        writer.add_scalars(
            "Valid item ",
            {
                "loss": valid_loss_sum / valid_denom,
                "cls": valid_cls_loss_sum / valid_denom,
                "cof": valid_cof_loss_sum / valid_denom,
                "loc": valid_loc_loss_sum / valid_denom
            },
            epoch + 1
        )

        # Update lr
        scheduler.step()

        # Periodic checkpoint every `step_size` epochs.
        if (epoch + 1) % opt.step_size == 0:
            torch.save(model.state_dict(), os.path.join(opt.save_dir, f"models/model_{epoch + 1}.pth"))

    # Close once after ALL epochs; previously this ran inside the loop and
    # closed the writer after the first epoch.
    writer.close()


if __name__ == "__main__":
    # Script entry point: parse CLI options, then launch training.
    train(parse_opt())
