# Ultralytics YOLO 🚀, GPL-3.0 license
from collections import defaultdict
from copy import copy
from pathlib import Path
from typing import Any, Dict

import torch
from torch.cuda import amp
from torch.utils.data import DataLoader
from torchvision import transforms

from ultralytics import __version__
from ultralytics.yolo.v8.detect import DetectionTrainer
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, SETTINGS, callbacks, yaml_save
from ultralytics.yolo.utils.torch_utils import init_seeds, select_device
from ultralytics.yolo.utils.checks import print_args
from ultralytics.yolo.utils.files import increment_path

from models.recurrent import RecurrentModel
from .data import SOD, sod_collate
from .process import preprocess_batch, postprocess_batch
from .val import SODValidator

# SODTrainer: DetectionTrainer subclass specialized for the SOD dataset/model pipeline
class SODTrainer(DetectionTrainer):
    """
    Detection trainer specialized for the SOD (salient object detection) task.

    Extends DetectionTrainer with:
      - a recurrent detection model (RecurrentModel, 5 input channels),
      - the SOD sequence dataset and its custom collate function,
      - SOD-specific batch pre/post-processing,
      - a SODValidator for evaluation.
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None):
        """
        Initialize the SODTrainer.

        Args:
            cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG.
            overrides (dict, optional): Configuration overrides. Defaults to None.
        """
        self.args = get_cfg(cfg, overrides)
        self.device = select_device(self.args.device, self.args.batch)
        self.check_resume()
        self.console = LOGGER
        self.validator = None
        self.model = None
        init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)

        # Dirs
        project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task
        name = self.args.name or f"{self.args.mode}"
        if hasattr(self.args, 'save_dir'):
            self.save_dir = Path(self.args.save_dir)
        else:
            # Only rank -1/0 increments the run directory; other ranks reuse the
            # existing one (exist_ok=True) so all processes share a save_dir.
            self.save_dir = Path(
                increment_path(Path(project) / name, exist_ok=self.args.exist_ok if RANK in {-1, 0} else True))
        self.wdir = self.save_dir / 'weights'  # weights dir
        if RANK in {-1, 0}:
            self.wdir.mkdir(parents=True, exist_ok=True)  # make dir
            self.args.save_dir = str(self.save_dir)
            yaml_save(self.save_dir / 'args.yaml', vars(self.args))  # save run args
        self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt'  # checkpoint paths

        self.batch_size = self.args.batch
        self.epochs = self.args.epochs
        self.start_epoch = 0
        if RANK == -1:
            print_args(vars(self.args))

        # Device: AMP is disabled unconditionally for this trainer, so the
        # GradScaler is a no-op pass-through.
        self.amp = False
        self.scaler = amp.GradScaler(enabled=self.amp)
        if self.device.type == 'cpu':
            self.args.workers = 0  # faster CPU training as time dominated by inference, not dataloading

        # Model and Dataloaders.
        self.model = self.args.model
        self.data = self.args.data
        self.trainset, self.testset = self.get_dataset(self.data)
        self.ema = None

        # Optimization utils init
        self.lf = None  # learning-rate lambda, set later by the base trainer
        self.scheduler = None

        # Epoch level metrics
        self.best_fitness = None
        self.fitness = None
        self.loss = None
        self.tloss = None
        self.loss_names = ['Loss']
        self.csv = self.save_dir / 'results.csv'
        self.plot_idx = [0, 1, 2]

        # Callbacks
        self.callbacks = defaultdict(list, callbacks.default_callbacks)  # add callbacks
        if RANK in {0, -1}:
            callbacks.add_integration_callbacks(self)

    def get_dataset(self, data: Dict[str, Any]):
        """
        Build the train and validation datasets from the dataset config.

        Also populates data['nc'] (class count) and data['names'] (class map)
        from the train dataset, as the downstream model construction reads them.

        Args:
            data (dict): Dataset config. Must contain 'name' and 'path'; may
                contain 'seq_len' (sequence length, defaults to 10).

        Returns:
            tuple: (train_dataset, test_dataset) SOD instances.

        Raises:
            NotImplementedError: If data['name'] is not a supported dataset.
        """
        def padding(h, w, d):
            # Resize to the largest (h, w) whose sides are multiples of d
            # (rounds each side down).
            return transforms.Resize((d * (h // d), d * (w // d)), antialias=True)

        seq_len = data.get("seq_len", 10)
        if data["name"] != "pku_davis_sod":
            # Fix: the exception was previously returned instead of raised.
            raise NotImplementedError("Unknown dataset %s" % (data["name"],))
        train_dataset = SOD(
            data["path"],
            "train",
            transform=padding(240, 320, 64),
            seq_len=seq_len,
            augment=True,
        )
        test_dataset = SOD(
            data["path"],
            "val",
            transform=padding(240, 320, 64),
            seq_len=seq_len,
            augment=False,
        )
        data["nc"] = len(train_dataset.classes)
        data["names"] = train_dataset.classes
        return train_dataset, test_dataset

    def get_dataloader(self, dataset_path, batch_size, mode="train", rank=0):
        """
        Wrap a dataset in a DataLoader using the SOD collate function.

        Args:
            dataset_path: Dataset instance to load from (despite the name, a
                Dataset object is passed here, not a filesystem path).
            batch_size (int): Samples per batch.
            mode (str): Unused; kept for base-class interface compatibility.
            rank (int): Unused; kept for base-class interface compatibility.

        Returns:
            DataLoader: Loader with sod_collate and drop_last=True.
        """
        # TODO: manage splits differently
        return DataLoader(
            dataset_path,
            batch_size=batch_size,
            num_workers=self.args.workers,
            collate_fn=sod_collate,
            drop_last=True,
        )

    def get_model(self, cfg=None, weights=None, verbose=True):
        """
        Build the recurrent detection model.

        Args:
            cfg: Model configuration forwarded to RecurrentModel.
            weights: Optional pretrained weights to load into the model.
            verbose (bool): Forwarded to the model constructor.

        Returns:
            RecurrentModel: Model with 5 input channels and self.data['nc'] classes.
        """
        model = RecurrentModel(cfg, ch=5, nc=self.data["nc"], verbose=verbose)
        if weights:
            model.load(weights)
        return model

    def preprocess_batch(self, batch):
        """Move/convert a batch to the training device and dtype (half if args.half)."""
        dtype = torch.half if self.args.half else torch.float
        return preprocess_batch(batch, self.device, dtype)

    def criterion(self, preds, batch):
        """
        Compute the detection loss, then post-process the batch.

        NOTE(review): the reassignment of the local `batch` has an effect only
        if postprocess_batch mutates the batch in place — confirm; otherwise
        the call's result is discarded.
        """
        res = super().criterion(preds, batch)
        batch = postprocess_batch(batch, self.device, torch.half if self.args.half else torch.float)
        return res

    def get_validator(self):
        """Return a SODValidator over self.test_loader and set the SOD loss names."""
        self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss'
        return SODValidator(self.test_loader, save_dir=self.save_dir, logger=self.console, args=copy(self.args))


def train(cfg=DEFAULT_CFG, use_python=False):
    """Entry point: launch SOD training (or the YOLO python API when use_python=True)."""
    model = cfg.model or "yolov8n.pt"
    data = cfg.data or "coco128.yaml"  # or yolo.ClassificationDataset("mnist")
    device = '' if cfg.device is None else cfg.device

    overrides = {"model": model, "data": data, "device": device}
    if use_python:
        from ultralytics import YOLO
        YOLO(model).train(**overrides)
    else:
        SODTrainer(overrides=overrides).train()


if __name__ == "__main__":
    train()
