import os
from typing import List, Optional, Tuple

import cv2
import numpy as np
import pytorch_lightning as pl
import torch
from albumentations import BboxParams, Compose, HueSaturationValue, ShiftScaleRotate
from torch.utils.data import DataLoader, dataset
from torchvision import transforms


class Dataset(dataset.Dataset):
    """YOLO-style detection dataset.

    Each sample is an image resized to 448x448 paired with a dense target
    tensor of shape (grid_size, grid_size, 5 * bounding_box_num + classes_num)
    built from a YOLO-format label file (``class cx cy w h``, all relative
    to the image size).
    """

    def __init__(
        self,
        images_path: list[str],
        labels_path: list[str],
        is_train: bool,
        classes_num: int,
        grid_size: int,
        bounding_box_num: int,
    ) -> None:
        """
        Args:
            images_path: image file paths; index-aligned with ``labels_path``.
            labels_path: YOLO-format label file paths.
            is_train: when True, apply data augmentation in ``__getitem__``.
            classes_num: number of object classes (C).
            grid_size: number of grid cells per side (S).
            bounding_box_num: boxes stored per cell (B).
        """
        super().__init__()
        self.images = images_path
        self.labels = labels_path
        self.is_train = is_train
        # Geometric + color jitter; bbox coordinates stay consistent because
        # albumentations is told they are in "yolo" (relative cx, cy, w, h) format.
        self.augmentation_transformer = Compose(
            [
                ShiftScaleRotate(
                    shift_limit=0.2, scale_limit=0.2, rotate_limit=0, p=1.0
                ),
                HueSaturationValue(
                    sat_shift_limit=50, val_shift_limit=50, hue_shift_limit=0, p=1.0
                ),
            ],
            bbox_params=BboxParams(format="yolo", label_fields=["class_labels"]),
        )
        self.image_transformer = transforms.ToTensor()
        self.class_num = classes_num
        self.grid_size = grid_size
        self.bounding_box_num = bounding_box_num

    def __len__(self) -> int:
        return len(self.images)

    def __getitem__(self, index) -> Tuple[torch.Tensor, torch.Tensor]:
        image_path = self.images[index]
        label_path = self.labels[index]
        # Guard against misaligned image/label lists. A plain `assert` is
        # stripped under `python -O`, so raise explicitly instead.
        if (
            os.path.splitext(os.path.basename(image_path))[0]
            != os.path.splitext(os.path.basename(label_path))[0]
        ):
            raise ValueError(
                f"image/label filename mismatch: {image_path!r} vs {label_path!r}"
            )

        image, label = self._process_sample(image_path, label_path)
        image = self.image_transformer(image)
        label = torch.from_numpy(label)
        return image, label

    def _augment(self, image, bboxes, class_labels):
        """Run the augmentation pipeline; returns (image, bboxes, class_labels)."""
        augmented = self.augmentation_transformer(
            image=image, bboxes=bboxes, class_labels=class_labels
        )
        return augmented["image"], augmented["bboxes"], augmented["class_labels"]

    def _read_label_file(self, label_path: str) -> Tuple[list, list]:
        """Parse a YOLO label file into parallel (class_ids, bboxes) lists."""
        class_ids = []
        bboxes = []
        with open(label_path, "r") as f:
            for line in f:
                # split() (not split(" ")) tolerates repeated/trailing whitespace.
                fields = line.split()
                if not fields:  # tolerate blank lines
                    continue
                class_ids.append(int(fields[0]))
                bboxes.append(tuple(float(v) for v in fields[1:5]))
        return class_ids, bboxes

    def _encode_label(self, class_ids, bboxes) -> np.ndarray:
        """Encode boxes into the dense (S, S, 5*B + C) float32 target tensor."""
        label = np.zeros(
            (
                self.grid_size,
                self.grid_size,
                5 * self.bounding_box_num + self.class_num,
            ),
            dtype=np.float32,
        )
        for class_id, bbox in zip(class_ids, bboxes):
            center_x, center_y, width, height = bbox
            # Clamp so a center coordinate of exactly 1.0 maps into the last
            # cell instead of indexing one past the end of the grid.
            x_idx = min(int(center_x * self.grid_size), self.grid_size - 1)
            y_idx = min(int(center_y * self.grid_size), self.grid_size - 1)
            # NOTE: when several boxes land in the same cell, the last one
            # processed wins (original behavior kept).
            # NOTE(review): the target is indexed [x_idx, y_idx]; confirm the
            # loss uses the same (x, y) ordering rather than row-major (y, x).
            for i in range(self.bounding_box_num):
                base = 5 * i
                # Convert image-relative center to cell-relative offset.
                label[x_idx, y_idx, base + 0] = center_x * self.grid_size - x_idx
                label[x_idx, y_idx, base + 1] = center_y * self.grid_size - y_idx
                label[x_idx, y_idx, base + 2] = width
                label[x_idx, y_idx, base + 3] = height
                label[x_idx, y_idx, base + 4] = 1  # objectness / confidence
            # One-hot class entry after the B box slots.
            label[x_idx, y_idx, 5 * self.bounding_box_num + class_id] = 1
        return label

    def _process_sample(
        self, image_path: str, label_path: str
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Load one image/label pair; returns (RGB image, encoded label)."""
        image = cv2.imread(image_path)
        if image is None:  # cv2.imread silently returns None on failure
            raise FileNotFoundError(f"cannot read image: {image_path}")
        image = cv2.resize(image, (448, 448))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        class_ids, bboxes = self._read_label_file(label_path)

        # Augmentation only at training time; bboxes/ids are transformed together.
        if self.is_train:
            image, bboxes, class_ids = self._augment(image, bboxes, class_ids)

        return image, self._encode_label(class_ids, bboxes)


class YoloDataLoader(pl.LightningDataModule):
    """LightningDataModule serving train/val DataLoaders for the YOLO Dataset."""

    def __init__(
        self,
        batch_size: int,
        workers_num: int,
        train_dataset: Dataset,
        val_dataset: Dataset,
    ) -> None:
        """
        Args:
            batch_size: samples per batch for both loaders.
            workers_num: DataLoader worker processes (0 = load in main process).
            train_dataset: dataset served shuffled by ``train_dataloader``.
            val_dataset: dataset served in order by ``val_dataloader``.
        """
        super().__init__()
        self.batch_size = batch_size
        self.num_workers = workers_num
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset

    def _make_loader(self, ds, shuffle: bool) -> DataLoader:
        """Build a DataLoader with the module's shared settings."""
        # persistent_workers keeps worker processes alive between epochs;
        # it is only legal when num_workers > 0.
        return DataLoader(
            ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=shuffle,
            persistent_workers=self.num_workers > 0,
        )

    def train_dataloader(self) -> DataLoader:
        return self._make_loader(self.train_dataset, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        return self._make_loader(self.val_dataset, shuffle=False)


def create_dataloader(
    data_path: str,
    data_series: str,
    data_version: List[str],
    auto_download: bool,
    grid_size: int,
    bounding_box_num: int,
    batch_size: int,
    workers_num: int,
) -> Tuple["YoloDataLoader", int]:
    """Build the train/val data module for the requested dataset series.

    Args:
        data_path: root directory of the dataset on disk.
        data_series: dataset family; only "VOC" is supported.
        data_version: dataset versions/years to load.
        auto_download: download the dataset if it is missing.
        grid_size: YOLO grid size S.
        bounding_box_num: boxes per cell B.
        batch_size: batch size for both loaders.
        workers_num: DataLoader worker count.

    Returns:
        A ``(data_module, class_num)`` tuple.  (The original annotation said
        ``YoloDataLoader`` alone, but a tuple has always been returned.)

    Raises:
        NotImplementedError: for any ``data_series`` other than "VOC".
    """
    if data_series == "VOC":
        # Imported lazily so unsupported series never import the VOC utilities.
        from utils.dataset.voc import VOCUtils

        data_utils = VOCUtils(data_path, data_version, auto_download)
    else:
        raise NotImplementedError(f"unsupported data series: {data_series!r}")

    train_dataset = Dataset(
        data_utils.train_images_list,
        data_utils.train_labels_list,
        True,
        data_utils.class_num,
        grid_size,
        bounding_box_num,
    )
    val_dataset = Dataset(
        data_utils.val_images_list,
        data_utils.val_labels_list,
        False,
        data_utils.class_num,
        grid_size,
        bounding_box_num,
    )
    return (
        YoloDataLoader(batch_size, workers_num, train_dataset, val_dataset),
        data_utils.class_num,
    )
