from pathlib import Path

import pandas as pd
import torch
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import T_co
from torchvision import transforms
from torchvision.transforms import Compose


def _read_csv(csv_path: Path) -> pd.DataFrame:
    return pd.read_csv(csv_path, index_col=0)


def _img_transform() -> Compose:
    """Default image transform: resize (shorter side to 400) then convert to tensor."""
    steps = [
        transforms.Resize(400),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)


def _mask_transform() -> Compose:
    """Transform for segmentation masks: resize then convert to tensor.

    BUG FIX: masks are label maps, so resizing must use NEAREST
    interpolation. The previous default (bilinear) blends label values
    along region boundaries, producing pixels that belong to no class.
    """
    return transforms.Compose([
        transforms.Resize(400,
                          interpolation=transforms.InterpolationMode.NEAREST),
        transforms.ToTensor(),
    ])


class DatasetCity(Dataset):
    """Few-shot segmentation dataset for the 'city' benchmark.

    Each item is one "episode": a query image/mask plus ``shot`` support
    image/mask pairs of the same class, built from ``<datapath>/city/city.csv``.
    """

    def __init__(self,
                 datapath: Path,
                 fold=None,
                 transform=None,
                 split=None,
                 shot=1,
                 use_original_imgsize=None):
        """
        Params:
            datapath: Root directory that contains the "city" folder.
            fold: Unused; kept for signature parity with other benchmarks.
            transform: Image transform; defaults to _img_transform().
            split: Unused; kept for signature parity.
            shot: Number of support image/mask pairs per episode.
            use_original_imgsize: Unused; kept for signature parity.
        """
        self.benchmark = 'city'
        self.img_transform = transform if transform is not None else _img_transform()
        self.mask_transform = _mask_transform()
        self.shot = shot

        self.root_path = Path(datapath) / "city"

        self.meta_dataset = self._create_meta_data()
        # NOTE(review): _create_meta_data only builds episodes for classes
        # 1 and 2, yet range(3) also advertises class 0 — confirm what the
        # consuming framework expects here.
        self.class_ids = range(3)

    def __getitem__(self, index) -> dict:
        """Return one episode: query image/mask plus stacked support pairs.

        Support tensors are stacked along a new leading "shot" dimension;
        this assumes all transformed images share one spatial size
        (Resize(400) preserves aspect ratio — TODO confirm source images
        have a uniform aspect ratio).
        """
        raw: dict = self.meta_dataset[index]

        query_img_tf = self._load_image(raw["query_img"])
        query_mask_tf = self._load_mask(raw["query_mask"])

        support_imgs_stack = torch.stack(
            [self._load_image(p) for p in raw["support_imgs"]])
        support_mask_stack = torch.stack(
            [self._load_mask(p) for p in raw["support_masks"]])

        return {
            "class_id": torch.tensor(raw["class_id"]),

            "query_img": query_img_tf,
            "query_mask": query_mask_tf,

            "support_imgs": support_imgs_stack,
            "support_masks": support_mask_stack
        }

    def _load_image(self, rel_path) -> torch.Tensor:
        """Open an RGB image under the dataset root and apply the image transform."""
        img = Image.open(self.root_path / rel_path).convert("RGB")
        return self.img_transform(img)

    def _load_mask(self, rel_path) -> torch.Tensor:
        """Open a grayscale mask, apply the mask transform, drop the channel dim."""
        mask = Image.open(self.root_path / rel_path).convert("L")
        return torch.squeeze(self.mask_transform(mask))

    def __len__(self):
        """Number of episodes across all classes."""
        return len(self.meta_dataset)

    def _get_clear_df(self, label_int: int) -> dict:
        """Build the per-class record table from city.csv.

        1. Select the image/size columns plus this class's mask column.
        2. Remove rows with NaN values (images lacking a mask for the class).
        3. Rename the "mask_<label_int>" column to "mask".

        Returns a dict keyed by the CSV index, one record dict per row.
        """
        data_set = _read_csv(self.root_path / "city.csv")

        mask_name = f"mask_{label_int}"

        # Chained, non-inplace dropna: the column selection returns a new
        # frame, and `dropna(inplace=True)` on it triggers pandas'
        # SettingWithCopyWarning without any benefit.
        data_set = data_set[["img", "height", "width", mask_name]].dropna()
        data_set = data_set.rename(columns={mask_name: "mask"})
        return data_set.to_dict(orient='index')

    def _to_shot_metadata(self, class_id: int, data: dict) -> list[dict]:
        """Group per-class records into (1 query + `shot` support) episodes.

        Params:
            class_id: Label id stamped onto every produced episode.
            data: Per-class records from _get_clear_df, keyed by CSV index.

        Each episode consumes shot + 1 consecutive records; leftover
        records (fewer than shot + 1) are discarded.
        """
        # Floor division: each episode needs exactly shot + 1 records.
        query_number = len(data) // (self.shot + 1)
        assert query_number > 0, "Cannot shot!!!"
        keys = iter(data.keys())

        episodes = []  # renamed from `dict`, which shadowed the builtin
        for _ in range(query_number):
            key = next(keys)
            query_img = data[key]['img']
            query_mask = data[key]['mask']

            support_imgs = []
            support_masks = []
            for _ in range(self.shot):
                key = next(keys)
                support_imgs.append(data[key]["img"])
                support_masks.append(data[key]["mask"])

            episodes.append({
                "class_id": class_id,
                "query_img": query_img,
                "query_mask": query_mask,
                "support_imgs": support_imgs,
                "support_masks": support_masks
            })
        return episodes

    def _create_meta_data(self) -> list:
        """Build the flat episode list over all consumed classes.

        NOTE(review): the original comment claimed the city dataset has 7
        classes, but only mask columns 1 and 2 are read here — confirm
        whether the range should be wider.
        """
        result = []
        for class_id in range(1, 3):
            records = self._get_clear_df(class_id)
            result.extend(self._to_shot_metadata(class_id, records))
        return result


def get_city_loader(data_root: Path, shot) -> DataLoader:
    """Create a shuffled, batch-size-1 DataLoader over the city dataset.

    Params:
        data_root: Directory that contains the "city" folder.
        shot: Number of support image/mask pairs per episode.
    """
    # BUG FIX: `shot` was previously passed positionally and landed in
    # DatasetCity's second parameter (`fold`), so the requested shot count
    # was silently ignored and the dataset always used shot=1.
    dataset = DatasetCity(data_root, shot=shot)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=True,
                            num_workers=0)
    return dataloader
