import os
import torch
import numpy as np

from PIL import Image
from typing import Dict
from torch.utils.data import Dataset

from ..ext_transforms import *


class CityScapes(Dataset):
    """Cityscapes semantic-segmentation dataset (leftImg8bit / gtFine layout).

    Expects ``cfg`` to provide:
        data_root:  dataset root containing ``leftImg8bit/`` and ``gtFine/``
        input_size: crop size forwarded to ``ExtRandomCrop`` for the train split

    ``__getitem__`` returns an ``(image, label)`` pair where ``label`` is a
    LongTensor of trainIds, with :attr:`IGNORE_INDEX` (255) marking pixels
    outside the 19 evaluation classes.
    """

    # Ignore index for pixels that do not belong to a train class.
    IGNORE_INDEX = 255

    # Raw Cityscapes labelId -> trainId for the 19 evaluation classes;
    # everything else is mapped to IGNORE_INDEX.
    LABEL_TO_TRAIN_ID = {
        0: 255,   # 'unlabeled'
        1: 255,   # 'ego vehicle'
        2: 255,   # 'rectification border'
        3: 255,   # 'out of roi'
        4: 255,   # 'static'
        5: 255,   # 'dynamic'
        6: 255,   # 'ground'
        7: 0,     # 'road'
        8: 1,     # 'sidewalk'
        9: 255,   # 'parking'
        10: 255,  # 'rail track'
        11: 2,    # 'building'
        12: 3,    # 'wall'
        13: 4,    # 'fence'
        14: 255,  # 'guard rail'
        15: 255,  # 'bridge'
        16: 255,  # 'tunnel'
        17: 5,    # 'pole'
        18: 255,  # 'polegroup'
        19: 6,    # 'traffic light'
        20: 7,    # 'traffic sign'
        21: 8,    # 'vegetation'
        22: 9,    # 'terrain'
        23: 10,   # 'sky'
        24: 11,   # 'person'
        25: 12,   # 'rider'
        26: 13,   # 'car'
        27: 14,   # 'truck'
        28: 15,   # 'bus'
        29: 255,  # 'caravan'
        30: 255,  # 'trailer'
        31: 16,   # 'train'
        32: 17,   # 'motorcycle'
        33: 18,   # 'bicycle'
    }

    # 256-entry lookup table built once at class definition:
    # _LUT[labelId] == trainId. Unmapped ids fall back to IGNORE_INDEX,
    # unlike np.vectorize(dict.get) which yields None (object array) and
    # crashes downstream. Fancy indexing into the LUT is also far faster
    # than np.vectorize on full-resolution masks.
    _LUT = np.full(256, IGNORE_INDEX, dtype=np.int64)
    for _label_id, _train_id in LABEL_TO_TRAIN_ID.items():
        _LUT[_label_id] = _train_id
    del _label_id, _train_id

    def __init__(self, split, cfg: Dict[str, dict]):
        """Index the dataset and build the transform pipeline.

        Args:
            split: one of ``'train'`` or ``'val'``.
            cfg: configuration mapping with ``data_root`` and ``input_size``.
        """
        assert split in ['train', 'val'], "split must be one of 'train', 'val'"

        self.root_dir = cfg.get('data_root')
        self.size = cfg.get('input_size')
        self.images_dir = os.path.join(self.root_dir, 'leftImg8bit', split)
        self.labels_dir = os.path.join(self.root_dir, 'gtFine', split)
        self.image_label_pairs = self._gather_image_label_pairs()

        # ImageNet statistics — the standard choice for pretrained backbones.
        normalize = ExtNormalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        if split == 'train':
            # Random crop + flip augmentation for training only.
            self.transform = ExtCompose([
                ExtRandomCrop(size=self.size),
                ExtRandomHorizontalFlip(),
                ExtToTensor(),
                normalize,
            ])
        else:
            self.transform = ExtCompose([
                ExtToTensor(),
                normalize,
            ])

        # Backward-compatible instance attribute (same name/content as the
        # original API); the LUT is the authoritative mapping internally.
        self.label_to_trainId = dict(self.LABEL_TO_TRAIN_ID)

    def _gather_image_label_pairs(self):
        """Walk ``<root>/leftImg8bit/<split>/<city>`` and pair each image
        with its ``gtFine_labelIds`` mask; warns and skips images whose
        label file is missing."""
        image_label_pairs = []
        cities = sorted(os.listdir(self.images_dir))
        for city in cities:
            city_images_dir = os.path.join(self.images_dir, city)
            city_labels_dir = os.path.join(self.labels_dir, city)
            # Skip stray files / cities present on only one side of the tree.
            if not os.path.isdir(city_images_dir) or not os.path.isdir(city_labels_dir):
                continue
            image_files = sorted([
                f for f in os.listdir(city_images_dir)
                if f.endswith('_leftImg8bit.png')
            ])
            for img_file in image_files:
                base_name = img_file.replace('_leftImg8bit.png', '')
                label_file = f"{base_name}_gtFine_labelIds.png"
                label_path = os.path.join(city_labels_dir, label_file)
                img_path = os.path.join(city_images_dir, img_file)
                if os.path.exists(label_path):
                    image_label_pairs.append((img_path, label_path))
                else:
                    print(f"Warning: Label file {label_path} does not exist for image {img_path}")
        return image_label_pairs

    def _remap_label(self, label_array):
        """Map raw Cityscapes labelIds to trainIds via the class LUT.

        Any id not in :attr:`LABEL_TO_TRAIN_ID` maps to ``IGNORE_INDEX``.
        Returns an ``int64`` ndarray with the same shape as the input.
        """
        label_array = np.asarray(label_array, dtype=np.int64)
        return self._LUT[label_array]

    def __len__(self):
        return len(self.image_label_pairs)

    def __getitem__(self, index):
        img_path, label_path = self.image_label_pairs[index]

        image = Image.open(img_path).convert('RGB')
        label = Image.open(label_path)
        if self.transform:
            # NOTE(review): assumes ExtToTensor keeps the label as raw
            # integer ids (no normalization) — confirm in ext_transforms.
            image, label = self.transform(image, label)

        # LUT remap replaces the original np.vectorize(dict.get), which
        # produced a None-filled object array (and a torch.tensor crash)
        # for any labelId missing from the mapping.
        label = self._remap_label(np.array(label, dtype=np.int64))
        label = torch.as_tensor(label, dtype=torch.long)  # no extra copy

        return image, label
