import os
import os.path as osp
import json

import torch
from torch.utils.data import Dataset, DataLoader
import torch.distributed as dist
import cv2
import numpy as np
from torchvision import transforms as t_trans
from albumentations import *
from PIL import Image

class CityScapes(Dataset):
    '''Cityscapes semantic-segmentation dataset.

    Expects ``<dataroot>/<mode>.txt`` listing one ``image_path,label_path``
    pair per line (paths relative to ``dataroot``).  Training samples get
    color jitter, a random 512x1024 crop and a random horizontal flip;
    val/test samples are resized to 1024x2048.  Raw Cityscapes label ids
    are remapped to the 19 train ids; everything else becomes the ignore
    index 255.

    __getitem__ returns ``(image, label)`` where ``image`` is a float
    CHW tensor in [0, 1] and ``label`` is a LongTensor of train ids.
    '''

    def __init__(self, dataroot, mode='train'):
        super(CityScapes, self).__init__()
        # Explicit validation instead of `assert`: asserts disappear
        # under `python -O`, silently accepting a bad mode.
        if mode not in ('train', 'val', 'test'):
            raise ValueError(
                "mode must be one of 'train', 'val', 'test', got %r" % (mode,))

        ignore_label = 255
        # Raw Cityscapes label id -> train id (255 = ignored by the loss).
        self.label_mapping = {-1: ignore_label, 0: ignore_label,
                              1: ignore_label, 2: ignore_label,
                              3: ignore_label, 4: ignore_label,
                              5: ignore_label, 6: ignore_label,
                              7: 0, 8: 1, 9: ignore_label,
                              10: ignore_label, 11: 2, 12: 3,
                              13: 4, 14: ignore_label, 15: ignore_label,
                              16: ignore_label, 17: 5, 18: ignore_label,
                              19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11,
                              25: 12, 26: 13, 27: 14, 28: 15,
                              29: ignore_label, 30: ignore_label,
                              31: 16, 32: 17, 33: 18}

        self.mode = mode
        # HWC uint8 -> CHW float tensor scaled to [0, 1].
        self.trans = t_trans.Compose([
            t_trans.ToTensor(),
        ])

        annpath = os.path.join(dataroot, mode + '.txt')

        with open(annpath, 'r') as fr:
            pairs = fr.read().splitlines()
        self.img_paths, self.lb_paths = [], []
        for pair in pairs:
            imgpth, lbpth = pair.split(',')
            self.img_paths.append(osp.join(dataroot, imgpth))
            self.lb_paths.append(osp.join(dataroot, lbpth))

        if len(self.img_paths) != len(self.lb_paths):
            raise RuntimeError('image/label list length mismatch in %s' % annpath)
        self.len = len(self.img_paths)

        # Augmentations apply identically to image and mask via albumentations.
        self.train_aug = Compose([
            #RandomScale(scale_limit=(-0.2, 0.2), p=0.5),
            RGBShift(p=0.5),
            RandomCrop(height=512, width=1024, p=1),
            HorizontalFlip(p=0.5),
        ])
        self.test_aug = Compose([
            #CenterCrop(height=640, width=960, p=1),
            Resize(height=1024, width=2048, p=1.0)
        ])

    def convert_label(self, label, inverse=False):
        '''Remap label ids in-place through ``self.label_mapping``.

        With ``inverse=False`` maps raw ids -> train ids; with
        ``inverse=True`` maps train ids back to raw ids (many-to-one
        entries such as the ignore label resolve to whichever source key
        is iterated last).  ``label`` is modified in place and returned.
        '''
        # Work from a snapshot so already-remapped values are never
        # matched again (source and target id ranges overlap).
        temp = label.copy()
        if inverse:
            for v, k in self.label_mapping.items():
                label[temp == k] = v
        else:
            for k, v in self.label_mapping.items():
                label[temp == k] = v
        return label

    def __getitem__(self, idx):
        '''Load, augment and tensorize sample ``idx``.

        Raises FileNotFoundError when the image or label file cannot be
        read, naming the offending path.
        '''
        impth, lbpth = self.img_paths[idx], self.lb_paths[idx]
        img = cv2.imread(impth)
        # cv2.imread returns None on failure instead of raising; fail
        # loudly here rather than with a confusing error downstream.
        if img is None:
            raise FileNotFoundError('cannot read image: %s' % impth)
        img = img[:, :, ::-1].copy()  # BGR -> RGB
        label = cv2.imread(lbpth, 0)  # flag 0: load as single-channel grayscale
        if label is None:
            raise FileNotFoundError('cannot read label: %s' % lbpth)

        if self.mode == 'train':
            auged = self.train_aug(image=img, mask=label)
        else:
            auged = self.test_aug(image=img, mask=label)

        ig_data = auged['image']
        mask = auged['mask']
        ml = self.convert_label(mask)

        ig_data = Image.fromarray(ig_data)
        ig_data = self.trans(ig_data)
        mask = torch.from_numpy(ml).long()
        return ig_data, mask

    def __len__(self):
        return self.len








if __name__ == "__main__":
    # Smoke test: pull one batch from the validation split and print
    # the per-image tensor sizes.  Uses the module-level DataLoader
    # import (the previous local re-import and the unused tqdm import
    # were removed).
    ds = CityScapes('/home/dsl/dataset/cityspace', mode='val')
    dl = DataLoader(ds,
                    batch_size=4,
                    shuffle=True,
                    num_workers=4,
                    drop_last=True)
    for imgs, label in dl:
        print(len(imgs))
        for el in imgs:
            print(el.size())
        break