import cv2
import os
import glob
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms

# ImageNet channel statistics in RGB order (images are converted from
# BGR to RGB before transforms.Normalize is applied).
# NOTE(review): the previous values were rotated one position out of
# order ([B, R, G] — matching neither RGB nor BGR); fixed to the
# standard torchvision ImageNet values.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

class CityScapeTrainDataset(Dataset):
    """Cityscapes training split.

    Each item is a pair ``(img, mask)`` where ``img`` is a normalized RGB
    float tensor of shape (3, 512, 1024) and ``mask`` is a (512, 1024)
    tensor of label-train ids.
    """

    def __init__(self, cityscape_path) -> None:
        """
        Args:
            cityscape_path: dataset root, expected to contain
                ``leftImg8bit/train/<city>/*_leftImg8bit.png`` images and
                ``gtFine/train/<city>/*_gtFine_labelTrain19Ids.png`` masks.
        """
        super().__init__()
        self.path = cityscape_path
        # e.g. <root>/leftImg8bit/train/aachen/aachen_000114_000019_leftImg8bit.png
        self.images = glob.glob(os.path.join(cityscape_path, "leftImg8bit", "train") + "/*/*.png")
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])

    def __getitem__(self, i):
        # Derive the ground-truth path from the image path:
        # ".../leftImg8bit/train/<city>/xxx_leftImg8bit.png"
        #   -> "<root>/gtFine/train/<city>/xxx_gtFine_labelTrain19Ids.png"
        relative_path = self.images[i].split("leftImg8bit")[1] + "gtFine_labelTrain19Ids.png"
        gt = os.path.join(self.path, ("gtFine" + relative_path))

        img = cv2.imread(self.images[i], -1)
        # cv2.imread returns None (no exception) on failure; fail loudly
        # instead of crashing later inside cv2.resize with a cryptic error.
        if img is None:
            raise FileNotFoundError(f"Failed to read image: {self.images[i]}")
        # Ground truth is a single-channel (grayscale) label-id image.
        mask = cv2.imread(gt, -1)
        if mask is None:
            raise FileNotFoundError(f"Failed to read mask: {gt}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Downscale for training; nearest-neighbour keeps label ids intact.
        img = cv2.resize(img, (1024, 512), interpolation=cv2.INTER_LINEAR)
        mask = cv2.resize(mask, (1024, 512), interpolation=cv2.INTER_NEAREST)
        # To tensors: image is normalized, mask keeps its raw integer ids.
        img = self.transform(img)
        mask = torch.tensor(mask)
        return img, mask

    def __len__(self):
        return len(self.images)

class CityScapeValDataset(Dataset):
    """Cityscapes validation split.

    Each item is a pair ``(img, mask)`` where ``img`` is a normalized RGB
    float tensor of shape (3, 512, 1024) and ``mask`` is a (512, 1024)
    tensor of label-train ids.
    """

    def __init__(self, cityscape_path) -> None:
        """
        Args:
            cityscape_path: dataset root, expected to contain
                ``leftImg8bit/val/<city>/*_leftImg8bit.png`` images and
                ``gtFine/val/<city>/*_gtFine_labelTrain19Ids.png`` masks.
        """
        super().__init__()
        self.path = cityscape_path
        self.images = glob.glob(os.path.join(cityscape_path, "leftImg8bit", "val") + "/*/*.png")
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])

    def __getitem__(self, i):
        # Image path -> corresponding gtFine label-id mask path.
        relative_path = self.images[i].split("leftImg8bit")[1] + "gtFine_labelTrain19Ids.png"
        gt = os.path.join(self.path, ("gtFine" + relative_path))

        cvimg = cv2.imread(self.images[i], -1)
        # cv2.imread returns None (no exception) on failure; fail loudly.
        if cvimg is None:
            raise FileNotFoundError(f"Failed to read image: {self.images[i]}")
        # Ground truth is a single-channel (grayscale) label-id image.
        mask = cv2.imread(gt, -1)
        if mask is None:
            raise FileNotFoundError(f"Failed to read mask: {gt}")
        # BUGFIX: convert BGR -> RGB before normalization, matching the
        # training dataset; previously val images were normalized with
        # channels in BGR order, skewing evaluation.
        cvimg = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
        cvimg = cv2.resize(cvimg, (1024, 512), interpolation=cv2.INTER_LINEAR)
        mask = cv2.resize(mask, (1024, 512), interpolation=cv2.INTER_NEAREST)
        img = self.transform(cvimg)
        mask = torch.tensor(mask)
        return img, mask

    def __len__(self):
        return len(self.images)

def loadcityscape(city_path, mode="train"):
    """Build a Cityscapes dataset for the given split.

    Args:
        city_path: dataset root directory.
        mode: which split to load, ``"train"`` or ``"val"``.

    Returns:
        A ``CityScapeTrainDataset`` or ``CityScapeValDataset``.

    Raises:
        ValueError: if ``mode`` is not a known split (previously this
            silently returned ``None``).
    """
    if mode == "train":
        return CityScapeTrainDataset(city_path)
    if mode == "val":
        return CityScapeValDataset(city_path)
    raise ValueError(f"Unknown mode {mode!r}; expected 'train' or 'val'")


if __name__ == '__main__':
    # Smoke test: build the training dataset and fetch a single sample.
    dataset = CityScapeTrainDataset("/workspace/wzj/dataset/cityscape")
    sample = dataset[122]