import os
import warnings
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms as T
from utils.config import sys_root

# Silence UserWarnings process-wide (e.g. torchvision/PIL deprecation chatter).
# NOTE(review): this suppresses ALL UserWarnings for the whole process on
# import, which can hide real problems — consider scoping with
# warnings.catch_warnings() or filtering by module instead.
warnings.filterwarnings("ignore", category=UserWarning)

class BaseDataset(Dataset):
    """Abstract base class for the datasets in this file.

    Subclasses must override ``__len__`` and ``__getitem__``. The base
    implementations raise ``NotImplementedError`` so that using an
    un-overridden method fails loudly instead of silently returning
    ``None`` (which would surface later as a confusing ``TypeError``
    from ``len()`` or the DataLoader).
    """

    def __init__(self, args, mode):
        # args: experiment/config namespace; mode: 'train' / 'val' / 'test'.
        self.args = args
        self.mode = mode

    def __len__(self):
        raise NotImplementedError("Subclasses must implement __len__")

    def __getitem__(self, idx):
        raise NotImplementedError("Subclasses must implement __getitem__")

    class ToNumpy:
        """Callable transform converting a PIL image / array-like to np.ndarray."""

        def __call__(self, sample):
            return np.array(sample)

class WaterScenes(BaseDataset):
    """WaterScenes RGB + radar depth-completion dataset.

    Each sample pairs an RGB frame with a sparse 5-frame radar depth map;
    ground truth is the segmentation-filtered 5-frame radar depth.

    - train: input is 5-frame radar randomly subsampled at 30~70%;
      GT is the segmentation-filtered 5-frame radar.
    - val/test: input is 5-frame radar subsampled at a fixed 30/50/70%
      rate (``num_sample_test``), or the unsampled segmentation-filtered
      radar when ``args.use_raw_depth_as_input`` is set.
    """

    def __init__(self, args, mode, num_sample_test=70):
        """
        Args:
            args: config namespace; must provide ``use_raw_depth_as_input``
                and ``patch_height`` / ``patch_width`` (may be None).
            mode: one of 'train', 'val', 'test' — selects the sample list.
            num_sample_test: fixed radar subsampling rate (%) used for the
                val/test input; must be 30, 50 or 70.

        Raises:
            FileNotFoundError: if the '<mode>.txt' sample list is missing.
            ValueError: if the configured patch height is unsupported.
        """
        super(WaterScenes, self).__init__(args, mode)

        # Fixed random sampling rate (%) for the val/test radar input.
        self.num_sample_test = num_sample_test
        assert self.num_sample_test in [30, 50, 70]

        self.use_raw_depth_as_input = self.args.use_raw_depth_as_input
        self.use_rel_depth = False

        if mode != 'train' and mode != 'val' and mode != 'test':
            raise NotImplementedError

        data_root = '{}/WaterScenes/'.format(sys_root)

        sample_list_path = os.path.join(data_root, mode + '.txt')
        if not os.path.exists(sample_list_path):
            # Bug fix: the original raised FileExistsError, which is the
            # wrong exception for a *missing* file (and carried no path).
            raise FileNotFoundError(sample_list_path)
        with open(sample_list_path, 'r') as f:
            self.sample_list_rgb = f.readlines()

        self.sample_list_rgb = [os.path.join(data_root, line.strip())
                                for line in self.sample_list_rgb]

        def derive(subdir, ext='.npz'):
            # Map each RGB path to the sibling file under `subdir` with `ext`.
            return [p.replace('/images/', subdir).replace('.jpg', ext)
                    for p in self.sample_list_rgb]

        # GT is always the segmentation-filtered 5-frame radar depth.
        self.sample_list_gt = derive('/radar_npz/VOCradar320_seg_5_frames/')
        if mode == 'train':
            # Train input: 5-frame radar randomly subsampled at 30~70%.
            self.sample_list_dep = derive('/radar_npz/VOCradar320_seg_5_frames_rand3070/')
        else:
            if self.use_raw_depth_as_input:
                # 5-frame point cloud with invalid points removed via depth
                # and semantic labels; no extra subsampling.
                self.sample_list_dep = derive('/radar_npz/VOCradar320_seg_5_frames/')
            else:
                # Fixed subsampling rate (30/50/70%) for val/test input.
                self.sample_list_dep = derive(
                    '/radar_npz/VOCradar320_seg_5_frames_rand' + str(int(self.num_sample_test)) + '/')

        if self.use_rel_depth:
            # Monocular relative-depth maps (Depth Anything v2) as extra input.
            self.sample_list_rel = derive('/depth/depth_anything_v2_l_fit/', '.png')

        if args.patch_height is None:
            args.patch_height = 180
            args.patch_width = 320

        self.height = args.patch_height
        self.width = args.patch_width

        if self.height == 180:
            self.crop_size = (180, 320)
        elif self.height == 1080:
            self.crop_size = (1080, 1920)
        else:
            # Bug fix: the original did `raise print(...)` — print() returns
            # None, so Python raised "TypeError: exceptions must derive from
            # BaseException" and masked the real error.
            raise ValueError("Check the self.height !!")

        # Build the (deterministic) transforms once instead of on every
        # __getitem__ call.
        self._t_rgb = T.Compose([
            T.Resize(self.height),
            T.CenterCrop(self.crop_size),
            T.ToTensor(),
            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        # Nearest-neighbour resize keeps sparse depth values unsmoothed
        # (bilinear would blend valid points with empty pixels).
        self._t_dep = T.Compose([
            T.Resize(self.height, T.InterpolationMode.NEAREST),
            T.CenterCrop(self.crop_size),
            self.ToNumpy(),
            T.ToTensor()
        ])
        self._t_rel = T.Compose([
            T.Resize(self.height),
            T.CenterCrop(self.crop_size),
            self.ToNumpy()
        ])

    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.sample_list_rgb)

    def __getitem__(self, idx):
        """Load one sample.

        Returns a dict with keys:
            'rgb': normalized RGB tensor,
            'dep': sparse radar depth tensor (input),
            'gt' : segmentation-filtered radar depth tensor (target),
            'K'  : duplicate of 'dep' (see NOTE below),
            'fn' : sample filename stem,
            'rel': (optional) relative-depth ndarray when enabled.
        """
        rgb_path = self.sample_list_rgb[idx]
        dep_path = self.sample_list_dep[idx]
        gt_path = self.sample_list_gt[idx]

        rgb = Image.open(rgb_path).convert('RGB')
        dep = np.load(dep_path)['arr_0'][0]  # range in metres, h*w
        gt = np.load(gt_path)['arr_0'][0]

        # Square 320x320 radar maps: crop rows 70:250 to the 180x320 image region.
        if dep.shape == gt.shape == (320, 320):
            dep = dep[70:250, :]
            gt = gt[70:250, :]

        # Wrap as 32-bit float ('F' mode) PIL images so the torchvision
        # transforms below can operate on them.
        dep = Image.fromarray(np.asarray(dep, dtype=np.float32), mode='F')
        gt = Image.fromarray(np.asarray(gt, dtype=np.float32), mode='F')

        rgb = self._t_rgb(rgb)
        dep = self._t_dep(dep)
        gt = self._t_dep(gt)
        fn, _ = os.path.splitext(os.path.basename(rgb_path))

        # NOTE(review): 'K' is a duplicate reference to the depth tensor, not
        # camera intrinsics — confirm downstream consumers expect this.
        output = {'rgb': rgb, 'dep': dep, 'gt': gt, 'K': dep, 'fn': fn}

        if self.use_rel_depth:
            rel = Image.open(self.sample_list_rel[idx]).convert('L')
            output['rel'] = self._t_rel(rel)

        return output
