'''
Function:
    load the VSPW video semantic segmentation dataset
Author:
    Zhenchao Jin
'''
import os
import random

import numpy as np
import pandas as pd

from .base import *


'''VSPW dataset'''
class VSPWDataset(BaseDataset):
    """VSPW video semantic segmentation dataset.

    Each sample is a clip of frames taken from one video: a reference frame
    (with its annotation) plus ``clip_num - 1`` auxiliary frames chosen either
    at fixed dilations or at random offsets within ``sequence_range``.
    """
    num_classes = 150
    classnames = [
        'wall', 'ceiling', 'door', 'stair', 'ladder', 'escalator', 'Playground_slide', 'handrail_or_fence', 'window',
        'rail', 'goal', 'pillar', 'pole', 'floor', 'ground', 'grass', 'sand', 'athletic_field', 'road', 'path', 'crosswalk', 'building', 'house',
        'bridge', 'tower', 'windmill', 'well_or_well_lid', 'other_construction', 'sky', 'mountain', 'stone', 'wood', 'ice', 'snowfield',
        'grandstand', 'sea', 'river', 'lake', 'waterfall', 'water', 'billboard_or_Bulletin_Board', 'sculpture', 'pipeline', 'flag',
        'parasol_or_umbrella', 'cushion_or_carpet', 'tent', 'roadblock', 'car', 'bus', 'truck', 'bicycle', 'motorcycle', 'wheeled_machine',
        'ship_or_boat', 'raft', 'airplane', 'tyre', 'traffic_light', 'lamp', 'person', 'cat', 'dog', 'horse', 'cattle', 'other_animal', 'tree',
        'flower', 'other_plant', 'toy', 'ball_net', 'backboard', 'skateboard', 'bat', 'ball', 'cupboard_or_showcase_or_storage_rack', 'box',
        'traveling_case_or_trolley_case', 'basket', 'bag_or_package', 'trash_can', 'cage', 'plate', 'tub_or_bowl_or_pot', 'bottle_or_cup',
        'barrel', 'fishbowl', 'bed', 'pillow', 'table_or_desk', 'chair_or_seat', 'bench', 'sofa', 'shelf', 'bathtub', 'gun', 'commode', 'roaster',
        'other_machine', 'refrigerator', 'washing_machine', 'Microwave_oven', 'fan', 'curtain', 'textiles', 'clothes', 'painting_or_poster',
        'mirror', 'flower_pot_or_vase', 'clock', 'book', 'tool', 'blackboard', 'tissue', 'screen_or_television', 'computer', 'printer',
        'Mobile_phone', 'keyboard', 'other_electronic_product', 'fruit', 'food', 'instrument', 'train'
    ]
    # pad the classname list with placeholders until it matches num_classes
    while num_classes > len(classnames):
        classnames += ['none']
    # map raw annotation ids to training labels: 0 (unlabeled) -> 255 (ignore index), i -> i - 1 otherwise
    clsid2label = {0: 255}
    for i in range(1, num_classes + 1):
        clsid2label[i] = i - 1
    assert num_classes == len(classnames)
    '''initialize'''
    def __init__(self, mode, logger_handle, dataset_cfg, **kwargs):
        """Read the video list of the requested split and index every frame.

        Args:
            mode: 'TRAIN' or anything else (evaluation); controls the clip
                sampling strategy in ``__getitem__``.
            logger_handle: logger forwarded to ``BaseDataset``.
            dataset_cfg: dict providing 'rootdir', 'set', 'clip_num',
                'dilation' (comma-separated ints), 'random_select' and
                'sequence_range'.
        """
        super(VSPWDataset, self).__init__(mode, logger_handle, dataset_cfg, **kwargs)
        # obtain the dirs
        rootdir = dataset_cfg['rootdir']
        self.dataroot = os.path.join(rootdir, 'data')
        # clip sampling configuration
        self.split = dataset_cfg['set']
        self.clip_num = dataset_cfg['clip_num']
        self.dilation = [int(dil) for dil in dataset_cfg['dilation'].split(',')]
        self.random_select = dataset_cfg['random_select']
        self.sequence_range = dataset_cfg['sequence_range']
        # one video name per line in <split>.txt (rstrip is safe even when the
        # file lacks a trailing newline, unlike line[:-1])
        with open(os.path.join(rootdir, self.split + '.txt')) as f:
            self.videolists = [line.rstrip('\n') for line in f]
        # per-video sorted list of frame filenames
        self.imageids = {}
        for video in self.videolists:
            v_path = os.path.join(self.dataroot, video, 'origin')
            self.imageids[video] = sorted(os.listdir(v_path))
    '''remap raw mask ids in place: 0 (unlabeled) -> 255 (ignore), 1..150 -> 0..149'''
    def segm_transform(self, segm):
        segm[segm == 0] = 255
        segm = segm - 1
        # 255 became 254 after the shift above, restore the ignore index
        segm[segm == 254] = 255
        return segm
    '''read one frame of a video, together with its annotation when available'''
    def _readframe(self, video, filename):
        imagepath = os.path.join(self.dataroot, video, 'origin', filename)
        annpath = os.path.join(self.dataroot, video, 'mask', filename).replace('.jpg', '.png')
        return self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
    '''pull item'''
    def __getitem__(self, index):
        video = self.videolists[index]
        # work on a copy: the padding below must never mutate self.imageids,
        # otherwise the cached frame list grows permanently across epochs
        imglist = list(self.imageids[video])
        if self.mode == 'TRAIN':
            # temporal-order flip augmentation
            if np.random.random() < 0.5:
                imglist = imglist[::-1]
            # largest offset an auxiliary frame can have from the reference frame
            max_offset = self.sequence_range if self.random_select else self.dilation[-1]
            # pad short videos by repeating the last frame so a full clip always fits
            while len(imglist) <= max_offset:
                imglist.append(imglist[-1])
            # reference-frame index, leaving room for the largest offset
            idx = int(np.random.choice(len(imglist) - max_offset))
            if self.random_select:
                # clip_num - 1 distinct random offsets within [1, sequence_range]
                dils = sorted(random.sample(range(1, self.sequence_range + 1), self.clip_num - 1))
            else:
                dils = self.dilation
            this_step = [idx] + [idx + dil for dil in dils]
            # reference frame with its annotation
            sample = self._readframe(video, imglist[this_step[0]])
            clip_imgs, clip_segs = [], []
            for i in this_step[1:]:
                temp = self._readframe(video, imglist[i])
                clip_imgs.append(temp['image'])
                clip_segs.append(self.segm_transform(temp['segmentation']))
            sample['clip_imgs'] = clip_imgs
            sample['clip_segs'] = clip_segs
            sample['segmentation'] = self.segm_transform(sample['segmentation'])
            # the edge map must be generated before to-tensor / normalize / pad
            sample = self.synctransform(sample, 'without_totensor_normalize_pad')
            sample['edge'] = self.generateedge(sample['segmentation'].copy())
            sample = self.synctransform(sample, 'only_totensor_normalize_pad')
        else:
            # evaluation: the first frame is the reference, every frame of the
            # video forms the clip
            sample = self._readframe(video, imglist[0])
            clip_imgs, clip_segs = [], []
            for filename in imglist:
                temp = self._readframe(video, filename)
                clip_imgs.append(temp['image'])
                # NOTE: raw ids are kept here (no segm_transform), matching the
                # original evaluation pipeline
                clip_segs.append(temp['segmentation'])
            sample['clip_imgs'] = clip_imgs
            sample['clip_segs'] = clip_segs
            sample = self.synctransform(sample, 'all')
        return sample
    '''length: one sample per video'''
    def __len__(self):
        return len(self.videolists)