import os
import numpy as np
import cv2
import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import nn, ops, Parameter, Tensor
from .base import BasetestDataset

class VaihingenDataset(BasetestDataset):
    """ISPRS Vaihingen semantic-segmentation dataset (6 classes).

    Expects a layout of ``rootdir/img_dir/<set>``, ``rootdir/ann_dir/<set>``
    and an id-list file ``rootdir/<set>_image_id.txt`` with one image id
    (filename) per line.
    """
    num_classes = 6
    classnames = ['impervious_surface', 'building', 'low_vegetation', 'tree', 'car', 'clutter']

    def __init__(self, mode, logger_handle, dataset_cfg):
        """Resolve image/annotation dirs and load the image-id list.

        Args:
            mode: 'TRAIN' or evaluation mode string (passed to the base class).
            logger_handle: logger object forwarded to the base class.
            dataset_cfg: dict with at least 'rootdir' and 'set' ('train'/'val').
        """
        super(VaihingenDataset, self).__init__(mode, logger_handle, dataset_cfg)
        # obtain the dirs
        setmap_dict = {'train': 'train', 'val': 'val'}
        rootdir = dataset_cfg['rootdir']
        subset = setmap_dict[dataset_cfg['set']]
        self.image_dir = os.path.join(rootdir, 'img_dir/' + f"{subset}")
        self.ann_dir = os.path.join(rootdir, 'ann_dir/' + f"{subset}")
        # the id-list file is the same regardless of mode; only the log prefix differs
        idfile = os.path.join(rootdir, subset + '_image_id.txt')
        if self.mode == 'TRAIN':
            print("train idx txt file: ", idfile)
        else:
            print("val idx txt file: ", idfile)
        # use a context manager so the file handle is always closed
        with open(idfile, 'r') as fp:
            # skip blank lines; readlines() output is already str, no extra cast needed
            self.imageids = [line.strip() for line in fp if line.strip()]

    def __getitem__(self, index):
        """Load one sample (image, segmentation, edge) by index."""
        imageid = self.imageids[index]
        imagepath = os.path.join(self.image_dir, imageid)
        annpath = os.path.join(self.ann_dir, imageid)
        sample = self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
        sample.update({'id': imageid})
        if self.mode == 'TRAIN':
            # edge map must be generated between the geometric transforms and
            # the tensor/normalize/pad step so it stays aligned with the mask
            sample = self.synctransform(sample, 'without_totensor_normalize_pad')
            sample['edge'] = self.generateedge(sample['segmentation'].copy())
            sample = self.synctransform(sample, 'only_totensor_normalize_pad')
        else:
            sample = self.synctransform(sample, 'all')
        # NOTE(review): in non-TRAIN mode 'edge' is never set here — presumably
        # self.read or synctransform('all') supplies it; verify against the base class
        return sample['image'], sample['segmentation'], sample['edge']

    def __len__(self):
        """Number of images in the selected split."""
        return len(self.imageids)
