import os
import numpy as np
import cv2
import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import nn, ops, Parameter, Tensor
from .base import BasetestDataset
import h5py

class LandslideDataset(BasetestDataset):
    """Landslide4Sense dataset: 14-band HDF5 image patches with binary masks.

    Each sample is a pair of HDF5 files under `rootdir`: an image file with an
    'img' dataset and a mask file with a 'mask' dataset. The split index file
    (`TrainData.txt` / `ValidData.txt`) lists image paths relative to `rootdir`.
    """
    num_classes = 2
    classnames = ['Non-Landslide', 'Landslide']

    def __init__(self, mode, logger_handle, dataset_cfg):
        super(LandslideDataset, self).__init__(mode, logger_handle, dataset_cfg)
        # Map the configured split name to the on-disk index-file stem.
        setmap_dict = {'train': 'TrainData', 'val': 'ValidData'}
        rootdir = dataset_cfg['rootdir']
        # Image and annotation files live directly under the root directory;
        # the index file entries are paths relative to it.
        self.image_dir = rootdir
        self.ann_dir = rootdir
        idx_path = os.path.join(rootdir, setmap_dict[dataset_cfg['set']] + '.txt')
        if self.mode == 'TRAIN':
            print("train idx txt file: ", idx_path)
        else:
            print("val idx txt file: ", idx_path)
        # Collect non-empty lines as image ids; `with` closes the handle
        # (the original `open(...).readlines()` leaked the file object).
        self.imageids = []
        with open(idx_path, 'r') as idx_file:
            for line in idx_file:
                if line.strip():
                    self.imageids.append(line.strip())

    def __getitem__(self, index):
        """Return the (image, label) tensor pair for the sample at `index`.

        Returns:
            tuple: (image Tensor of shape (14, H, W) float32,
                    label Tensor of shape (1, H, W) int32).

        Raises:
            ValueError: if `fusion_method` in the dataset config is not
                'input_add' (the only mode this dataset supports).
        """
        imageid = self.imageids[index]
        imagepath = os.path.join(self.image_dir, imageid)
        # Mask files mirror the image paths with 'img'/'image' replaced by 'mask'.
        annpath = os.path.join(self.ann_dir, imageid.replace('img', 'mask').replace('image', 'mask'))
        # Read the annotation mask and add a leading channel axis -> (1, H, W).
        with h5py.File(annpath, 'r') as af:
            sample_label = Tensor(af['mask'][:], luojianet.int32).unsqueeze(0)
        # Read the 14-band image and move it to channel-first layout -> (14, H, W).
        # The ndarray is passed to Tensor directly; the original `.tolist()`
        # round-trip through nested Python lists was pure overhead.
        # NOTE(review): assumes the stored layout is (H, W, 14) — confirm against the data.
        with h5py.File(imagepath, 'r') as f:
            sample_image = Tensor(f['img'][:], luojianet.float32).permute(2, 0, 1)
        # Slice out the configured modalities, e.g. data_mode == 'optical_dsm'.
        data_type_list = self.dataset_cfg['data_mode'].split('_')
        sample_sar = sample_image[13, :, :] if 'sar' in data_type_list else None
        sample_optical = sample_image[:3, :, :] if 'optical' in data_type_list else None
        sample_hsi = sample_image[:12, :, :] if 'hsi' in data_type_list else None
        sample_dsm = sample_image[12:13, :, :] if 'dsm' in data_type_list else None
        sample = {
            'id': imageid,
            'sar': sample_sar,
            'optical': sample_optical,  # was computed but never stored before
            'hsi': sample_hsi,
            'dsm': sample_dsm,
            'image': sample_image,
            'label': sample_label,
        }
        if self.dataset_cfg['fusion_method'] == 'input_add':
            return sample['image'], sample['label']
        # BUG FIX: the original `assert '<message>'` asserted a non-empty string
        # literal, which is always truthy, so the check never fired and the
        # method silently returned None. Raise an explicit error instead.
        raise ValueError(
            'landslide4sense need use input_add fusion method, '
            'please check the fusion_method cfg and in_channels of backbone cfg'
        )

    def __len__(self):
        """Number of samples listed in the split index file."""
        return len(self.imageids)
