'''
Function:
    Load the Inria dataset
Author:
    Mi Zhang
'''
import os
import numpy as np
import cv2
import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import nn, ops, Parameter, Tensor
from .base import BaseDataset


# '''InriaDataset (old commented-out version, superseded by the class below)'''
# class InriaDataset(BaseDataset):
#     num_classes = 2
#     classnames = ['__backgroud__', 'building']
#     def __init__(self, mode, logger_handle, dataset_cfg):
#         super(InriaDataset, self).__init__(mode, logger_handle, dataset_cfg)
#         # obtain the dirs
#         setmap_dict = {'train': 'train', 'val': 'valid'}
#         rootdir = dataset_cfg['rootdir']
#         # self.image_dir = os.path.join(rootdir, f"cropped300/{setmap_dict[dataset_cfg['set']]}", 'img')
#         # self.ann_dir = os.path.join(rootdir, f"cropped300/{setmap_dict[dataset_cfg['set']]}", 'label')
#         self.image_dir = os.path.join(rootdir, f"cropped300_small/{setmap_dict[dataset_cfg['set']]}", 'image')
#         self.ann_dir = os.path.join(rootdir, f"cropped300_small/{setmap_dict[dataset_cfg['set']]}", 'binary_map')
#         # obatin imageids
#         self.imageids = []
#         if self.mode == 'TRAIN':
#             # print("train idx txt file: ", os.path.join(rootdir, setmap_dict[dataset_cfg['set']] + '.txt'))
#             print("train idx txt file: ", os.path.join(rootdir, setmap_dict[dataset_cfg['set']] + '_small.txt'))
#         else:
#             # print("val idx txt file: ", os.path.join(rootdir, setmap_dict[dataset_cfg['set']] + '.txt'))
#             print("val idx txt file: ", os.path.join(rootdir, setmap_dict[dataset_cfg['set']] + '_small.txt'))

#         # for line in open(os.path.join(rootdir, setmap_dict[dataset_cfg['set']]+'.txt'), 'r').readlines():
#         for line in open(os.path.join(rootdir, setmap_dict[dataset_cfg['set']] + '_small.txt'), 'r').readlines():
#             if line.strip(): self.imageids.append(line.strip())
#         self.imageids = [str(_id) for _id in self.imageids]

#         self.simu_data = np.random.rand(3, 640, 640).astype(np.float32)
#         self.simu_label = np.random.randint(0,2, (640, 640)).astype(np.int32)
#         self.simu_edge = np.random.randint(0,1, (640, 640)).astype(np.int32)

#     '''pull item'''
#     def __getitem__(self, index):
#         imageid = self.imageids[index]
#         imagepath = os.path.join(self.image_dir, imageid+'.tif')
#         annpath = os.path.join(self.ann_dir, imageid+'.tif')
#         if not os.path.isfile(annpath):
#             print('annpath: ', annpath)
#         sample = self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
#         sample.update({'id': imageid})
#         if self.mode == 'TRAIN':
#             sample['segmentation'][sample['segmentation'] > 0] = 1.
#             # sample = self.synctransform(sample, 'without_totensor_normalize_pad')
#             # sample['edge'] = self.generateedge(sample['segmentation'].copy())
#             sample = self.synctransform(sample, 'only_totensor_normalize_pad')
#         else:
#             sample['groundtruth'][sample['groundtruth'] > 0] = 1.
#             sample = self.synctransform(sample, 'all')
#         # print('*************************')
#         # print(sample['image'].shape)
#         # print( sample['segmentation'].shape)
#         # print('*************************')

#         # sample = {
#         #     'image': self.simu_data, 
#         #     'segmentation': self.simu_label, 
#         #     'edge': self.generateedge(self.simu_label.copy()), 
#         #     'width': self.simu_data.shape[1], 
#         #     'height': self.simu_data.shape[0]
#         # }

#         # sample_tuple = zip(sample.keys(), sample.values())

#         # return  sample['image'], sample['segmentation'], sample['edge']
#         # return sample_tuple
#         return sample['image'], sample['segmentation']
#         # return self.simu_data, self.simu_label
#     '''length'''
#     def __len__(self):
#         return len(self.imageids)


'''InriaDataset'''
class InriaDataset(BaseDataset):
    """Inria aerial building-segmentation dataset (binary: background vs building).

    Expects the ``cropped300_small`` layout under ``dataset_cfg['rootdir']``:
        <rootdir>/cropped300_small/<set>/image       -- *.tif images
        <rootdir>/cropped300_small/<set>/binary_map  -- *.tif binary masks
        <rootdir>/<set>_small.txt                    -- one image id per line
    """
    num_classes = 2
    # NOTE(review): '__backgroud__' is a typo for '__background__', but callers may
    # compare against this exact string, so it is preserved as-is.
    classnames = ['__backgroud__', 'building']

    def __init__(self, mode, logger_handle, dataset_cfg):
        """Build the image/annotation directory paths and load the image-id list.

        Args:
            mode: 'TRAIN' for training, anything else for evaluation.
            logger_handle: logger forwarded to BaseDataset (prints are still used
                below for the index-file path; consider routing through the logger).
            dataset_cfg: dict with at least 'rootdir' and 'set' ('train' or 'val').
        """
        super(InriaDataset, self).__init__(mode, logger_handle, dataset_cfg)
        # obtain the dirs
        setmap_dict = {'train': 'train', 'val': 'valid'}
        rootdir = dataset_cfg['rootdir']
        subset = setmap_dict[dataset_cfg['set']]
        self.image_dir = os.path.join(rootdir, f"cropped300_small/{subset}", 'image')
        self.ann_dir = os.path.join(rootdir, f"cropped300_small/{subset}", 'binary_map')
        # obtain imageids from the index file
        idx_path = os.path.join(rootdir, subset + '_small.txt')
        if self.mode == 'TRAIN':
            print("train idx txt file: ", idx_path)
        else:
            print("val idx txt file: ", idx_path)
        self.imageids = []
        # Use a context manager so the handle is closed deterministically
        # (the original open(...).readlines() leaked the file handle).
        with open(idx_path, 'r') as idx_file:
            for line in idx_file:
                if line.strip():
                    self.imageids.append(line.strip())
        self.imageids = [str(_id) for _id in self.imageids]

        # Random stand-in sample kept for debugging; not used by __getitem__.
        self.simu_data = np.random.rand(3, 640, 640).astype(np.float32)
        self.simu_label = np.random.randint(0, 2, (640, 640)).astype(np.int32)
        self.simu_edge = np.random.randint(0, 1, (640, 640)).astype(np.int32)

    def __len__(self):
        """Return the number of samples in this split."""
        return len(self.imageids)

    def __getitem__(self, index):
        """Load one sample and return ``(image, segmentation, edge)``.

        Masks are binarized in place (any positive value -> 1) before the
        transform pipeline runs.
        """
        imageid = self.imageids[index]
        imagepath = os.path.join(self.image_dir, imageid + '.tif')
        annpath = os.path.join(self.ann_dir, imageid + '.tif')
        if not os.path.isfile(annpath):
            print('annpath: ', annpath)
        sample = self.read(imagepath, annpath, self.dataset_cfg.get('with_ann', True))
        sample.update({'id': imageid})
        if self.mode == 'TRAIN':
            # Binarize, derive the edge map from the (still ndarray) mask, then
            # finish with tensor conversion / normalization / padding.
            sample['segmentation'][sample['segmentation'] > 0] = 1.
            sample = self.synctransform(sample, 'without_totensor_normalize_pad')
            sample['edge'] = self.generateedge(sample['segmentation'].copy())
            sample = self.synctransform(sample, 'only_totensor_normalize_pad')
        else:
            sample['groundtruth'][sample['groundtruth'] > 0] = 1.
            sample = self.synctransform(sample, 'all')
        # NOTE(review): in non-TRAIN mode no 'edge' key is created above; unless
        # synctransform(..., 'all') adds one in BaseDataset, the return below
        # raises KeyError during evaluation — confirm before eval-mode iteration.
        return sample['image'], sample['segmentation'], sample['edge']

    def preprocess_and_augmentation(self, image, label):
        """Normalize an HWC image, binarize-cast the label, and random-flip both.

        Reference:
            https://gitee.com/luojianet/models/blob/master/official/cv/OCRNet/src/basedataset.py

        Returns:
            (image, label): image as float32 CHW, label as int32 HW.
        """
        image = self.input_transform(image)  # HWC, normalized float32
        label = self.label_transform(label)  # HW, int32

        image = image.transpose((2, 0, 1))  # HWC -> CHW

        image, label = self.flip(image, label)

        # image CHW, label HW
        return image, label

    def input_transform(self, image):
        """Convert a BGR uint8 image to a normalized RGB float32 array.

        NOTE(review): the mean/std constants are named POTSDAM_* but this is the
        Inria dataset — verify these statistics are intended here.
        """
        POTSDAM_DEFAULT_MEAN = (0.491, 0.482, 0.447)
        POTSDAM_DEFAULT_STD = (0.247, 0.243, 0.262)
        image = image.astype(np.float32)[:, :, ::-1]  # BGR2RGB
        image = image / 255.0
        image -= POTSDAM_DEFAULT_MEAN
        image /= POTSDAM_DEFAULT_STD
        return image

    def label_transform(self, label):
        """Cast a label map to an int32 ndarray."""
        return np.array(label).astype('int32')

    def flip(self, image, label):
        """Randomly mirror image (CHW) and label (HW) horizontally with p=0.5."""
        # np.random.choice(2) * 2 - 1 yields -1 (flip) or 1 (identity) as a step.
        flip = np.random.choice(2) * 2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
        return image, label