# Author: Xiangtai Li
# Email: lxtpku@pku.edu.cn

import mindspore
import os.path as osp
import numpy as np
import random
import cv2
from mindspore import dataset as ds
from mindspore import Tensor
from mindspore import context
from mindspore.context import ParallelMode
from src.logger import Logger as Log
import argparse

def str2bool(v):
    """argparse type: parse a truthy/falsy string into a bool.

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive);
    anything else raises ``argparse.ArgumentTypeError``.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class Cityscapes:
    """Cityscapes semantic-segmentation training dataset.

    Yields ``(image, label)`` numpy pairs: ``image`` is CHW float32
    (mean/var normalised, optionally BGR->RGB and scaled by 1/255), ``label``
    is HW float32 holding the 19 Cityscapes train ids, with all other classes
    mapped to ``ignore_label``.  Samples are optionally random-scaled,
    zero/ignore-padded, randomly cropped to ``crop_size`` and optionally
    mirrored.

    Args:
        opts: namespace providing ``data_dir``, ``data_list`` and ``rgb``.
        crop_size: (height, width) of the random crop.
        scale: if True, randomly rescale image/label before cropping.
        mirror: if True, randomly flip image/label horizontally.
        max_iters: if not None, repeat the file list until it holds at least
            ``max_iters`` entries.
        mean: per-channel value subtracted from the image.
        vars: per-channel divisor applied after mean subtraction.
        ignore_label: label value excluded from the loss.
    """

    def __init__(self, opts, crop_size, scale, mirror, max_iters, mean=(128, 128, 128), vars=(1, 1, 1), ignore_label=255):
        self.root = opts.data_dir
        self.list_path = opts.data_list
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.vars = vars
        self.is_mirror = mirror
        self.rgb = opts.rgb
        # Fixed: use a context manager so the list file is closed (the
        # original left the handle open for the process lifetime).
        with open(self.list_path) as list_file:
            self.img_ids = [line.strip().split() for line in list_file]
        self.max_iters = max_iters
        print(self.max_iters)
        # Fixed: identity check instead of 'not x == None'.  Repeating the
        # list guarantees at least max_iters samples are available.
        if self.max_iters is not None:
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for image_path, label_path in self.img_ids:
            name = osp.splitext(osp.basename(label_path))[0]
            self.files.append({
                "img": osp.join(self.root, image_path),
                "label": osp.join(self.root, label_path),
                "name": name,
                "image_path": image_path,
                "label_path": label_path
            })
        # Raw Cityscapes label ids -> 19 train ids; everything else ignored.
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                              3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                              7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                              14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                              18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                              28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
        print('{} images are loaded!'.format(len(self.files)))

    def __len__(self):
        """Return the number of (possibly repeated) samples."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Rescale image and label by a random factor in [0.7, 2.1]."""
        f_scale = 0.7 + random.randint(0, 14) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbour keeps label ids discrete (no interpolated classes).
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def id2trainId(self, label, reverse=False):
        """Map raw label ids to train ids; with ``reverse=True`` map back."""
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy

    def __getitem__(self, index):
        """Load, augment and return one ``(image, label)`` pair."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)

        label = self.id2trainId(label)

        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)

        if self.rgb:
            image = image[:, :, ::-1]  # BGR -> RGB
            image /= 255  # match torchvision-pretrained normalisation

        image -= self.mean
        image /= self.vars

        # Pad up to crop size; labels are padded with ignore_label so padded
        # pixels never contribute to the loss.
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label

        # Random crop of exactly (crop_h, crop_w).
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32)

        # HWC -> CHW, the layout MindSpore expects.
        image = image.transpose((2, 0, 1))

        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 = horizontal flip, 1 = keep
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy()

def create_traindataset(opts, crop_size, scale, mirror, max_iters, mean=(128, 128, 128), vars=(1, 1, 1)):
    """Create the batched Cityscapes training dataset.

    In data/hybrid parallel mode the dataset is sharded across devices
    (``opts.device_num`` shards, this process reading shard ``opts.rank_id``)
    and incomplete batches are dropped so every device sees the same number
    of steps; otherwise a plain single-process pipeline is built.

    Args:
        opts: namespace providing ``num_workers``, ``batch_size`` and, for
            the parallel path, ``device_num`` and ``rank_id`` (plus the
            fields consumed by ``Cityscapes``).
        crop_size / scale / mirror / max_iters / mean / vars: forwarded to
            ``Cityscapes``.

    Returns:
        A batched ``mindspore.dataset`` pipeline yielding 'image'/'label'.
    """
    # The source dataset is identical in both branches; build it once
    # (the original duplicated this constructor call).
    dataset = Cityscapes(opts, crop_size=crop_size, max_iters=max_iters, mean=mean,
                         vars=vars, scale=scale, mirror=mirror)
    parallel_mode = context.get_auto_parallel_context("parallel_mode")
    if parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
        DS = ds.GeneratorDataset(dataset, column_names=['image', 'label'],
                                 num_parallel_workers=opts.num_workers, shuffle=False,
                                 num_shards=opts.device_num, shard_id=opts.rank_id)
        # drop_remainder keeps per-device step counts aligned.
        DS = DS.batch(opts.batch_size, num_parallel_workers=opts.num_workers, drop_remainder=True)
    else:
        DS = ds.GeneratorDataset(dataset, column_names=['image', 'label'],
                                 num_parallel_workers=opts.num_workers, shuffle=False)
        DS = DS.batch(opts.batch_size, num_parallel_workers=opts.num_workers)
    return DS
# NOTE(review): the block below is a dead __main__ driver that was disabled
# by wrapping it in a module-level string literal.  It is kept byte-identical
# here; in a real cleanup it should be deleted or restored as an actual
# `if __name__ == '__main__':` entry point.
'''
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="DGCNet-ResNet101 Network")
    parser.add_argument("--batch_size_per_gpu", type=int, default=1,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--batch_size", type=int, default=1,
                        help="Number of images sent to the network in one step.")
    parser.add_argument('--gpu_num', type=int, default=1)
    parser.add_argument("--data_dir", type=str, default="/home/tangyibo/work/project/dataset/cityscapes",
                        help="Path to the directory containing the Cityscapes dataset.")
    parser.add_argument("--data_list", type=str, default="/home/tangyibo/work/project/msp/DGCNet/data/cityscapes/train.txt",
                        help="Path to the file listing the images in the dataset.")
    parser.add_argument("--data_set", type=str, default="cityscapes", help="dataset to train")
    # parser.add_argument("--arch", type=str, default="", help="network architecture")
    parser.add_argument("--ignore_label", type=int, default=255,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input_size", type=int, default=832,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--learning_rate", type=float, default=1e-2,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--momentum", type=float, default=0.9,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--num_classes", type=int, default=19,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num_steps", type=int, default=60000,
                        help="Number of training steps.")
    parser.add_argument("--power", type=float, default=0.9,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--weight_decay", type=float, default=5e-4,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--num_workers", type=int, default=1)
    parser.add_argument("--random_mirror", action="store_true", default=True,
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random_scale", action="store_true", default=True,
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--random_seed", type=int, default=1234,
                        help="Random seed to have reproducible results.")
    parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    # ***** Params for save and load ******
    parser.add_argument("--restore_from", type=str, default=None,
                        help="Where restore models parameters from.")
    parser.add_argument("--save_checkpoint_steps", type=int, default=50,
                        help="Save checkpoint every often.")
    parser.add_argument("--save_checkpoint_num", type=int, default=1,
                        help="Save checkpoint numbers, default is 1.")
    parser.add_argument("--save_dir", type=str, default=None,
                        help="Where to save snapshots of the models.")
    parser.add_argument("--save_start", type=int, default=40000)
    parser.add_argument("--gpu", type=str, default=None,
                        help="choose gpu device.")
    parser.add_argument("--ft", type=bool, default=False,
                        help="fine-tune the models with large input size.")
    # **** Params for OHEM **** #
    parser.add_argument("--ohem", type=str2bool, default='False',
                        help="use hard negative mining")
    parser.add_argument("--ohem_thres", type=float, default=0.7,
                        help="choose the samples with correct probability underthe threshold.")
    parser.add_argument("--ohem_keep", type=int, default=100000,
                        help="choose the samples with correct probability underthe threshold.")
    # ***** Params for logging ***** #
    parser.add_argument('--log_level', default="info", type=str,
                        dest='log_level', help='To set the log level to files.')
    parser.add_argument('--log_file', default="./eval.log", type=str,
                        dest='log_file', help='The path of log files.')
    parser.add_argument("--log_format", default="%(asctime)s %(levelname)-7s %(message)s", type=str,
                        dest="log_format", help="format of log files"
                        )
    parser.add_argument('--stdout_level', default="info", type=str,
                        dest='stdout_level', help='To set the level to print to screen.')
    parser.add_argument("--rewrite", default=False, type=bool,
                        dest="rewrite", help="whether write the file when using log"
                        )
    parser.add_argument("--rgb", type=str2bool, default='False')
    # ***** Params for Distributed Traning ***** #
    parser.add_argument('--apex', action='store_true', default=False,
                        help='Use Nvidia Apex Distributed Data Parallel')
    parser.add_argument("--local_rank", default=0, type=int, help="parameter used by apex library")
    args = parser.parse_args()
    batch_size = args.gpu_num * args.batch_size_per_gpu
    max_iters = args.num_steps * batch_size / args.gpu_num
    if args.rgb:
        IMG_MEAN = np.array((0.485, 0.456, 0.406), dtype=np.float32)
        IMG_VARS = np.array((0.229, 0.224, 0.225), dtype=np.float32)
    else:
        IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
        IMG_VARS = np.array((1, 1, 1), dtype=np.float32)

    train_ds = create_traindataset(args, max_iters=max_iters, mean=IMG_MEAN, vars=IMG_VARS)
    print("ok")
    train_data_loader = train_ds.create_dict_iterator()
    steps = train_ds.get_dataset_size()
    print(train_ds.get_dataset_size())
    for i, data in enumerate(train_data_loader):
        image = data['image']
        label = data['label']
        #img_path = data['img_path']
        #lab_path = data['lab_path']
        print(i,"******************************************************************")
        #print("image_path", img_path)
        #print("label_path", lab_path)
        reshape = mindspore.ops.Reshape()
        label = reshape(label, (-1,))
        ne = mindspore.ops.NotEqual()
        valid_mask = ne(label, 255)
        num_valid = valid_mask.astype("float32").sum()
        print("num_valid", num_valid)
'''
