# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------

import numpy as np
import mxnet as mx
import random
import math
import copy_reg
import types

from mxnet.executor_manager import _split_input_slice
from multiprocessing import Pool
from segmentation.dataprocesser import SegDataProcesser


def _pickle_method(m):
    """Reduce a Python 2 method object so it can be pickled.

    The multiprocessing pool below ships bound methods (e.g.
    ``self.dataproccesser.get_segmentation_batch``) to worker processes;
    stock Python 2 pickling cannot serialize those, so we encode a method
    as ``(getattr, (owner, method_name))`` and re-bind it on unpickle.
    """
    # Bound methods re-attach to their instance, unbound ones to the class.
    owner = m.im_class if m.im_self is None else m.im_self
    return getattr, (owner, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)


class TestDataLoader(mx.io.DataIter):
    """Iterator yielding segmentation batches for inference.

    Each batch is split into one slice per device in ``ctx`` and converted
    to arrays by :class:`SegDataProcesser`, configured with the TEST-phase
    options from ``config``. Labels are loaded only when ``has_label`` is
    True, but are never advertised via ``provide_label``.
    """

    def __init__(self, segdb, config, batch_size=1, shuffle=False, ctx=None, has_label=True):
        """
        :param segdb: preprocessed segmentation database (list of records)
        :param config: config object; reads ``config.TEST.*`` and ``config.network.*``
        :param batch_size: number of images per batch
        :param shuffle: shuffle sample order on every :meth:`reset`
        :param ctx: list of devices to slice each batch over (default: ``[mx.cpu()]``)
        :param has_label: whether ground-truth labels are available
        """
        super(TestDataLoader, self).__init__()

        # save parameters as properties
        self.segdb = segdb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.config = config
        # sentinel default instead of a mutable default argument
        self.ctx = [mx.cpu()] if ctx is None else ctx

        # infer properties from segdb
        self.size = len(self.segdb)
        self.index = np.arange(self.size)
        self.dataproccesser = SegDataProcesser(
            enable_crop=config.TEST.enable_crop,
            crop_size=config.TEST.crop_size,
            enable_scale=config.TEST.enable_scale,
            scale_range=config.TEST.scale_range,
            enable_rotation=config.TEST.enable_rotation,
            rotation_range=config.TEST.rotation_range,
            color_scale=config.network.COLOR_SCALE,
            pixel_means=config.network.PIXEL_MEANS,
            pixel_stds=config.network.PIXEL_STDS,
            flipped_ratio=config.TEST.flipped_ratio,
            has_label=has_label
        )

        # decide data and label names
        self.data_name = ['data']
        self.label_name = ['label']

        # status variables for synchronization between get_data and get_label
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None
        self.has_label = has_label

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        # one [(name, shape)] list per device slice
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]

    @property
    def provide_label(self):
        # labels are never advertised at test time, even when has_label is True
        return [None for _ in xrange(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return None

    def reset(self):
        """Rewind to the first sample, reshuffling if requested."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        # the final partial batch is still served (see getpad)
        return self.cur < self.size

    def next(self):
        """Return the next mx.io.DataBatch, or raise StopIteration at the end."""
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # explicit floor division: identical for non-negative ints, future-proof
        return self.cur // self.batch_size

    def getpad(self):
        # number of dummy samples padding the final (partial) batch
        if self.cur > self.size:
            return self.cur - self.size
        else:
            return 0

    def get_batch(self):
        """Load the current batch, sliced evenly across devices, into self.data/self.label."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]

        # equal work load per device
        work_load_list = [1] * len(self.ctx)
        slices = _split_input_slice(self.batch_size, work_load_list)

        rst = []
        for islice in slices:
            isegdb = [segdb[i] for i in range(islice.start, islice.stop) if i < len(segdb)]
            # a slice can be empty on the final partial batch
            if isegdb:
                rst.append(self.dataproccesser.get_segmentation_batch(isegdb))

        all_data = [_['total_data'] for _ in rst]
        all_label = [_['total_label'] for _ in rst]

        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        if self.has_label:
            self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]


class TrainDataLoader(mx.io.DataIter):
    def __init__(self, sym, segdb, config, batch_size=1,
                 shuffle=True, ctx=None, work_load_list=None):

        """
        This Iter will provide seg data to Deeplab network
        :param sym: to infer shape
        :param segdb: must be preprocessed
        :param config: config file
        :param batch_size: must divide BATCH_SIZE(128)
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :return: DataLoader
        """
        super(TrainDataLoader, self).__init__()

        # save parameters as properties
        self.sym = sym
        self.segdb = segdb
        self.config = config

        self.dataproccesser = SegDataProcesser(
            enable_crop = config.TRAIN.enable_crop,
            crop_size=    config.TRAIN.crop_size,
            enable_scale= config.TRAIN.enable_scale,
            scale_range=  config.TRAIN.scale_range,
            enable_rotation= config.TRAIN.enable_rotation,
            rotation_range=  config.TRAIN.rotation_range,
            color_scale= config.network.COLOR_SCALE,
            pixel_means= config.network.PIXEL_MEANS,
            pixel_stds= config.network.PIXEL_STDS,
            flipped_ratio= config.TRAIN.flipped_ratio,
            label_stride=  config.network.LABEL_STRIDE,
            has_label=True
        )


        self.shuffle = shuffle
        self.ctx = ctx
        self.batch_size = batch_size

        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list

        # infer properties from segdb
        self.size = len(segdb)
        self.index = np.arange(self.size)

        # decide data and label names
        self.data_name = ['data']
        self.label_name = ['label']

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None

        # init multi-process pool
        self.pool = Pool(processes = len(self.ctx)*2)

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()
        random.seed()

    @property
    def provide_data(self):
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]

    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]

    def reset(self):
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration


    def __getitem__(self, item):
        """
        get the i-batch of the train data 
        :param item:  the i-th batch in the train data
        :return: 
        """

        def getindex(cur):
            return cur / self.batch_size

        def getpad(cur):
            if cur + self.batch_size > self.size:
                return cur + self.batch_size - self.size
            else:
                return 0

        cur_from = item*self.batch_size
        cur_to = min(cur_from + self.batch_size, self.size)

        segdb = [self.segdb[self.index[i]] for i in range(cur_from,cur_to)]

        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        assert work_load_list==None,"the work_load_list must be the None"

        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        multiprocess_results = []
        for idx, islice in enumerate(slices):
            isegdb = [segdb[i] for i in range(islice.start, islice.stop) if i < len(segdb)]
            multiprocess_results.append(self.pool.apply_async(self.dataproccesser.get_segmentation_batch, (isegdb,)))

        rst = [multiprocess_result.get() for multiprocess_result in multiprocess_results]

        all_data = [_['total_data'] for _ in rst]
        all_label = [_['total_label'] for _ in rst]

        return mx.io.DataBatch(data=[[mx.nd.array(data[key]) for key in self.data_name] for data in all_data],
                               label=[[mx.nd.array(label[key]) for key in self.label_name] for label in all_label],
                               pad=getpad(cur_from), index=getindex(cur_from),
                               provide_data=self.provide_data, provide_label=self.provide_label)

    def getindex(self):
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            print "pading",self.cur + self.batch_size - self.size
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []

        max_shapes = dict(max_data_shape + max_label_shape)
        _, label_shape, _ = self.sym.infer_shape(**max_shapes)
        label_shape = [(self.label_name[0], label_shape[0])]
        return max_data_shape, label_shape



    def get_batch(self):

        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]

        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        multiprocess_results = []
        for idx, islice in enumerate(slices):
            isegdb = [segdb[i] for i in range(islice.start, islice.stop) if i < len(segdb)]
            multiprocess_results.append(self.pool.apply_async(self.dataproccesser.get_segmentation_batch,(isegdb,)))
            # multiprocess_results.append(self.dataproccesser.get_segmentation_batch(isegdb,))

        rst = [multiprocess_result.get() for multiprocess_result in multiprocess_results]

        all_data = [_['total_data'] for _ in rst]
        all_label = [_['total_label'] for _ in rst]

        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]