# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""the module is used to load transforms pipeline of dataset."""

import cv2

import mindspore.dataset as de
import mindspore.dataset.transforms.c_transforms as c
from mindspore.common import dtype as mstype

from mindvideo.datasets.builder import build_transforms, build_dataset_sampler, build_dataset
from mindvideo.common.utils.download import DownLoad


class DataLoader(DownLoad):
    """The dataset loader class.

    Wraps a dataset object and, when called, applies the configured
    ``map``/``batch`` operations to produce a MindSpore dataset.

    Args:
        dataset: Dataset object to wrap.
        map_cfg (dataload_cfg): Config for ds.map().
        batch_cfg (dataload_cfg): Config for ds.batch().
        config (dataload_cfg): The data loader config dict.
        train (bool): Training flag for the legacy (no ``config``) path.
        transform: Sample transform for the legacy path.
        target_transform: Label transform.
        batch_size: Batch size for the legacy path.
        repeat_num: Repeat count for the legacy path.
        num_parallel_workers: Worker count for the legacy path.
        download (bool): Whether to download data in the legacy path.

    Examples:
        >>> dataloader = DataLoader(ds, map, batch, cfg)
        >>> ds = dataloader()
    """

    def __init__(self,
                 dataset,
                 map_cfg=None,
                 batch_cfg=None,
                 config=None,
                 train=True,
                 transform=None,
                 target_transform=None,
                 batch_size=None,
                 repeat_num=None,
                 num_parallel_workers=None,
                 download=False):
        """Constructor for DataLoader."""
        # Attributes read by __call__/data_augment are set on every path so
        # invoking the loader never raises AttributeError (previously the
        # no-config branch left config/map_ops/per_batch_map unset).
        self.dataset = dataset
        self.map_cfg = map_cfg
        self.batch_cfg = batch_cfg
        self.config = config
        self.map_ops = None
        self.per_batch_map = None
        self.target_transform = target_transform

        if config:
            # Build the callable transform pipelines from their configs.
            if self.map_cfg and self.map_cfg.operations:
                self.map_ops = build_transforms(self.map_cfg.operations)

            if self.batch_cfg and self.batch_cfg.per_batch_map:
                self.per_batch_map = build_transforms(self.batch_cfg.per_batch_map)
        else:
            # Legacy keyword-argument path: keep the raw settings around.
            self.train = train
            self.transform = transform
            self.batch_size = batch_size
            self.repeat_num = repeat_num
            self.num_parallel_workers = num_parallel_workers
            self.download = download

    def __call__(self):
        """Generate MindSpore dataset object.

        Returns:
            The dataset with the configured map/batch operations applied.
        """
        if self.config:
            # A falsy thread_num (None or 0) leaves the OpenCV default alone.
            if self.config.thread_num and self.config.thread_num >= 0:
                cv2.setNumThreads(self.config.thread_num)
            if self.config.prefetch_size:
                de.config.set_prefetch_size(self.config.prefetch_size)

        ds = self.dataset
        if self.map_ops:
            # 'operations' was already consumed by build_transforms in
            # __init__; pop with a default so a second call cannot KeyError.
            self.map_cfg.pop('operations', None)
            ds = ds.map(operations=self.data_augment, **self.map_cfg)
            if self.config.task == "classification":
                # Cast labels to int32 as expected by downstream ops.
                ds = ds.map(operations=c.TypeCast(mstype.int32), input_columns="label")
        if self.per_batch_map:
            self.batch_cfg.pop('per_batch_map', None)
            ds = ds.batch(per_batch_map=self.per_batch_map, **self.batch_cfg)
        elif self.batch_cfg:
            # Only batch when a batch config exists (previously a missing
            # batch_cfg crashed with TypeError on ds.batch(**None)).
            ds = ds.batch(**self.batch_cfg)
        return ds

    def data_augment(self, *args):
        """Apply the built map operations sequentially to one sample.

        With a single input column the dataset passes one positional
        argument; with several, the tuple of columns is passed through
        the pipeline as-is.
        """
        if len(self.map_cfg.input_columns) == 1:
            result = args[0]
        else:
            result = args
        for op in self.map_ops:
            result = op(result)
        return result


def build_dataloader(cfg, is_training=True, is_inferring=False):
    """
    Build dataset loading class.

    Args:
        cfg(dict): The data loader config dict.
        is_training: training flag.
        is_inferring: inferring flag.
    Returns:
        DataLoader object.
    """
    # Select the config section; inference takes precedence over train/eval.
    config = cfg.train if is_training else cfg.eval
    if is_inferring:
        config = cfg.infer

    # Convert any configured data source to mindrecord first.
    if config.mindrecord:
        converter = build_dataset(config.mindrecord)
        converter()

    # Instantiate a custom dataset source when one is configured.
    if config.dataset.source and config.dataset.source.type:
        config.dataset.source = build_dataset(config.dataset.source)

    # A sampler is only built when a custom source is present,
    # since it needs the source's length.
    if config.dataset.source and config.dataset.sampler:
        config.dataset.sampler.dataset_size = len(config.dataset.source)
        config.dataset.sampler = build_dataset_sampler(config.dataset.sampler)

    # Generate the dataset object and wrap it in a loader.
    ds = build_dataset(config.dataset)
    return DataLoader(ds, config.map, config.batch, cfg)
