# Copyright (c) Open-MMLab. All rights reserved.
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License  (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
                                           _find_tensors)

# from mmcv import print_log
TORCH_VERSION = torch.__version__
from .scatter_gather import scatter_kwargs


class MMDistributedDataParallel(DistributedDataParallel):
    """The DDP module that supports DataContainer.

    MMDDP has two main differences with PyTorch DDP:

    - It supports a custom type :class:`DataContainer` which allows more
      flexible control of input data.
    - It implements two APIs ``train_step()`` and ``val_step()``.
    """

    @staticmethod
    def _version_ge(version_str, major, minor):
        """Return True if ``version_str`` is at least ``major.minor``.

        Numeric components are compared instead of raw strings because a
        lexicographic comparison misorders multi-digit minors (e.g.
        ``'1.10' < '1.2'`` as strings, which would wrongly skip the
        version-gated code on PyTorch >= 1.10). A local-version suffix
        (``'1.8.0+cu102'``) is stripped before parsing. Unparsable
        versions (e.g. ``'parrots'``) are treated as new enough, matching
        the original string comparison, which also evaluated to True for
        them.

        Args:
            version_str (str): version string such as ``'1.8.0'``.
            major (int): required major version.
            minor (int): required minor version.

        Returns:
            bool: whether ``version_str`` >= ``major.minor``.
        """
        parts = version_str.split('+')[0].split('.')
        try:
            return (int(parts[0]), int(parts[1])) >= (major, minor)
        except (IndexError, ValueError):
            return True

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter inputs/kwargs (including DataContainer) to devices."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def train_step(self, *inputs, **kwargs):
        """train_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.train_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # NOTE: in PyTorch >= 1.7 ``reducer._rebuild_buckets()`` moved from
        # the end of backward to the beginning of forward; the upstream mmcv
        # logging of that event is intentionally disabled in this fork.
        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()

        # npu_diff: the multi-device scatter path of upstream mmcv was
        # intentionally disabled here (the original guard read
        # ``if self.device_ids and False:``, i.e. it was unreachable), so
        # inputs are always scattered to the current device via id -1.
        inputs, kwargs = self.scatter(inputs, kwargs, [-1])
        output = self.module.train_step(*inputs[0], **kwargs[0])

        if torch.is_grad_enabled() and getattr(
                self, 'require_backward_grad_sync', True):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            # ``require_forward_param_sync`` exists on PyTorch >= 1.2 only.
            # A parsed comparison is used because the former string compare
            # (``TORCH_VERSION > '1.2'``) is False for e.g. '1.10'.
            if self._version_ge(TORCH_VERSION, 1, 2):
                self.require_forward_param_sync = False
        return output

    def val_step(self, *inputs, **kwargs):
        """val_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.val_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """
        # NOTE: in PyTorch >= 1.7 ``reducer._rebuild_buckets()`` moved from
        # the end of backward to the beginning of forward; the upstream mmcv
        # logging of that event is intentionally disabled in this fork.
        if getattr(self, 'require_forward_param_sync', True):
            self._sync_params()
        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                output = self.module.val_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(
                    self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = self.module.val_step(*inputs, **kwargs)

        if torch.is_grad_enabled() and getattr(
                self, 'require_backward_grad_sync', True):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            # See the matching comment in ``train_step``: parsed version
            # compare fixes the lexicographic bug for PyTorch >= 1.10.
            if self._version_ge(TORCH_VERSION, 1, 2):
                self.require_forward_param_sync = False
        return output




















# # Copyright (c) Open-MMLab. All rights reserved.
# import torch
# import torch.distributed as dist
# import torch.nn as nn
# from torch._utils import (_flatten_dense_tensors, _take_tensors,
#                           _unflatten_dense_tensors)

# from .scatter_gather import scatter_kwargs


# class MMDistributedDataParallel(nn.Module):

#     def __init__(self,
#                  module,
#                  dim=0,
#                  broadcast_buffers=True,
#                  bucket_cap_mb=25):
#         super(MMDistributedDataParallel, self).__init__()
#         self.module = module
#         self.dim = dim
#         self.broadcast_buffers = broadcast_buffers

#         self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
#         self._sync_params()

#     def _dist_broadcast_coalesced(self, tensors, buffer_size):
#         for tensors in _take_tensors(tensors, buffer_size):
#             flat_tensors = _flatten_dense_tensors(tensors)
#             dist.broadcast(flat_tensors, 0)
#             for tensor, synced in zip(
#                     tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
#                 tensor.copy_(synced)

#     def _sync_params(self):
#         module_states = list(self.module.state_dict().values())
#         if len(module_states) > 0:
#             self._dist_broadcast_coalesced(module_states,
#                                            self.broadcast_bucket_size)
#         if self.broadcast_buffers:
#             if torch.__version__ < '1.0':
#                 buffers = [b.data for b in self.module._all_buffers()]
#             else:
#                 buffers = [b.data for b in self.module.buffers()]
#             if len(buffers) > 0:
#                 self._dist_broadcast_coalesced(buffers,
#                                                self.broadcast_bucket_size)

#     def scatter(self, inputs, kwargs, device_ids):
#         return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

#     def forward(self, *inputs, **kwargs):
#         # gpu
#         # inputs, kwargs = self.scatter(inputs, kwargs,
#         #                               [torch.cuda.current_device()])

#         # npu_diff
#         inputs, kwargs = self.scatter(inputs, kwargs, [-1])
#         return self.module(*inputs[0], **kwargs[0])
