import logging

import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.checkpoint as cp
import torch.distributed as dist
from torch.nn.modules.batchnorm import _BatchNorm

from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.cnn.weight_init import constant_init

from mmdet.ops import DeformConv, ModulatedDeformConv
from mmdet.models.registry import BACKBONES
from mmdet.models.utils import build_conv_layer, build_norm_layer, GaussianKernel, GumbelSigmoid


class BasicBlock(nn.Module):
    """Placeholder for the ResNet BasicBlock (depths 18 and 34).

    The sparse-mask mechanism is only implemented for :class:`Bottleneck`,
    so instantiating this class always raises ``NotImplementedError``.
    ``*args``/``**kwargs`` are accepted so that ``make_res_layer`` (which
    passes ``inplanes``, ``planes``, ``stride`` etc.) fails with the intended
    ``NotImplementedError`` rather than an unrelated ``TypeError``.
    """

    def __init__(self, *args, **kwargs):
        super(BasicBlock, self).__init__()
        raise NotImplementedError(
            'BasicBlock is not supported by SparseResNet; '
            'use a Bottleneck depth (50/101/152) instead.')


class Bottleneck(nn.Module):
    """Bottleneck block for ResNet, extended with learned spatial sparsity.

    On top of the usual 1x1 -> 3x3 (optionally deformable) -> 1x1 stack, the
    block predicts a per-pixel mask before conv1 and conv2 (hard mask from a
    Gumbel-sigmoid, with an "always on" grid prior every
    ``grid_prior_interval`` pixels) and fills masked-out locations with a
    Gaussian-weighted reconstruction from the kept ones.

    ``forward`` consumes and returns ``(feature, loss)`` tuples so that the
    auxiliary sparsity loss (mean soft-mask activation) can be accumulated
    while the blocks are chained inside an ``nn.Sequential``.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gumbel_cfg=None,
                 gaussian_kernel=-1,
                 grid_prior_interval=-1,):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.

        Args:
            inplanes (int): Input channels of the block.
            planes (int): Bottleneck channels; output is
                ``planes * expansion``.
            stride (int): Stride of the block (placed on conv1 or conv2
                depending on ``style``).
            dilation (int): Dilation of the 3x3 conv.
            downsample (nn.Module | None): Projection for the identity path.
            style (str): 'pytorch' or 'caffe' (see above).
            with_cp (bool): Use torch.utils.checkpoint to trade compute for
                memory.
            conv_cfg (dict | None): Conv layer config for build_conv_layer.
            norm_cfg (dict): Norm layer config for build_norm_layer.
            dcn (dict | None): Deformable-conv config for conv2.
            gumbel_cfg (dict): Config forwarded to GumbelSigmoid (required).
            gaussian_kernel (int): Side length of the Gaussian reconstruction
                kernel (required, must be odd for symmetric padding).
            grid_prior_interval (int): Pixel stride of the always-active grid
                prior in the masks (required).
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.downsample = downsample
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        # The sparse-mask arguments have no usable defaults; fail fast if the
        # caller forgot to configure them.
        assert gumbel_cfg is not None
        assert gaussian_kernel != -1
        assert grid_prior_interval != -1

        self.gumbel_cfg = gumbel_cfg
        self.gaussian_kernel = gaussian_kernel
        self.grid_prior_interval = grid_prior_interval

        self.init_conv_layers()
        self.init_dev_layers()

    def init_conv_layers(self):
        """Build the standard conv1/conv2/conv3 + norm stack (conv2 may be a
        (modulated) deformable conv when ``self.dcn`` is set)."""
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, self.planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, self.planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            self.planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = self.dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                self.planes,
                self.planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            deformable_groups = self.dcn.get('deformable_groups', 1)
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18  # 2 (x, y) * 3x3 sampling points
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27  # 18 offsets + 9 modulation scalars
            # NOTE: the offset branch reads the *block input* x (inplanes
            # channels), not conv1's output — see forward().
            self.conv2_offset = nn.Conv2d(
                self.inplanes,
                deformable_groups * offset_channels,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation)
            self.conv2 = conv_op(
                self.planes,
                self.planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                deformable_groups=deformable_groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            self.planes,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)

    def init_dev_layers(self):
        """Build the mask-prediction heads, Gumbel samplers, and the Gaussian
        reconstruction machinery, plus eval-time density counters."""
        # One-channel mask logits, predicted from the block input at the
        # stride of the convolution whose output they gate.
        self.conv1_mask = nn.Conv2d(self.inplanes, 1, kernel_size=3, stride=self.conv1_stride, padding=1, dilation=1, bias=True)
        self.conv2_mask = nn.Conv2d(self.inplanes, 1, kernel_size=3, stride=self.conv2_stride, padding=1, dilation=1, bias=True)

        # Zero weights + bias 3.0 -> initial logits of 3 (sigmoid ~ 0.95),
        # i.e. the masks start out almost fully "on".
        constant_init(self.conv1_mask, val=0, bias=3.)
        constant_init(self.conv2_mask, val=0, bias=3.)

        self.sigmoid = nn.Sigmoid()
        self.conv1_gumbel = GumbelSigmoid(**self.gumbel_cfg)
        self.conv2_gumbel = GumbelSigmoid(**self.gumbel_cfg)

        # Learnable bandwidths of the Gaussian reconstruction kernels.
        self.conv1_sigma = nn.Parameter(torch.tensor(3.))
        self.conv2_sigma = nn.Parameter(torch.tensor(3.))
        self.Gaussian = GaussianKernel(self.gaussian_kernel)
        self.gaussian_padding = (self.gaussian_kernel - 1) // 2

        # Eval-time statistics (see forward / print_mask_density).
        self.img_count = 0.
        self.conv1_mask_density = 0.
        self.conv2_mask_density = 0.

    @property
    def norm1(self):
        """The normalization layer after conv1."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """The normalization layer after conv2."""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """The normalization layer after conv3."""
        return getattr(self, self.norm3_name)

    def reconstruct_feature(self, feature, mask, sigma):
        """Fill masked-out positions of ``feature`` by a normalized,
        Gaussian-weighted average of the kept (mask == 1) positions.

        Args:
            feature (Tensor): (N, planes, H, W) feature map.
            mask (Tensor): (N, 1, H, W) soft/hard mask, broadcast over
                channels.
            sigma (Tensor): scalar bandwidth parameter for GaussianKernel.

        Returns:
            Tensor: ``mask * feature + (1 - mask) * reconstruction``.
        """
        # Fix: move the kernel to the tensors' device instead of the
        # hard-coded `.cuda()`, so CPU inference and non-default GPUs work.
        weight = self.Gaussian(sigma).reshape(
            (1, 1, self.gaussian_kernel, self.gaussian_kernel)).to(mask.device)

        # Per-pixel normalizer: total Gaussian weight of kept neighbours.
        mask_norm = F.conv2d(
            input=mask,
            weight=weight,
            stride=1,
            padding=self.gaussian_padding,
            dilation=1
        )
        mask_norm = mask_norm + 1e-5  # avoid division by zero

        # Depthwise convolution: one copy of the kernel per channel.
        weight = weight.repeat((self.planes, 1, 1, 1))

        feature_reconstruct = F.conv2d(
            input=feature * mask,
            weight=weight,
            stride=1,
            padding=self.gaussian_padding,
            dilation=1,
            groups=self.planes
        )
        feature_reconstruct = feature_reconstruct / mask_norm

        # Keep original values where the mask is on; use the reconstruction
        # elsewhere.
        out = mask * feature + (1.0 - mask) * feature_reconstruct

        return out

    def forward(self, input):
        """Run the block on an ``(x, loss)`` tuple and return the same shape.

        ``input[0]`` is the feature map, ``input[1]`` the sparsity loss
        accumulated by preceding blocks.
        """
        # NOTE: parameter name `input` shadows the builtin but is kept for
        # interface compatibility with existing callers.
        x = input[0]
        loss = input[1]

        def _inner_forward(x):
            identity = x
            _, _, h, w = x.shape

            # Convolution 1.
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            # End of Convolution 1.

            # Predict conv1's mask from the block input, then force the
            # regular grid prior to stay active.
            conv1_mask = self.conv1_mask(x)
            conv1_mask_soft = self.sigmoid(conv1_mask)
            conv1_mask = self.conv1_gumbel(conv1_mask_soft)
            conv1_mask[:, :, ::self.grid_prior_interval, ::self.grid_prior_interval] = 1.0

            if not self.training:
                # Binarize at 0.5 and accumulate the fraction of active
                # pixels for later reporting (print_mask_density).
                conv1_mask_binary = (conv1_mask >= 0.5).float()
                conv1_mask = conv1_mask * conv1_mask_binary
                self.img_count = self.img_count + 1
                self.conv1_mask_density = self.conv1_mask_density + conv1_mask_binary.mean()
                # Fix: only average across ranks when a process group is
                # actually initialized; single-process evaluation previously
                # crashed here. With one process the divide is a no-op, so
                # skipping both calls preserves behavior.
                if dist.is_available() and dist.is_initialized():
                    dist.all_reduce(self.conv1_mask_density)
                    self.conv1_mask_density = self.conv1_mask_density / dist.get_world_size()

            out = self.reconstruct_feature(out, conv1_mask, self.conv1_sigma)

            # Convolution 2
            if not self.with_dcn:
                out = self.conv2(out)
            elif self.with_modulated_dcn:
                # First 18 channels are (x, y) offsets, last 9 the
                # modulation mask of the 3x3 deformable conv.
                offset_mask = self.conv2_offset(x)
                offset = offset_mask[:, :18, :, :]
                mask = offset_mask[:, -9:, :, :].sigmoid()
                out = self.conv2(out, offset, mask)
            else:
                offset = self.conv2_offset(x)
                out = self.conv2(out, offset)
            out = self.norm2(out)
            out = self.relu(out)
            # End of Convolution 2

            # Same masking procedure for conv2's output. NOTE(review): the
            # mask head strides by conv2_stride over the *block input*, which
            # matches conv2's output resolution only in 'pytorch' style
            # (conv1_stride == 1) — confirm before using 'caffe' style.
            conv2_mask = self.conv2_mask(x)
            conv2_mask_soft = self.sigmoid(conv2_mask)
            conv2_mask = self.conv2_gumbel(conv2_mask_soft)
            conv2_mask[:, :, ::self.grid_prior_interval, ::self.grid_prior_interval] = 1.0
            if not self.training:
                # img_count is shared with the conv1 branch above; both
                # densities are divided by the same counter when reported.
                conv2_mask_binary = (conv2_mask >= 0.5).float()
                conv2_mask = conv2_mask * conv2_mask_binary
                self.conv2_mask_density = self.conv2_mask_density + conv2_mask_binary.mean()
                if dist.is_available() and dist.is_initialized():
                    dist.all_reduce(self.conv2_mask_density)
                    self.conv2_mask_density = self.conv2_mask_density / dist.get_world_size()

            out = self.reconstruct_feature(out, conv2_mask, self.conv2_sigma)

            # Convolution 3 and Final Output.
            out = self.conv3(out)
            out = self.norm3(out)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            # End of Convolution 3.

            # Sparsity loss on the *soft* masks; conv2 is weighted heavier.
            loss = 0.25 * conv1_mask_soft.mean() + 0.75 * conv2_mask_soft.mean()
            return out, loss

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        # _inner_forward returns (feature, loss); unpack defensively in case
        # checkpointing changes the container type.
        _inner_loss = 0.0
        if isinstance(out, tuple):
            out, _inner_loss = out

        out = self.relu(out)

        return (out, loss + _inner_loss)


def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   **kwargs):
    """Stack ``blocks`` residual blocks into a single ``nn.Sequential`` stage.

    The first block carries ``stride`` (and, if the resolution or channel
    count changes, a 1x1 conv + norm projection on the identity path); the
    remaining blocks run at stride 1 with matching channels. Any extra
    keyword arguments are forwarded verbatim to every block.
    """
    out_channels = planes * block.expansion

    # A projection shortcut is only needed when the residual branch changes
    # the spatial resolution or the channel count.
    downsample = None
    if stride != 1 or inplanes != out_channels:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, out_channels)[1],
        )

    # First block: handles stride and the (optional) identity projection.
    stage = [
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            dcn=dcn,
            **kwargs,
        )
    ]
    # Remaining blocks: stride 1, channels already expanded.
    for _ in range(blocks - 1):
        stage.append(
            block(
                out_channels,
                planes,
                1,
                dilation,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                **kwargs,
            )
        )
    return nn.Sequential(*stage)


@BACKBONES.register_module
class SparseResNet(nn.Module):
    """ResNet backbone whose residual blocks learn spatial sparsity masks.

    Structured like the standard mmdet ResNet, but every res layer threads an
    ``(x, loss)`` tuple through its blocks, so ``forward`` returns
    ``(tuple_of_stage_features, sparsity_loss)`` instead of just the feature
    tuple. The loss is scaled by ``sparse_loss_weight`` averaged over the
    total number of blocks.
    """

    # depth -> (block class, number of blocks per stage). Note BasicBlock
    # depths (18/34) are listed but the block itself is not implemented.
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 with_cp=False,
                 zero_init_residual=True,
                 gumbel_cfg=None,
                 gaussian_kernel=-1,
                 grid_prior_interval=-1,
                 sparse_loss_weight=0.):
        """Build the stem and the four (or fewer) residual stages.

        ``gumbel_cfg``, ``gaussian_kernel`` and ``grid_prior_interval`` are
        forwarded to every block (the Bottleneck asserts they are set).
        ``sparse_loss_weight`` scales the accumulated sparsity loss; it is
        normalized by the total block count below.
        """
        super(SparseResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64
        self._make_stem_layer()

        # Build one res layer per stage; self.res_layers keeps the attribute
        # names ('layer1'..'layer4') so forward can iterate them in order.
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            planes = 64 * 2**i  # 64, 128, 256, 512
            res_layer = make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gumbel_cfg=gumbel_cfg,
                gaussian_kernel=gaussian_kernel,
                grid_prior_interval=grid_prior_interval,
            )
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()
        self.feat_dim = self.block.expansion * 64 * 2**(len(self.stage_blocks) - 1)

        # Normalize the loss weight by the total number of blocks so the
        # configured weight is an average over blocks, not a sum.
        self.sparse_loss_weight = sparse_loss_weight / float(sum(self.stage_blocks))

    @property
    def norm1(self):
        """The normalization layer of the stem."""
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self):
        """Build the 7x7/stride-2 stem conv, its norm, ReLU, and max pool."""
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            3,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Freeze the stem (frozen_stages >= 0) and the first
        ``frozen_stages`` res layers: eval mode + no gradients."""
        if self.frozen_stages >= 0:
            self.norm1.eval()
            for m in [self.conv1, self.norm1]:
                for param in m.parameters():
                    param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize weights from a checkpoint path or with the defaults.

        Args:
            pretrained (str | None): checkpoint path, or None for Kaiming
                conv init, unit norm init, zeroed DCN offsets, and (if
                ``zero_init_residual``) zeroed last-norm of each block.

        Raises:
            TypeError: if ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                # Zeroing the last norm makes each residual branch start as
                # identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Return ``(stage_features, sparsity_loss)``.

        Each res layer receives and returns an ``(x, loss)`` tuple (its
        blocks accumulate their mask losses into the second element).
        """
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        loss = 0.0
        for i, layer_name in enumerate(self.res_layers):
            loss_layer = 0.0
            res_layer = getattr(self, layer_name)
            x, loss_layer = res_layer((x, loss_layer))
            loss += loss_layer
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs), loss * self.sparse_loss_weight

    def train(self, mode=True):
        """Switch train/eval mode, re-freeze stages, and (when norm_eval)
        keep all BatchNorm layers in eval mode during training."""
        super(SparseResNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()

    def print_mask_density(self):
        """Print per-block eval-time mask densities and an estimated FLOPs
        figure computed by ``FlopsCalculator``.

        NOTE(review): uses ``dist.get_world_size()`` and the blocks'
        distributed counters — presumably meant to run after distributed
        evaluation; confirm a process group is initialized before calling.
        """
        from mmdet.utils import FlopsCalculator

        density_dict = dict()
        for i, layer_name in enumerate(self.res_layers):
            density_dict[layer_name] = dict()
            layer = getattr(self, layer_name)
            for block_index in range(self.stage_blocks[i]):
                block_name = f'block{block_index}'
                # nn.Sequential children are addressable by their string
                # index.
                block = getattr(layer, str(block_index))

                density_dict[layer_name][block_name] = dict()
                density_dict[layer_name][block_name]['conv1'] = block.conv1_mask_density / block.img_count
                density_dict[layer_name][block_name]['conv2'] = block.conv2_mask_density / block.img_count

                print(f'[{layer_name}] [{block_name:>7}] '
                      f'[Img {int(block.img_count * dist.get_world_size())}] '
                      f'[Conv1 {block.conv1_mask_density / block.img_count:.4f}] '
                      f'[Conv2 {block.conv2_mask_density / block.img_count:.4f}] ')

        # Resolution 1000 is hard-coded here to mirror the evaluation
        # setting used for the FLOPs report.
        fc = FlopsCalculator(
            layer=self.depth,
            resolution=1000,
            density_dict=density_dict,
        )

        cal = fc.get_whole_cal()

        print(f'Baseline  : SparseResNet{self.depth}\n'
              f'Resolution: 1000\n'
              f'Flops     : {cal / 1e9:.2f} GFlops\n')