import warnings

import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init

from .activation import build_activation_layer
from .conv import build_conv_layer
from .norm import build_norm_layer


class ConvModule(nn.Module):
	"""A conv block that contains conv/norm/activation layers.

	Args:
		in_channels (int): Same as nn.Conv2d.
		out_channels (int): Same as nn.Conv2d.
		kernel_size (int or tuple[int]): Same as nn.Conv2d.
		stride (int or tuple[int]): Same as nn.Conv2d.
		padding (int or tuple[int]): Same as nn.Conv2d.
		dilation (int or tuple[int]): Same as nn.Conv2d.
		groups (int): Same as nn.Conv2d.
		bias (bool or str): If specified as `auto`, it will be decided by the
			norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
			False.
		conv_cfg (dict): Config dict for convolution layer.
		norm_cfg (dict): Config dict for normalization layer.
		act_cfg (dict): Config dict for activation layer, "relu" by default.
		inplace (bool): Whether to use inplace mode for activation.
		order (tuple[str]): The order of conv/norm/activation layers. It is a
			sequence of "conv", "norm" and "act". Examples are
			("conv", "norm", "act") and ("act", "conv", "norm").
	"""

	def __init__(self,
				 in_channels,
				 out_channels,
				 kernel_size,
				 stride=1,
				 padding=0,
				 dilation=1,
				 groups=1,
				 bias='auto',
				 conv_cfg=None,
				 norm_cfg=None,
				 act_cfg=dict(type='ReLU'),
				 inplace=True,
				 order=('conv', 'norm', 'act')):
		super(ConvModule, self).__init__()
		assert conv_cfg is None or isinstance(conv_cfg, dict)
		assert norm_cfg is None or isinstance(norm_cfg, dict)
		assert act_cfg is None or isinstance(act_cfg, dict)
		self.conv_cfg = conv_cfg
		self.norm_cfg = norm_cfg
		self.act_cfg = act_cfg
		self.inplace = inplace
		self.order = order
		assert isinstance(self.order, tuple) and len(self.order) == 3
		assert set(order) == {'conv', 'norm', 'act'}

		self.with_norm = norm_cfg is not None
		self.with_activation = act_cfg is not None
		# if the conv layer is before a norm layer, bias is unnecessary.
		if bias == 'auto':
			bias = not self.with_norm
		self.with_bias = bias

		if self.with_norm and self.with_bias:
			warnings.warn('ConvModule has norm and bias at the same time')

		# build convolution layer
		self.conv = build_conv_layer(
			conv_cfg,
			in_channels,
			out_channels,
			kernel_size,
			stride=stride,
			padding=padding,
			dilation=dilation,
			groups=groups,
			bias=bias)
		# export the attributes of self.conv to a higher level for convenience
		self.in_channels = self.conv.in_channels
		self.out_channels = self.conv.out_channels
		self.kernel_size = self.conv.kernel_size
		self.stride = self.conv.stride
		self.padding = self.conv.padding
		self.dilation = self.conv.dilation
		self.transposed = self.conv.transposed
		self.output_padding = self.conv.output_padding
		self.groups = self.conv.groups

		# build normalization layers
		# Always define norm_name so the `norm` property is safe to access
		# even when no norm layer is configured (fixes AttributeError).
		self.norm_name = None
		if self.with_norm:
			# norm layer is after conv layer -> normalize the conv output;
			# otherwise it sees the module input, so use in_channels.
			if order.index('norm') > order.index('conv'):
				norm_channels = out_channels
			else:
				norm_channels = in_channels
			self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
			self.add_module(self.norm_name, norm)

		# build activation layer
		if self.with_activation:
			# copy before setdefault so the caller's act_cfg is not mutated
			act_cfg_ = act_cfg.copy()
			act_cfg_.setdefault('inplace', inplace)
			self.activate = build_activation_layer(act_cfg_)

		# Use msra init by default
		self.init_weights()

	@property
	def norm(self):
		"""nn.Module or None: The normalization layer, if one was built."""
		if self.norm_name is None:
			return None
		return getattr(self, self.norm_name)

	def init_weights(self):
		"""Initialize conv weights with Kaiming (msra) init and the norm
		layer (if any) with constant weight 1 and bias 0."""
		# Match the kaiming nonlinearity to the configured activation.
		if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
			nonlinearity = 'leaky_relu'
		else:
			nonlinearity = 'relu'

		# Depthwise-separable convs expose two sub-convs to initialize.
		if (self.conv_cfg is not None) and (self.conv_cfg['type'] == 'ConvDWS'):
			kaiming_init(self.conv.depthwise, nonlinearity=nonlinearity)
			kaiming_init(self.conv.pointwise, nonlinearity=nonlinearity)
		else:
			kaiming_init(self.conv, nonlinearity=nonlinearity)

		if self.with_norm:
			constant_init(self.norm, 1, bias=0)

	def forward(self, x, activate=True, norm=True):
		"""Apply conv/norm/activation in the configured order.

		Args:
			x (Tensor): Input feature map.
			activate (bool): Whether to apply the activation layer.
			norm (bool): Whether to apply the norm layer.

		Returns:
			Tensor: The transformed feature map.
		"""
		for layer in self.order:
			if layer == 'conv':
				x = self.conv(x)
			elif layer == 'norm' and norm and self.with_norm:
				x = self.norm(x)
			elif layer == 'act' and activate and self.with_activation:
				x = self.activate(x)
		return x


class BnModule(nn.Module):
	"""A norm/activation block without a conv layer.

	Mirrors the norm/activation handling of ``ConvModule`` for places
	where only normalization and activation are needed.

	Args:
		num_channels (int): Number of channels fed to the norm layer.
		norm_cfg (dict): Config dict for normalization layer, "BN" by
			default. If None, no norm layer is built.
		act_cfg (dict): Config dict for activation layer, "relu" by
			default. If None, no activation layer is built.
		inplace (bool): Whether to use inplace mode for activation.
		order (tuple[str]): The order of norm/activation layers. It is a
			sequence of "norm" and "act".
	"""

	def __init__(self,
				 num_channels,
				 norm_cfg=dict(type='BN'),
				 act_cfg=dict(type='ReLU'),
				 inplace=True,
				 order=('norm', 'act')):
		super(BnModule, self).__init__()
		assert norm_cfg is None or isinstance(norm_cfg, dict)
		assert act_cfg is None or isinstance(act_cfg, dict)
		self.norm_cfg = norm_cfg
		self.act_cfg = act_cfg
		self.inplace = inplace
		self.order = order
		assert isinstance(self.order, tuple) and len(self.order) == 2
		assert set(order) == {'norm', 'act'}

		self.num_channels = num_channels
		self.with_norm = norm_cfg is not None
		self.with_activation = act_cfg is not None

		# build normalization layers
		# Always define norm_name so the `norm` property is safe to access
		# even when no norm layer is configured (fixes AttributeError).
		self.norm_name = None
		if self.with_norm:
			self.norm_name, norm = build_norm_layer(norm_cfg, num_channels)
			self.add_module(self.norm_name, norm)

		# build activation layer
		if self.with_activation:
			# copy before setdefault so the caller's act_cfg is not mutated
			act_cfg_ = act_cfg.copy()
			act_cfg_.setdefault('inplace', inplace)
			self.activate = build_activation_layer(act_cfg_)

		# Use msra init by default
		self.init_weights()

	@property
	def norm(self):
		"""nn.Module or None: The normalization layer, if one was built."""
		if self.norm_name is None:
			return None
		return getattr(self, self.norm_name)

	def init_weights(self):
		"""Initialize the norm layer (if any) with weight 1 and bias 0."""
		if self.with_norm:
			constant_init(self.norm, 1, bias=0)

	def forward(self, x, activate=True, norm=True):
		"""Apply norm/activation in the configured order.

		Args:
			x (Tensor): Input feature map.
			activate (bool): Whether to apply the activation layer.
			norm (bool): Whether to apply the norm layer.

		Returns:
			Tensor: The transformed feature map.
		"""
		for layer in self.order:
			if layer == 'norm' and norm and self.with_norm:
				x = self.norm(x)
			elif layer == 'act' and activate and self.with_activation:
				x = self.activate(x)
		return x
