'''
Implementation of the GDN non-linearity based on the papers:

"Density Modeling of Images using a Generalized Normalization Transformation"

Johannes Ballé, Valero Laparra, Eero P. Simoncelli

https://arxiv.org/abs/1511.06281

Adapted from https://github.com/jorge-pessoa/pytorch-gdn/blob/master/pytorch_gdn/__init__.py
'''

import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Function


class LowerBound(Function):
	"""Elementwise ``max(inputs, bound)`` with a modified gradient.

	Unlike a plain clamp, the backward pass also lets gradient through for
	clamped entries whenever the incoming gradient is negative (i.e. the
	update would push the value back above the bound), so parameters stuck
	at the bound remain trainable.
	"""

	@staticmethod
	def forward(ctx, inputs, bound):
		# Build the bound tensor directly on the input's device and dtype.
		# The original allocated `torch.ones(inputs.size())` on CPU (always
		# float32) and copied it over — an extra transfer, and a dtype
		# mismatch for non-float32 inputs.
		b = torch.ones_like(inputs) * torch.as_tensor(
			bound, dtype=inputs.dtype, device=inputs.device)
		ctx.save_for_backward(inputs, b)
		return torch.max(inputs, b)

	@staticmethod
	def backward(ctx, grad_output):
		inputs, b = ctx.saved_tensors

		# Pass gradient where the input is above the bound, or where the
		# gradient would move a clamped value back into the valid region.
		pass_through = (inputs >= b) | (grad_output < 0)
		return pass_through.type(grad_output.dtype) * grad_output, None


class GDN(nn.Module):
	"""Generalized divisive normalization layer.

	y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))

	(The original docstring omitted the square on x[j]; the norm pool below
	is computed on ``inputs ** 2``, matching the paper.)  With
	``inverse=True`` the layer multiplies instead of divides (IGDN).
	"""

	def __init__(self,
	             ch,
	             device,
	             inverse=False,
	             beta_min=1e-6,
	             gamma_init=.1,
	             reparam_offset=2 ** -18):
		"""
		Args:
			ch: number of input/output channels.
			device: device (str or torch.device) to create parameters on.
			inverse: if True, apply the inverse (multiplicative) transform.
			beta_min: lower bound for beta after reparameterization.
			gamma_init: initial value for the diagonal of gamma.
			reparam_offset: offset used in the sqrt-reparameterization that
				keeps beta/gamma positive and well-conditioned.
		"""
		super(GDN, self).__init__()
		self.inverse = inverse
		self.beta_min = beta_min
		self.gamma_init = gamma_init
		self.reparam_offset = torch.FloatTensor([reparam_offset])

		self.build(ch, torch.device(device))

	def build(self, ch, device):
		"""Create the reparameterized beta/gamma parameters for `ch` channels."""
		self.pedestal = self.reparam_offset ** 2
		self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** .5
		self.gamma_bound = self.reparam_offset

		# beta is stored as sqrt(beta + pedestal); initialized so beta == 1.
		beta = torch.sqrt(torch.ones(ch) + self.pedestal)
		self.beta = nn.Parameter(beta.to(device))

		# gamma is stored the same way; initialized to gamma_init * I.
		g = self.gamma_init * torch.eye(ch) + self.pedestal
		gamma = torch.sqrt(g)

		self.gamma = nn.Parameter(gamma.to(device))
		self.pedestal = self.pedestal.to(device)

	def forward(self, inputs):
		"""Apply (inverse) GDN to a 4-D (N, C, H, W) or 5-D (N, C, D, H, W) input."""
		# Keep the non-parameter helper tensor on the input's device.
		# Bugfix vs. the original: do NOT reassign self.beta / self.gamma
		# here. On an actual device change Parameter.to() returns a plain
		# Tensor, and nn.Module.__setattr__ raises TypeError when a Tensor
		# is assigned over a registered Parameter (it also detaches the
		# parameter from any optimizer). Parameters are moved by Module.to().
		self.pedestal = self.pedestal.to(inputs.device)

		# Fold 5-D inputs to 4-D so a 1x1 conv2d can compute the norm pool.
		unfold = False
		if inputs.dim() == 5:
			unfold = True
			bs, ch, d, w, h = inputs.size()
			inputs = inputs.view(bs, ch, d * w, h)

		_, ch, _, _ = inputs.size()

		# Undo the sqrt-reparameterization, clamping from below so the
		# recovered beta/gamma stay strictly positive.
		beta = LowerBound.apply(self.beta, self.beta_bound)
		beta = beta ** 2 - self.pedestal

		gamma = LowerBound.apply(self.gamma, self.gamma_bound)
		gamma = gamma ** 2 - self.pedestal
		gamma = gamma.view(ch, ch, 1, 1)

		# Norm pool: 1x1 conv computes beta[i] + sum_j gamma[j, i] * x[j]^2.
		norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
		norm_ = torch.sqrt(norm_)

		# Divide for GDN, multiply for the inverse transform (IGDN).
		outputs = inputs * norm_ if self.inverse else inputs / norm_

		if unfold:
			outputs = outputs.view(bs, ch, d, w, h)
		return outputs