import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# --- Global spiking-neuron hyper-parameters (shared by every module below) ---
thresh = 0.5  # membrane-potential firing threshold; also used to init BN weights
lens = 0.5  # half-width of the rectangular surrogate-gradient window in ActFun
decay = 0.25  # membrane-potential decay constant used in mem_update
time_window = 5  # number of simulation time steps T; every layer loops over this
# Global log of per-timestep mean firing rates. mem_update.forward appends to
# it; MIMO_UNet_SNN_FIRE_SCNN.forward resets it at the start of each pass.
spike_mat = []

# define approximate firing function
# define approximate firing function
class ActFun(torch.autograd.Function):
    """Heaviside spike activation with a rectangular surrogate gradient.

    Forward emits a spike (1.0) wherever the membrane potential exceeds
    ``thresh``.  The true derivative is zero almost everywhere, so backward
    substitutes a boxcar window of half-width ``lens`` centred on the
    threshold, with height 1 / (2 * lens).
    """

    @staticmethod
    def forward(ctx, input):
        # Remember the membrane potential for the surrogate gradient.
        ctx.save_for_backward(input)
        return input.gt(thresh).float()

    @staticmethod
    def backward(ctx, grad_output):
        (membrane,) = ctx.saved_tensors
        # 1/(2*lens) inside the window |u - thresh| < lens, zero outside.
        window = (membrane - thresh).abs().lt(lens).float()
        return grad_output.clone() * (window / (2 * lens))


# Functional alias for the surrogate-gradient spike activation above.
act_fun = ActFun.apply
# membrane potential update

class mem_update(nn.Module):
    """Leaky-integrate-and-fire (LIF) membrane update over ``time_window`` steps.

    Input/output shape: (T, N, C, H, W) with T >= time_window.  At each step
    the membrane decays, is gated to zero wherever it fired on the previous
    step (soft reset; the gate is detached so it carries no gradient), and
    integrates the new input.  Side effect: the mean firing rate of every
    step is appended to the global ``spike_mat`` list (reset by the
    top-level model's forward).
    """

    def __init__(self):
        super(mem_update, self).__init__()

    def forward(self, x):
        # Note: the original also allocated zero tensors for mem/spike and an
        # integer mem_old placeholder; all three were overwritten before any
        # use, so the dead allocations are removed here.
        output = torch.zeros_like(x)
        mem_old = None  # membrane potential from the previous step
        spike = None    # spikes emitted at the previous step
        for i in range(time_window):
            if i >= 1:
                # Decay the previous membrane, reset it where a spike fired,
                # then integrate the current input.
                mem = mem_old * decay * (1 - spike.detach()) + x[i]
            else:
                mem = x[i]
            spike = act_fun(mem)
            # Log this step's mean firing rate for sparsity statistics.
            spike_mat.append(spike.reshape(-1).mean().cpu().detach().numpy())
            mem_old = mem.clone()
            output[i] = spike
        return output

class batch_norm_2d(nn.Module):
    """Spatio-temporal batch norm: per-channel BatchNorm3d over (N, T, H, W).

    The input arrives as (T, N, C, H, W); it is rearranged to (N, C, T, H, W)
    so nn.BatchNorm3d normalises each channel over batch, time and space,
    then rearranged back.  Uses BatchNorm3d1, whose affine weight is
    initialised to ``thresh``.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(batch_norm_2d, self).__init__()
        self.bn = BatchNorm3d1(num_features)

    def forward(self, input):
        # (T, N, C, H, W) -> (N, C, T, H, W)
        rearranged = input.permute(1, 2, 0, 3, 4).contiguous()
        normed = self.bn(rearranged)
        # (N, C, T, H, W) -> (T, N, C, H, W)
        return normed.permute(2, 0, 1, 3, 4).contiguous()


class batch_norm_2d1(nn.Module):
    """Same as batch_norm_2d, except the underlying BN (BatchNorm3d2)
    initialises its affine weight to 0.2 * thresh instead of thresh."""

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(batch_norm_2d1, self).__init__()
        self.bn = BatchNorm3d2(num_features)

    def forward(self, input):
        # (T, N, C, H, W) -> (N, C, T, H, W)
        rearranged = input.permute(1, 2, 0, 3, 4).contiguous()
        normed = self.bn(rearranged)
        # (N, C, T, H, W) -> (T, N, C, H, W)
        return normed.permute(2, 0, 1, 3, 4).contiguous()


class BatchNorm3d1(torch.nn.BatchNorm3d):
    """BatchNorm3d whose affine weight is initialised to ``thresh`` (not 1)."""

    def reset_parameters(self):
        self.reset_running_stats()
        if not self.affine:
            return
        # Scale toward the firing threshold so normalised activations sit
        # around the spiking point.
        nn.init.constant_(self.weight, thresh)
        nn.init.zeros_(self.bias)

class BatchNorm3d2(torch.nn.BatchNorm3d):
    """BatchNorm3d whose affine weight is initialised to ``0.2 * thresh``."""

    def reset_parameters(self):
        self.reset_running_stats()
        if not self.affine:
            return
        # Smaller initial scale than BatchNorm3d1 (0.2x the threshold).
        nn.init.constant_(self.weight, 0.2*thresh)
        nn.init.zeros_(self.bias)

class tdBatchNorm(nn.BatchNorm2d):
    """Threshold-dependent Batch Normalization (tdBN).

    Implementation of tdBN from https://arxiv.org/pdf/2011.05280: statistics
    are averaged over the time dimension as well, and the normalised output
    is scaled by ``alpha * thresh`` so activations are matched to the firing
    threshold.  Expects input of shape (T, B, C, H, W); normalisation is per
    channel C over the (T, B, H, W) axes.

    Fix vs. the original: with ``track_running_stats=False`` the original
    dereferenced ``self.running_mean``/``running_var`` (which are None) both
    during the train-time update and in eval, and crashed.  This version
    follows standard nn.BatchNorm semantics: batch statistics are used
    whenever training or not tracking, and the running buffers are only
    updated when they exist.  Default configuration behaves identically.

    Args:
        num_features (int): same as nn.BatchNorm2d
        eps (float): same as nn.BatchNorm2d
        momentum (float): same as nn.BatchNorm2d
        alpha (float): additional scale factor which may change in resblocks.
        affine (bool): same as nn.BatchNorm2d
        track_running_stats (bool): same as nn.BatchNorm2d
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, alpha=1, affine=True, track_running_stats=True):
        super(tdBatchNorm, self).__init__(
            num_features, eps, momentum, affine, track_running_stats)
        self.alpha = alpha

    def forward(self, input):
        exponential_average_factor = 0.0

        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum

        if self.training or not self.track_running_stats:
            # Batch statistics over (T, B, H, W); biased variance, matching
            # nn.BatchNorm train-time semantics.
            mean = input.mean([0, 1, 3, 4])
            var = input.var([0, 1, 3, 4], unbiased=False)
            if self.training and self.track_running_stats:
                # n = number of elements each channel statistic covers.
                n = input.numel() / input.size(2)
                with torch.no_grad():
                    self.running_mean = exponential_average_factor * mean\
                        + (1 - exponential_average_factor) * self.running_mean
                    # update running_var with the unbiased variance estimate
                    self.running_var = exponential_average_factor * var * n / (n - 1)\
                        + (1 - exponential_average_factor) * self.running_var
        else:
            mean = self.running_mean
            var = self.running_var

        # Normalise, then rescale by alpha * Vth (tdBN's threshold coupling).
        input = self.alpha * thresh * (input - mean[None, None, :, None, None]) / (torch.sqrt(var[None, None, :, None, None] + self.eps))
        if self.affine:
            input = input * self.weight[None, None, :, None, None] + self.bias[None, None, :, None, None]

        return input

class SNN_Upsample(nn.Upsample):
    """Time-distributed upsampling: applies 2-D upsampling to each time step.

    Input shape (T, N, C, H, W); each (N, C, H, W) slice is upsampled by
    ``scale_factor``.  Requires a numeric ``scale_factor`` (the ``size``
    argument is accepted for interface compatibility but not used by this
    forward).
    """

    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
        super(SNN_Upsample, self).__init__(size, scale_factor, mode, align_corners)

    def forward(self, input):
        h = int(input.shape[3] * self.scale_factor)
        w = int(input.shape[4] * self.scale_factor)
        output = torch.zeros(size=(input.shape[0], input.shape[1], input.shape[2], h, w), device=input.device)
        for i in range(time_window):
            # F.interpolate replaces the deprecated F.upsample and now honours
            # the mode/align_corners this module was constructed with (they
            # were silently ignored before; default 'nearest' is unchanged).
            output[i] = F.interpolate(input[i], scale_factor=self.scale_factor,
                                      mode=self.mode, align_corners=self.align_corners)
        return output

class Snn_Conv2d(nn.Conv2d):
    """Time-distributed Conv2d: applies the same 2-D convolution to every
    time step of a (T, N, C, H, W) input; returns (time_window, N, C_out, H', W').
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros', marker='b'):
        super(Snn_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.marker = marker  # kept for interface compatibility; unused here

    def forward(self, input):
        weight = self.weight
        # Standard conv output size including the dilation term — the
        # original formula omitted dilation, mis-sizing the buffer (and
        # crashing on assignment) whenever dilation > 1.
        h = (input.size(3) + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1
        w = (input.size(4) + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) // self.stride[1] + 1
        c1 = torch.zeros(time_window, input.size(1), self.out_channels, h, w, device=input.device)

        for i in range(time_window):
            c1[i] = F.conv2d(input[i], weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
        return c1

class Snn_Trans_Conv2d(nn.ConvTranspose2d):
    """Time-distributed transposed Conv2d over a (T, N, C, H, W) input."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, output_padding=0,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros', marker='b'):
        super(Snn_Trans_Conv2d, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode)
        self.marker = marker  # kept for interface compatibility; unused here

    def forward(self, input):
        def out_size(size, dim):
            # Standard transposed-convolution output size per spatial dim.
            return ((size - 1) * self.stride[dim] - 2 * self.padding[dim]
                    + self.dilation[dim] * (self.kernel_size[dim] - 1)
                    + self.output_padding[dim] + 1)

        height = out_size(input.size(3), 0)
        width = out_size(input.size(4), 1)
        result = torch.zeros(time_window, input.size(1), self.out_channels,
                             height, width, device=input.device)
        for step in range(time_window):
            result[step] = F.conv_transpose2d(input[step], self.weight, self.bias,
                                              self.stride, self.padding,
                                              self.output_padding, self.groups,
                                              self.dilation)
        return result

class SNN_Avg_Pool2d(nn.AvgPool2d):
    """Time-distributed average pooling over a (T, N, C, H, W) input.

    Assumes scalar (int) kernel_size / stride / padding, not tuples.
    """

    def __init__(self, kernel_size, stride, padding):
        super(SNN_Avg_Pool2d, self).__init__(kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=False, count_include_pad=True, divisor_override=None)

    def forward(self, input):
        pooled_h = (input.size(3) - self.kernel_size + 2 * self.padding) // self.stride + 1
        pooled_w = (input.size(4) - self.kernel_size + 2 * self.padding) // self.stride + 1
        result = torch.zeros(time_window, input.size(1), input.size(2),
                             pooled_h, pooled_w, device=input.device)
        for step in range(time_window):
            result[step] = F.avg_pool2d(input[step], self.kernel_size,
                                        self.stride, self.padding)
        return result

class SNN_Max_Pool2d(nn.MaxPool2d):
    """Time-distributed max pooling over a (T, N, C, H, W) input.

    Assumes scalar (int) kernel_size / stride / padding, not tuples.
    """

    def __init__(self, kernel_size, stride, padding):
        super(SNN_Max_Pool2d, self).__init__(kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=False, return_indices=False)

    def forward(self, input):
        pooled_h = (input.size(3) - self.kernel_size + 2 * self.padding) // self.stride + 1
        pooled_w = (input.size(4) - self.kernel_size + 2 * self.padding) // self.stride + 1
        result = torch.zeros(time_window, input.size(1), input.size(2),
                             pooled_h, pooled_w, device=input.device)
        for step in range(time_window):
            result[step] = F.max_pool2d(input[step], self.kernel_size,
                                        self.stride, self.padding)
        return result

class SNN_Softmax2d(nn.Softmax2d):
    """Time-distributed channel softmax over a (T, N, C, H, W) input.

    Matches nn.Softmax2d semantics: softmax over the channel dimension at
    every spatial location, applied independently at each time step.
    """

    def __init__(self):
        super(SNN_Softmax2d, self).__init__()

    def forward(self, input):
        output = torch.zeros(input.shape, device=input.device)
        for i in range(time_window):
            # dim=1 (channels) made explicit: calling F.softmax with an
            # implicit dim is deprecated, and for the 4-D slice the implicit
            # choice was dim=1 anyway, so results are unchanged.
            output[i] = F.softmax(input[i], dim=1)
        return output

class SCNN_Model(nn.Module):
    """Fixed (non-trainable) depthwise box filter applied per time step.

    Convolves every time step of a (T, N, C, H, W) input with an all-ones
    ``kernal_size`` x ``kernal_size`` kernel per channel (padding=1, so
    spatial size is preserved for the default 3x3 kernel).  Expects
    ``in_chan == out_chan`` because of the grouped convolution.
    """

    def __init__(self, in_chan, out_chan, kernal_size=3):
        super(SCNN_Model, self).__init__()
        self.in_chan = in_chan
        self.out_chan = out_chan
        # register_buffer: the constant kernel follows the module across
        # .to(device)/.cuda() and is excluded from gradients/optimizers.
        # (The original wrapped torch.ones in torch.tensor and called
        # .cuda() unconditionally — crashing on CPU-only machines — and the
        # `require_grad` typo made the flag assignment a no-op.)
        self.register_buffer('filter_weight', torch.ones(in_chan, 1, kernal_size, kernal_size))

    def forward(self, input):
        # Defensive: make sure the kernel sits on the input's device even if
        # the module itself was never moved.
        weight = self.filter_weight.to(input.device)
        output = torch.zeros(time_window, input.size(1), self.out_chan, input.size(3), input.size(4), device=input.device)
        for i in range(time_window):
            output[i] = F.conv2d(input[i], weight, stride=(1, 1), padding=1, groups=self.out_chan)
        return output

class fire_snn_res(nn.Module):
    """Spiking 'fire' block: 1x1 squeeze feeding two parallel expand paths.

    Path 1: LIF -> 1x1 expand conv -> tdBN.
    Path 2: LIF -> fixed depthwise SCNN filter -> tdBN -> LIF -> 1x1 expand
    conv -> tdBN.
    The two halves are concatenated on the channel dim, so the block emits
    ``expand_planes`` channels in total.

    Args:
        inplanes: input channel count.
        squeeze_planes: channels of the 1x1 squeeze stage.
        expand_planes: total output channels (split half/half between paths).
        bias: whether the conv layers carry a bias.
        transpose: use Snn_Trans_Conv2d instead of Snn_Conv2d.  All kernels
            are 1x1 with stride 1, so shapes match either way.
    """

    def __init__(self, inplanes, squeeze_planes=16, expand_planes=64, bias=True, transpose=False):
        super(fire_snn_res, self).__init__()
        expand_planes = expand_planes // 2  # each of the two paths emits half
        # The original duplicated the entire constructor for the transpose
        # case; the only difference was the conv class, so select it once.
        Conv = Snn_Trans_Conv2d if transpose else Snn_Conv2d

        self.lif1 = mem_update()
        self.conv1 = Conv(inplanes, squeeze_planes, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn1 = tdBatchNorm(squeeze_planes)

        self.lif2 = mem_update()
        self.conv2 = Conv(squeeze_planes, expand_planes, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn2 = tdBatchNorm(expand_planes)

        self.lif3 = mem_update()
        self.SCNN = SCNN_Model(squeeze_planes, squeeze_planes)
        self.bn3 = tdBatchNorm(squeeze_planes)

        self.lif4 = mem_update()
        self.conv3 = Conv(squeeze_planes, expand_planes, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn4 = tdBatchNorm(expand_planes)

    def forward(self, input):
        # Shared squeeze stage.
        x = self.lif1(input)
        x = self.conv1(x)
        x = self.bn1(x)

        # Expand path 1: plain 1x1 conv.
        out1 = self.lif2(x)
        out1 = self.conv2(out1)
        out1 = self.bn2(out1)

        # Expand path 2: fixed SCNN filter, then 1x1 conv.
        out2 = self.lif3(x)
        out2 = self.SCNN(out2)
        out2 = self.bn3(out2)

        out2 = self.lif4(out2)
        out2 = self.conv3(out2)
        out2 = self.bn4(out2)

        # Concatenate both paths on the channel dim (dim 2 of T,N,C,H,W).
        out = torch.cat([out1, out2], 2)
        return out

class BasicConv(nn.Module):
    """Resampling wrapper around a fire_snn_res block.

    stride == 2 prepends nearest upsampling (transpose=True, decoder side)
    or max pooling (encoder side); the channel transform itself is always a
    fire_snn_res block with a 16-channel squeeze stage.  ``kernel_size``
    only affects the geometry of the stride-2 max pool.
    """

    def __init__(self, in_channel, out_channel, kernel_size, stride, bias=True, transpose=False):
        super(BasicConv, self).__init__()
        layers = list()
        if transpose:
            # (Dead `padding = kernel_size // 2 - 1` assignment and the
            # commented-out Snn_Trans_Conv2d line from the original removed.)
            if stride == 2:
                layers.append(SNN_Upsample(scale_factor=2))
            layers.append(fire_snn_res(in_channel, 16, out_channel, bias=bias, transpose=transpose))
        else:
            if stride == 2:
                padding = kernel_size // 2
                layers.append(SNN_Max_Pool2d(kernel_size=kernel_size, padding=padding, stride=stride))
            layers.append(fire_snn_res(in_channel, 16, out_channel, bias=bias))

        self.main = nn.Sequential(*layers)

    def forward(self, x):
        return self.main(x)


class ResBlock(nn.Module):
    """Residual block: two 3x3 BasicConv stages with an identity shortcut."""

    def __init__(self, in_channel, out_channel):
        super(ResBlock, self).__init__()
        stages = [
            BasicConv(in_channel, out_channel, kernel_size=3, stride=1),
            BasicConv(out_channel, out_channel, kernel_size=3, stride=1),
        ]
        self.main = nn.Sequential(*stages)

    def forward(self, x):
        residual = self.main(x)
        return residual + x


class EBlock(nn.Module):
    """Encoder stage: ``num_res`` stacked ResBlocks at a fixed channel width."""

    def __init__(self, out_channel, num_res=8):
        super(EBlock, self).__init__()
        self.layers = nn.Sequential(
            *(ResBlock(out_channel, out_channel) for _ in range(num_res))
        )

    def forward(self, x):
        return self.layers(x)


class DBlock(nn.Module):
    """Decoder stage: ``num_res`` stacked ResBlocks at a fixed channel width."""

    def __init__(self, channel, num_res=8):
        super(DBlock, self).__init__()
        self.layers = nn.Sequential(
            *(ResBlock(channel, channel) for _ in range(num_res))
        )

    def forward(self, x):
        return self.layers(x)


class AFF(nn.Module):
    """Asymmetric feature fusion: concat three scales, then 1x1 + 3x3 convs."""

    def __init__(self, in_channel, out_channel):
        super(AFF, self).__init__()
        self.conv = nn.Sequential(
            BasicConv(in_channel, out_channel, kernel_size=1, stride=1),
            BasicConv(out_channel, out_channel, kernel_size=3, stride=1)
        )

    def forward(self, x1, x2, x4):
        # Channel dim of a (T, N, C, H, W) tensor is 2.
        fused = torch.cat([x1, x2, x4], dim=2)
        return self.conv(fused)


class SCM(nn.Module):
    """Shallow Convolutional Module: embeds the 3-channel input image into
    ``out_plane`` features, fusing the raw input back in by concatenation
    followed by a 1x1 conv."""

    def __init__(self, out_plane):
        super(SCM, self).__init__()
        self.main = nn.Sequential(
            BasicConv(3, out_plane//4, kernel_size=3, stride=1),
            BasicConv(out_plane // 4, out_plane // 2, kernel_size=1, stride=1),
            BasicConv(out_plane // 2, out_plane // 2, kernel_size=3, stride=1),
            BasicConv(out_plane // 2, out_plane - 3, kernel_size=1, stride=1)
        )

        # forward concatenates the 3-channel input with the (out_plane - 3)
        # main features, i.e. out_plane channels in total.  The original
        # declared this conv with out_plane - 1 input channels, which can
        # never match the concatenation width and crashed at runtime.
        self.conv = BasicConv(out_plane, out_plane, kernel_size=1, stride=1)

    def forward(self, x):
        main_res = self.main(x)
        x = torch.cat([x, main_res], dim=2)
        x = self.conv(x)
        return x


class FAM(nn.Module):
    """Feature attention: gates x1 by x2 and adds a conv of the product."""

    def __init__(self, channel):
        super(FAM, self).__init__()
        self.merge = BasicConv(channel, channel, kernel_size=3, stride=1)

    def forward(self, x1, x2):
        gated = x1 * x2
        return x1 + self.merge(gated)

class MIMO_UNet_SNN_FIRE_SCNN(nn.Module):
    """Multi-scale spiking U-Net (MIMO-UNet topology) built from SNN blocks.

    The input image is replicated over ``time_window`` time steps and
    processed at three scales (full, 1/2, 1/4 resolution).  Three decoder
    heads each emit a ``num_classes``-channel map; every head output runs
    through a spiking neuron and is averaged over time (rate decoding).
    ``forward`` returns the predictions as a list
    [1/4 scale, 1/2 scale, full scale].

    NOTE(review): the architecture assumes a 3-channel input (see the first
    feat_extract conv and the SCM modules) — confirm against the caller.
    """

    def __init__(self, num_res=8, num_classes=12):
        super(MIMO_UNet_SNN_FIRE_SCNN, self).__init__()
        base_channel = 32

        # Three encoder stages, one per scale (32 / 64 / 128 channels).
        self.Encoder = nn.ModuleList([
            EBlock(base_channel, num_res),
            EBlock(base_channel*2, num_res),
            EBlock(base_channel*4, num_res),
        ])

        # [0] stem, [1]-[2] stride-2 downsampling convs, [3]-[4] stride-2
        # upsampling (transpose) convs, [5] final full-resolution head.
        self.feat_extract = nn.ModuleList([
            BasicConv(3, base_channel, kernel_size=3, stride=1),
            BasicConv(base_channel, base_channel*2, kernel_size=3, stride=2),
            BasicConv(base_channel*2, base_channel*4, kernel_size=3, stride=2),
            BasicConv(base_channel*4, base_channel*2, kernel_size=4, stride=2, transpose=True),
            BasicConv(base_channel*2, base_channel, kernel_size=4, stride=2, transpose=True),
            BasicConv(base_channel, num_classes, kernel_size=3, stride=1)
        ])

        # Three decoder stages mirroring the encoder widths.
        self.Decoder = nn.ModuleList([
            DBlock(base_channel * 4, num_res),
            DBlock(base_channel * 2, num_res),
            DBlock(base_channel, num_res)
        ])

        # 1x1 convs shrinking the [decoder, skip] channel concatenations.
        self.Convs = nn.ModuleList([
            BasicConv(base_channel * 4, base_channel * 2, kernel_size=1, stride=1),
            BasicConv(base_channel * 2, base_channel, kernel_size=1, stride=1),
        ])

        # Per-scale prediction heads for the two coarse scales.
        self.ConvsOut = nn.ModuleList(
            [
                BasicConv(base_channel * 4, num_classes, kernel_size=3, stride=1),
                BasicConv(base_channel * 2, num_classes, kernel_size=3, stride=1),
            ]
        )

        # Asymmetric feature fusion across scales; the 7x input width is the
        # concatenation of 1x + 2x + 4x base_channel features.
        self.AFFs = nn.ModuleList([
            AFF(base_channel * 7, base_channel*1),
            AFF(base_channel * 7, base_channel*2)
        ])

        # 1x1 projections of the raw (replicated) input added to each head.
        self.skips = nn.ModuleList([
            BasicConv(3, num_classes, kernel_size=1, stride=1, bias=False),
            BasicConv(3, num_classes, kernel_size=1, stride=1, bias=False),
            BasicConv(3, num_classes, kernel_size=1, stride=1, bias=False),
        ])

        # Shallow embeddings (SCM) and feature-attention fusions (FAM) for
        # the 1/4 (index 1) and 1/2 (index 2) scale inputs.
        self.FAM1 = FAM(base_channel * 4)
        self.SCM1 = SCM(base_channel * 4)
        self.FAM2 = FAM(base_channel * 2)
        self.SCM2 = SCM(base_channel * 2)

        # Spiking neurons applied to each head output before time-averaging.
        self.output_lif = nn.ModuleList([
            mem_update(),
            mem_update(),
            mem_update(),
        ])

    def get_spike_mat(self):
        # Mean firing rate of the last forward pass.  Only valid after
        # forward() has run at least once (self.spike_mat is set there).
        return self.spike_mat.mean()

    def forward(self, inputs, spike_mat_return=False):
        # Reset the module-level firing-rate log so the statistics gathered
        # below cover exactly this forward pass.
        global spike_mat
        spike_mat = []

        device = inputs.device
        # Build the 1/2- and 1/4-resolution copies of the input.
        inputs_2 = F.interpolate(inputs, scale_factor=0.5)
        inputs_4 = F.interpolate(inputs_2, scale_factor=0.5)

        # Replicate each scale across the time dimension: (T, N, C, H, W).
        x = torch.zeros(time_window, inputs.size()[0], inputs.size()[1], inputs.size()[2], inputs.size()[3], device=device)
        x_2 = torch.zeros(time_window, inputs_2.size()[0], inputs_2.size()[1], inputs_2.size()[2], inputs_2.size()[3], device=device)
        x_4 = torch.zeros(time_window, inputs_4.size()[0], inputs_4.size()[1], inputs_4.size()[2], inputs_4.size()[3], device=device)
        for i in range(time_window):
            x[i] = inputs
            x_2[i] = inputs_2
            x_4[i] = inputs_4

        # Shallow embeddings of the coarse inputs for the FAM fusions.
        z2 = self.SCM2(x_2)
        z4 = self.SCM1(x_4)

        outputs = list()

        # ---- Encoder ----
        x_ = self.feat_extract[0](x)
        res1 = self.Encoder[0](x_)

        z = self.feat_extract[1](res1)
        z = self.FAM2(z, z2)
        res2 = self.Encoder[1](z)

        z = self.feat_extract[2](res2)
        z = self.FAM1(z, z4)
        z = self.Encoder[2](z)

        # Resample every scale's features to the other scales (per time
        # step) for the asymmetric feature fusion below.
        z12 = []
        z21 = []
        z42 = []
        z41 = []
        for i in range(time_window):
            z12.append(F.interpolate(res1[i], scale_factor=0.5))
            z21.append(F.interpolate(res2[i], scale_factor=2))
            z42.append(F.interpolate(z[i], scale_factor=2))
            z41.append(F.interpolate(z42[i], scale_factor=2))
        z12 = torch.stack(z12)
        z21 = torch.stack(z21)
        z42 = torch.stack(z42)
        z41 = torch.stack(z41)

        res2 = self.AFFs[1](z12, res2, z42)
        res1 = self.AFFs[0](res1, z21, z41)

        # ---- Decoder, coarsest (1/4) scale ----
        z = self.Decoder[0](z)
        z_ = self.ConvsOut[0](z)
        z = self.feat_extract[3](z)
        outputs_1 = z_+self.skips[2](x_4)
        outputs_1 = self.output_lif[0](outputs_1)
        # Rate decoding: average the spikes over the time dimension.
        outputs_1 = outputs_1.sum(dim=0)/outputs_1.shape[0]
        outputs_1 = outputs_1.squeeze(0)
        if len(outputs_1.shape)==3:
            # Re-add the batch dim that squeeze(0) removed when batch == 1.
            outputs_1 = outputs_1.unsqueeze(0)
        outputs.append(outputs_1)

        # ---- Decoder, middle (1/2) scale ----
        z = torch.cat([z, res2], dim=2)
        z = self.Convs[0](z)
        z = self.Decoder[1](z)
        z_ = self.ConvsOut[1](z)
        z = self.feat_extract[4](z)
        outputs_2 = z_+self.skips[1](x_2)
        outputs_2 = self.output_lif[1](outputs_2)
        outputs_2 = outputs_2.sum(dim=0)/outputs_2.shape[0]
        outputs_2 = outputs_2.squeeze(0)
        if len(outputs_2.shape)==3:
            outputs_2 = outputs_2.unsqueeze(0)
        outputs.append(outputs_2)

        # ---- Decoder, full scale ----
        z = torch.cat([z, res1], dim=2)
        z = self.Convs[1](z)
        z = self.Decoder[2](z)
        z = self.feat_extract[5](z)
        outputs_3 = z+self.skips[0](x)
        outputs_3 = self.output_lif[2](outputs_3)
        outputs_3 = outputs_3.sum(dim=0)/outputs_3.shape[0]
        outputs_3 = outputs_3.squeeze(0)
        if len(outputs_3.shape)==3:
            outputs_3 = outputs_3.unsqueeze(0)
        outputs.append(outputs_3)
        # Keep the per-step firing-rate log: (time_window, num_lif_calls).
        self.spike_mat = np.array(spike_mat).reshape((time_window,-1))
        if not spike_mat_return:
            return outputs
        else:
            return outputs,np.array(spike_mat).reshape((time_window,-1)).mean()

