import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

class TempSELayer(nn.Module):
    """Temporal squeeze-and-excitation gate followed by temporal averaging.

    Spatially pools a 5-D input to per-(channel, frame) descriptors, runs
    them through a channel-bottleneck excitation, clamps the result to
    [0, 1] as a gate, and averages the gated input over the temporal axis.
    """

    def __init__(self, seqLen, channel, reduction=4):
        """
        Args:
            seqLen: temporal length D of the expected input.
            channel: number of input channels C.
            reduction: bottleneck ratio for the excitation convolutions.
        """
        super(TempSELayer, self).__init__()
        # Squeeze: average over H and W while keeping the temporal axis.
        self.avg_pool_HW = nn.AdaptiveAvgPool3d((seqLen, 1, 1))
        # Excitation: channel bottleneck; the (3, 1) kernel with (1, 0)
        # padding lets neighboring frames interact along the temporal axis
        # without changing the sequence length.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, (3, 1), padding=(1, 0)),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, (3, 1), padding=(1, 0)),
        )
        # Collapses the temporal dimension of the gated input to length 1.
        self.avg_pool_seq = nn.AvgPool3d((seqLen, 1, 1), stride=1)

    def forward(self, x):
        """Gate x over time and average it.

        Args:
            x: tensor of shape (B, C, D, H, W) with D == seqLen.

        Returns:
            Tensor of shape (B, C, 1, H, W).
        """
        b, c, d, _, _ = x.size()
        y = self.avg_pool_HW(x).view(b, c, d, 1)
        y = self.conv_layers(y).view(b, c, d, 1, 1)
        # Hard clamp serves as the gate nonlinearity (in place of a sigmoid).
        y = torch.clamp(y, 0, 1)
        output = self.avg_pool_seq(x * y)
        return output


class TempAvgLayer(nn.Module):
    """Temporal attention pooling: weighted average over frames.

    Produces per-(channel, frame) attention logits via a channel
    bottleneck, normalizes them over the temporal axis (softmax by
    default, or a learned linear mix when ``use_fc``), and collapses the
    temporal dimension of the input with the resulting weights.
    """

    def __init__(self, seqLen, channel, reduction=4, use_fc=False):
        """
        Args:
            seqLen: temporal length D of the expected input.
            channel: number of input channels C.
            reduction: bottleneck ratio for the attention convolutions.
            use_fc: if True, mix the frame logits with a linear layer
                instead of normalizing them with a temporal softmax.
        """
        super(TempAvgLayer, self).__init__()
        self.seqLen = seqLen
        self.use_fc = use_fc
        # Squeeze spatial dims, keep the temporal axis.
        self.avg_pool = nn.AdaptiveAvgPool3d((seqLen, 1, 1))
        self.conv_layers = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, (3, 1), padding=(1, 0)),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, (3, 1), padding=(1, 0)),
        )
        if self.use_fc:
            self.fc = nn.Linear(seqLen, seqLen)
            # NOTE(review): fills every weight with seqLen (seqLen / 1);
            # possibly intended as 1 / seqLen for an averaging init — confirm.
            self.fc.weight.data.fill_(seqLen / 1)
            self.fc.bias.data.zero_()

    def forward(self, x):
        """Collapse the temporal axis of x.

        Args:
            x: tensor of shape (B, C, D, H, W) with D == seqLen.

        Returns:
            Tensor of shape (B, C, H, W).
        """
        b, c, d, _, _ = x.size()
        assert self.seqLen == d

        y = self.avg_pool(x).view(b, c, d, 1)
        y = self.conv_layers(y).view(b, c, d, 1, 1)

        if self.use_fc:
            y = y.squeeze(4).squeeze(3)
            y = self.fc(y)
            y_soft = y.unsqueeze(-1).unsqueeze(-1)
        else:
            # Numerically stable softmax over the temporal axis; equivalent
            # to the explicit exp/sum but immune to overflow on large logits.
            y_soft = torch.softmax(y, dim=2)
        # y_soft: (b, c, d, 1, 1); the weighted sum removes the temporal axis.
        output = (x * y_soft).sum(2)

        return output


# class TemporalAttentionType1(nn.Module):
#     def __init__(self, seqLen, channel, reduction=4):
#         super(TemporalAttentionType1, self).__init__()
#         self.seqLen = seqLen
#         self.avg_pool = nn.AdaptiveAvgPool3d((seqLen, 1, 1))
#         self.conv_layers = nn.Sequential(
#             nn.Conv2d(channel, channel // reduction, (3, 1), padding=(1, 0)),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(channel // reduction, channel, (3, 1), padding=(1, 0)),
#         )
#
#     def forward(self, x):
#         b, c, d, _, _ = x.size()
#         assert self.seqLen == d
#
#         y = self.avg_pool(x).view(b, c, d, 1)
#         y = self.conv_layers(y).view(b, c, d, 1, 1)
#
#         y_exp = torch.exp(y)
#         sum_batch = torch.sum(y_exp, 2, keepdim=True)
#         y_soft = torch.div(y_exp, sum_batch)
#         # (b, c, d, 1, 1)
#         output = x * y_soft + x
#
#         return output


class TemporalAttention(nn.Module):
    """Residual temporal attention over a 5-D input.

    Computes per-(channel, frame) attention values with a channel
    bottleneck, optionally normalizes them with a temporal softmax, and
    adds the re-weighted input back onto the original (residual form).
    With ``use_gamma`` the attention branch is scaled by a learnable
    scalar initialized to zero, so the module starts out as the identity.
    """

    def __init__(self, seqLen, channel, reduction=4,
                 use_softmax=False,
                 use_gamma=True):
        """
        Args:
            seqLen: temporal length D of the expected input.
            channel: number of input channels C.
            reduction: bottleneck ratio for the attention convolutions.
            use_softmax: normalize attention values over the temporal axis.
            use_gamma: scale the attention branch by a learnable scalar
                initialized to zero (identity at the start of training).
        """
        super(TemporalAttention, self).__init__()
        self.seqLen = seqLen
        self.avg_pool = nn.AdaptiveAvgPool3d((seqLen, 1, 1))
        self.conv_layers = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, (3, 1), padding=(1, 0)),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, (3, 1), padding=(1, 0)),
        )

        self.use_softmax = use_softmax
        self.use_gamma = use_gamma
        if self.use_gamma:
            self.gamma = Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply residual temporal attention.

        Args:
            x: tensor of shape (B, C, D, H, W) with D == seqLen.

        Returns:
            Tensor of shape (B, C, D, H, W) (same as the input).
        """
        b, c, d, _, _ = x.size()
        assert self.seqLen == d

        y = self.avg_pool(x).view(b, c, d, 1)
        y = self.conv_layers(y).view(b, c, d, 1, 1)

        if self.use_softmax:
            # Numerically stable softmax over the temporal axis; equivalent
            # to the explicit exp/sum but immune to overflow on large logits.
            y = torch.softmax(y, dim=2)
        # y: (b, c, d, 1, 1) attention weights, broadcast over H and W.
        if self.use_gamma:
            output = self.gamma * x * y + x
        else:
            output = x * y + x

        return output
