import torch
from torch import nn
import math
from swin_transformer import *
from collections import OrderedDict
from torchinfo import summary

class TSFPFusion(nn.Module):
    """Bidirectional feature-pyramid fusion over four backbone levels.

    Each level is first projected to a common 192-channel space (the
    projection also halves the temporal axis), then fused top-down
    (coarse -> fine, via spatial x2 upsampling) and finally bottom-up
    (fine -> coarse, via spatial x2 max pooling).  Returns the four
    refined levels, finest first.
    """

    def __init__(self):
        super(TSFPFusion, self).__init__()

        # Spatial x2 upsampling used when merging a coarser level into a finer one.
        self.upsampling2 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')

        # Channel projections to 192.  Despite the "_1x1" names, these use a
        # (2, 1, 1) kernel with temporal stride 2, so time is halved as well.
        self.conv1_1x1 = nn.Conv3d(96, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))
        self.conv2_1x1 = nn.Conv3d(192, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))
        self.conv3_1x1 = nn.Conv3d(384, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))
        self.conv4_1x1 = nn.Conv3d(768, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))

        def smooth():
            # Spatial 3x3 smoothing conv applied after each fusion step.
            return nn.Conv3d(192, 192, kernel_size=(1, 3, 3), stride=(1, 1, 1),
                             padding=(0, 1, 1), bias=False)

        # Smoothing convs for the top-down pass (creation order matters for
        # reproducible parameter initialization).
        self.conv1 = smooth()
        self.conv2 = smooth()
        self.conv3 = smooth()

        # Spatial x2 downsampling plus smoothing convs for the bottom-up pass.
        self.max_pool = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.convs2 = smooth()
        self.convs3 = smooth()
        self.convs4 = smooth()

    def forward(self, x):
        p1, p2, p3, p4 = x

        # Project every level to 192 channels (temporal axis halved).
        p1 = self.conv1_1x1(p1)
        p2 = self.conv2_1x1(p2)
        p3 = self.conv3_1x1(p3)
        p4 = self.conv4_1x1(p4)

        # Top-down pass: add the upsampled coarser level, then smooth.
        p3 = self.conv3(p3 + self.upsampling2(p4))
        p2 = self.conv2(p2 + self.upsampling2(p3))
        p1 = self.conv1(p1 + self.upsampling2(p2))

        # Bottom-up pass: add the pooled finer level, then smooth.
        p2 = self.convs2(p2 + self.max_pool(p1))
        p3 = self.convs3(p3 + self.max_pool(p2))
        p4 = self.convs4(p4 + self.max_pool(p3))

        return [p1, p2, p3, p4]

class InceptionDWConv3d(nn.Module):
    """Inception-style depthwise convolution for 3D (T, H, W) feature maps.

    Channels are partitioned into five groups: an untouched identity group
    plus four depthwise-conv branches — a small cube kernel and three long
    "band" kernels, one per axis.  Branch outputs are concatenated back in
    the original channel order, so input and output shapes match.
    """

    def __init__(self, in_channels, cube_kernel_size=3, band_kernel_size=11, branch_ratio=0.125):
        super().__init__()

        # Channels allotted to each convolutional branch.
        gc = int(in_channels * branch_ratio)
        band_pad = band_kernel_size // 2

        # Depthwise (groups=gc) branches; padding keeps spatial size fixed.
        self.dwconv_hwd = nn.Conv3d(gc, gc, cube_kernel_size,
                                    padding=cube_kernel_size // 2, groups=gc)
        self.dwconv_wd = nn.Conv3d(gc, gc, kernel_size=(1, 1, band_kernel_size),
                                   padding=(0, 0, band_pad), groups=gc)
        self.dwconv_hd = nn.Conv3d(gc, gc, kernel_size=(1, band_kernel_size, 1),
                                   padding=(0, band_pad, 0), groups=gc)
        self.dwconv_hw = nn.Conv3d(gc, gc, kernel_size=(band_kernel_size, 1, 1),
                                   padding=(band_pad, 0, 0), groups=gc)

        # Remainder of the channels bypasses all convolutions.
        self.split_indexes = (in_channels - 4 * gc, gc, gc, gc, gc)

    def forward(self, x):
        identity, c_cube, c_w, c_h, c_d = torch.split(x, self.split_indexes, dim=1)
        branches = (
            identity,
            self.dwconv_hwd(c_cube),
            self.dwconv_wd(c_w),
            self.dwconv_hd(c_h),
            self.dwconv_hw(c_d),
        )
        return torch.cat(branches, dim=1)


class TSAttention(nn.Module):
    """Per-level gated attention over the four fused pyramid levels.

    Each level x_i is refined as ``x_i * sigmoid(LN_i(gate_i(x_i))) + x_i``:
    an InceptionDWConv3d branch produces a gate map, which is layer-normalized,
    squashed to (0, 1) and used as a multiplicative residual gate.
    """

    def __init__(self, in_plane):
        super(TSAttention, self).__init__()
        # One independent gating branch per pyramid level.
        self.gate1 = InceptionDWConv3d(in_plane)
        self.gate2 = InceptionDWConv3d(in_plane)
        self.gate3 = InceptionDWConv3d(in_plane)
        self.gate4 = InceptionDWConv3d(in_plane)
        # NOTE(review): normalized shapes are hard-coded to the feature-map
        # sizes produced by one fixed clip resolution (cf. the
        # (1, 3, 32, 384, 224) summary call below) — any other input size
        # will fail here.  `in_plane` is expected to be 192 to match.
        self.ln1 = nn.LayerNorm([192, 8, 56, 96])
        self.ln2 = nn.LayerNorm([192, 8, 28, 48])
        self.ln3 = nn.LayerNorm([192, 8, 14, 24])
        self.ln4 = nn.LayerNorm([192, 8, 7, 12])

    def forward(self, x):
        x1, x2, x3, x4 = x
        gate1 = torch.sigmoid(self.ln1(self.gate1(x1)))
        gate2 = torch.sigmoid(self.ln2(self.gate2(x2)))
        gate3 = torch.sigmoid(self.ln3(self.gate3(x3)))
        # BUG FIX: the original applied self.gate3 to x4, leaving gate4's
        # parameters entirely unused; level 4 must use its own gate branch.
        gate4 = torch.sigmoid(self.ln4(self.gate4(x4)))

        # Multiplicative gating with an identity (residual) connection.
        x1 = x1 * gate1 + x1
        x2 = x2 * gate2 + x2
        x3 = x3 * gate3 + x3
        x4 = x4 * gate4 + x4

        return [x1, x2, x3, x4]

class Decoder(nn.Module):
    """Multi-scale saliency decoder.

    Each of the four 192-channel pyramid levels is decoded by its own head:
    three stride-2 temporal convolutions collapse 8 frames to 1 while
    channels shrink 192 -> 96 -> 48 -> 24 -> 1, and per-head spatial
    upsampling (x2 steps, plus a final x4 for the coarsest level) brings all
    heads to a common resolution.  The four single-channel maps are then
    fused by a 1x3x3 convolution into one saliency map of shape (B, H, W).
    """

    def __init__(self):
        super(Decoder, self).__init__()
        self.upsampling2 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')
        self.upsampling4 = nn.Upsample(scale_factor=(1, 4, 4), mode='trilinear')

        def up2():
            # Spatial x2 upsampling (time untouched); parameter-free.
            return nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')

        def halve_t(cin, cout):
            # Temporal /2 (kernel/stride 2 along time), spatial size preserved.
            return nn.Conv3d(cin, cout, kernel_size=(2, 3, 3), stride=(2, 1, 1),
                             padding=(0, 1, 1), bias=False)

        def to_one_channel():
            # Final projection to a single saliency channel.
            return nn.Conv3d(24, 1, kernel_size=(1, 3, 3), stride=(1, 1, 1),
                             padding=(0, 1, 1), bias=False)

        def trunk():
            # Shared decoding trunk: T 8->1, channels 192->24, spatial x4.
            return [halve_t(192, 96), nn.ReLU(), up2(),
                    halve_t(96, 48), nn.ReLU(), up2(),
                    halve_t(48, 24), nn.ReLU()]

        # Per-level heads differ only in how much extra spatial upsampling
        # they need to reach the common output resolution.
        self.convtsp1 = nn.Sequential(*trunk(), to_one_channel(), nn.Sigmoid())
        self.convtsp2 = nn.Sequential(*trunk(), up2(), to_one_channel(), nn.Sigmoid())
        self.convtsp3 = nn.Sequential(*trunk(), up2(), to_one_channel(), up2(), nn.Sigmoid())
        self.convtsp4 = nn.Sequential(*trunk(), up2(), to_one_channel(),
                                      nn.Upsample(scale_factor=(1, 4, 4), mode='trilinear'),
                                      nn.Sigmoid())

        # Fuse the four per-level saliency maps into one.
        self.convout = nn.Sequential(
            nn.Conv3d(4, 1, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1)),
            nn.Sigmoid()
        )

    def forward(self, x):
        heads = (self.convtsp1, self.convtsp2, self.convtsp3, self.convtsp4)
        maps = [head(level) for head, level in zip(heads, x)]
        fused = self.convout(torch.cat(maps, 1))
        # Squeeze the singleton channel and time dims -> (batch, H, W).
        return fused.view(fused.size(0), fused.size(3), fused.size(4))


class VideoSaliencyModel(nn.Module):
    """End-to-end video saliency network.

    Pipeline: Swin 3D backbone -> TSFPFusion (pyramid fusion) ->
    TSAttention (per-level gating) -> Decoder (single saliency map).

    Args:
        pretrain: optional path/flag forwarded to SwinTransformer3D's
            ``pretrained`` argument.
    """

    def __init__(self, pretrain=None):
        super(VideoSaliencyModel, self).__init__()
        self.backbone = SwinTransformer3D(pretrained=pretrain)
        self.fusion = TSFPFusion()
        self.attention = TSAttention(192)
        self.decoder = Decoder()

    def forward(self, x):
        # Backbone returns a pair; only the list of pyramid features is used.
        _, feats = self.backbone(x)
        feats = self.attention(self.fusion(feats))
        return self.decoder(feats)



if __name__ == "__main__":
    # Smoke test: build the full model and print a one-level layer summary
    # for a clip of 32 RGB frames at 384x224.
    model = VideoSaliencyModel()
    summary(model, (1, 3, 32, 384, 224), depth=1)