import torch
from torch import nn
import math
from swin_transformer import *
from collections import OrderedDict
from torchinfo import summary

class TSFPFusion(nn.Module):
    """Bidirectional (top-down then bottom-up) fusion of a 4-level feature pyramid.

    Input is a list of four 5D feature maps with channels (96, 192, 384, 768),
    coarsest last; each adjacent level differs by 2x spatially. Every level is
    first projected to 192 channels while its temporal length is halved, then
    features are propagated down the pyramid (upsampling the coarser level) and
    back up (max-pooling the finer level). Output is four 192-channel maps at
    the original four spatial scales.
    """

    def __init__(self):
        super().__init__()
        # Spatial 2x upsampling used when merging a coarser level into a finer one.
        self.upsampling2 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')

        # Per-level projections: unify channels to 192 and halve the temporal axis.
        self.conv1_1x1 = nn.Conv3d(96, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))
        self.conv2_1x1 = nn.Conv3d(192, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))
        self.conv3_1x1 = nn.Conv3d(384, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))
        self.conv4_1x1 = nn.Conv3d(768, 192, kernel_size=(2, 1, 1), stride=(2, 1, 1))

        def _smooth():
            # 3x3 spatial smoothing conv applied after each merge step.
            return nn.Conv3d(192, 192, kernel_size=(1, 3, 3), stride=(1, 1, 1),
                             padding=(0, 1, 1), bias=False)

        # Top-down smoothing convs (one per merged level).
        self.conv1, self.conv2, self.conv3 = _smooth(), _smooth(), _smooth()

        # Bottom-up path: spatial 2x downsampling plus smoothing convs.
        self.max_pool = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.convs2, self.convs3, self.convs4 = _smooth(), _smooth(), _smooth()

    def forward(self, x):
        f1, f2, f3, f4 = x

        # Project every level to the common 192-channel representation.
        f4 = self.conv4_1x1(f4)
        f3 = self.conv3_1x1(f3)
        f2 = self.conv2_1x1(f2)
        f1 = self.conv1_1x1(f1)

        # Top-down: merge each coarser level into the next finer one.
        f3 = self.conv3(f3 + self.upsampling2(f4))
        f2 = self.conv2(f2 + self.upsampling2(f3))
        f1 = self.conv1(f1 + self.upsampling2(f2))

        # Bottom-up: merge each finer level back into the next coarser one.
        f2 = self.convs2(f2 + self.max_pool(f1))
        f3 = self.convs3(f3 + self.max_pool(f2))
        f4 = self.convs4(f4 + self.max_pool(f3))

        return [f1, f2, f3, f4]

class Gate(nn.Module):
    """Residual spatial gating: scale features by a learned sigmoid mask, then
    add the input back (so the gate can only amplify, never fully suppress)."""

    def __init__(self, in_plane):
        super().__init__()
        # Depth-preserving 3x3 spatial conv that produces the gating logits.
        self.gate = nn.Conv3d(in_plane, in_plane, kernel_size=(1, 3, 3),
                              stride=(1, 1, 1), padding=(0, 1, 1))

    def forward(self, rgb_fea):
        attn = torch.sigmoid(self.gate(rgb_fea))
        # Equivalent to rgb_fea * (1 + attn): gated residual connection.
        return rgb_fea * attn + rgb_fea

class TSAttention(nn.Module):
    """Apply an independent Gate to each of the four pyramid levels."""

    def __init__(self, in_plane):
        super().__init__()
        # One gate per pyramid level; all levels share the same channel count.
        self.gate1 = Gate(in_plane)
        self.gate2 = Gate(in_plane)
        self.gate3 = Gate(in_plane)
        self.gate4 = Gate(in_plane)

    def forward(self, x):
        f1, f2, f3, f4 = x
        return [self.gate1(f1), self.gate2(f2), self.gate3(f3), self.gate4(f4)]

class Decoder(nn.Module):
    """Decode the four fused 192-channel pyramid levels into one saliency map.

    Each level gets its own decoding head; the heads share the same
    conv/ReLU/upsample backbone and differ only in how much extra spatial
    upsampling they apply (compensating for the 2x scale gap between adjacent
    pyramid levels), so all four heads emit maps of identical spatial size.
    The four 1-channel maps are concatenated and fused by a final sigmoid conv.
    """

    def __init__(self):
        super().__init__()
        self.upsampling2 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')
        self.upsampling4 = nn.Upsample(scale_factor=(1, 4, 4), mode='trilinear')

        # Heads for levels 1..4 (finest to coarsest); coarser levels need more
        # upsampling to reach the common output resolution.
        self.convtsp1 = self._make_branch(extra_mid_up=False, final_up=None)
        self.convtsp2 = self._make_branch(extra_mid_up=True, final_up=None)
        self.convtsp3 = self._make_branch(extra_mid_up=True, final_up=self.upsampling2)
        self.convtsp4 = self._make_branch(extra_mid_up=True, final_up=self.upsampling4)

        # Fuse the four per-level maps into the final single-channel prediction.
        self.convout = nn.Sequential(
            nn.Conv3d(4, 1, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1)),
            nn.Sigmoid()
        )

    def _make_branch(self, extra_mid_up, final_up):
        """Build one decoding head: 192->96->48->24->1 channels, halving the
        temporal axis at each channel-reducing conv, with optional extra
        spatial upsampling before and/or after the final conv."""
        layers = [
            nn.Conv3d(192, 96, kernel_size=(2, 3, 3), stride=(2, 1, 1), padding=(0, 1, 1), bias=False),
            nn.ReLU(),
            self.upsampling2,
            nn.Conv3d(96, 48, kernel_size=(2, 3, 3), stride=(2, 1, 1), padding=(0, 1, 1), bias=False),
            nn.ReLU(),
            self.upsampling2,
            nn.Conv3d(48, 24, kernel_size=(2, 3, 3), stride=(2, 1, 1), padding=(0, 1, 1), bias=False),
            nn.ReLU(),
        ]
        if extra_mid_up:
            layers.append(self.upsampling2)
        layers.append(nn.Conv3d(24, 1, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False))
        if final_up is not None:
            layers.append(final_up)
        layers.append(nn.Sigmoid())
        return nn.Sequential(*layers)

    def forward(self, x):
        x1, x2, x3, x4 = x
        maps = [self.convtsp1(x1), self.convtsp2(x2), self.convtsp3(x3), self.convtsp4(x4)]
        fused = self.convout(torch.cat(maps, 1))
        # Collapse the singleton channel and temporal dims: (B, 1, 1, H, W) -> (B, H, W).
        # NOTE(review): this assumes the input temporal length reduces to 1 in
        # the heads (i.e. 8 input frames per level) — confirm against callers.
        return fused.view(fused.size(0), fused.size(3), fused.size(4))


class VideoSaliencyModel(nn.Module):
    """End-to-end video saliency network: Swin-3D backbone -> pyramid fusion
    -> per-level gating attention -> saliency decoder.

    Args:
        pretrain: forwarded to SwinTransformer3D as its ``pretrained`` argument.
    """

    def __init__(self, pretrain=None):
        super().__init__()
        self.backbone = SwinTransformer3D(pretrained=pretrain)
        self.fusion = TSFPFusion()
        self.attention = TSAttention(192)
        self.decoder = Decoder()

    def forward(self, x, target=None, task_id=-1, video_idx=-1):
        # The backbone returns a pair; only the multi-scale feature list is used here.
        _, feats = self.backbone(x, target, task_id, video_idx)
        feats = self.attention(self.fusion(feats))
        return self.decoder(feats)

    def get_state_memory(self):
        # Expose the backbone's memory-bank state (presumably for cross-clip
        # continual inference — confirm against training loop).
        bank = self.backbone.memorybank
        return [bank.memory_bank, bank.saliency_bank]

    def set_state_memory(self, memory):
        # Restore state previously captured by get_state_memory().
        bank = self.backbone.memorybank
        bank.memory_bank = memory[0]
        bank.saliency_bank = memory[1]

if __name__ == "__main__":
    # Smoke test: build the full model and print a one-level torchinfo summary
    # for a batch of one 32-frame RGB clip at 384x224.
    model = VideoSaliencyModel()
    summary(model, (1, 3, 32, 384, 224), depth=1)