import torch
import torch.nn as nn
from torchvision import models

from models.modules.utils import up_conv4
from models.modules.graph_mamba import create_ssm_block, GraphSSMBlock, _init_weights
from models.modules.joint_mamba import JointMamba


class RGBTCC(nn.Module):
    """RGB-Thermal crowd-counting network.

    A shared VGG16-BN encoder processes the RGB and thermal frames (stacked
    along the batch dimension), a JointMamba block fuses the two modality
    streams, a GraphSSMBlock refines the fused features, and a stack of
    transposed convolutions regresses a single-channel density map.

    Args:
        pretrained: if True (default), initialize the VGG16-BN backbone with
            ImageNet weights; if False, the backbone is randomly initialized.
    """

    def __init__(self, pretrained=True):
        super(RGBTCC, self).__init__()

        in_dim = 256
        # Reduce the 512-channel VGG stage-4 output to in_dim and halve
        # the spatial resolution (total encoder stride becomes 16).
        self.down = nn.Sequential(
            nn.Conv2d(512, in_dim, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(in_dim),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
        )

        # Regression head: 1x1 conv + four 2x-upsampling transposed convs
        # (x16 total), producing a 1-channel density map at input resolution.
        self.last_layer_rgbt = nn.Sequential(
            nn.Conv2d(
                in_channels=256,
                out_channels=256,
                kernel_size=1,
                stride=1,
                padding=0),
            nn.BatchNorm2d(256, momentum=0.01),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, output_padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1, output_padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1, output_padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(32, 1, 4, stride=2, padding=1, output_padding=0, bias=True),
        )

        # NOTE: initialize head/down weights BEFORE creating the backbone so
        # that pretrained VGG weights are not clobbered by _weight_init_.
        self._weight_init_()

        # VGG16-BN backbone; `pretrained` was previously ignored — it now
        # actually controls whether ImageNet weights are loaded.
        vgg = models.vgg16_bn(
            weights=models.VGG16_BN_Weights.DEFAULT if pretrained else None
        )

        # Partition VGG16 into encoder blocks (shared between RGB and TIR).
        features = list(vgg.features.children())

        self.rgb1 = nn.Sequential(*features[0:6])
        self.rgb2 = nn.Sequential(*features[6:13])
        self.rgb4 = nn.Sequential(*features[13:23])
        self.rgb8 = nn.Sequential(*features[23:33])

        # Cross-modal state-space fusion (second argument presumably the
        # block depth — TODO confirm against JointMamba's signature).
        self.jmamba = JointMamba(
            in_dim,
            4,
            rms_norm=True,
            residual_in_fp32=True,
            fused_add_norm=True,
        )

        self.gmamba = GraphSSMBlock(
                channels=in_dim,
                depth=2,
                mlp_ratio=4.0,
                drop=0.0,
                drop_path=[0.4722222089767456, 0.5],
                act_layer='GELU',
                norm_layer='LN',
                post_norm=False,
                downsample=False,
                layer_scale=None,
                with_cp=False,
            )
        self.gmamba.apply(_init_weights)

    def _weight_init_(self):
        """Initialize conv layers with N(0, 0.01) and BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, RGBT):
        """Predict a density map from a (rgb, tir) pair.

        Args:
            RGBT: sequence whose first element is the RGB tensor and second
                the thermal tensor, both of shape (B, C, H, W).

        Returns:
            Density map tensor of shape (B, 1, H, W).
        """
        B, _, H, W = RGBT[0].shape
        device = RGBT[0].device

        rgb = RGBT[0]
        tir = RGBT[1]
        # Stack modalities along the batch axis so one encoder pass serves both.
        rgbt = torch.cat([rgb, tir], 0)

        rgbt1 = self.rgb1(rgbt)
        rgbt2 = self.rgb2(rgbt1)
        rgbt4 = self.rgb4(rgbt2)
        rgbt8 = self.rgb8(rgbt4)
        rgbt16 = self.down(rgbt8)

        # Separate the modality streams again and fuse them with JointMamba
        # (the trailing 16 matches the encoder's total downsampling factor).
        rgb, tir = rgbt16.split(B)
        rgb, tir, _ = self.jmamba(rgb, tir, B, H, W, 16, device)

        # Element-wise sum fusion, then graph-SSM refinement in NHWC layout.
        rgbt = rgb + tir
        rgbt = rgbt.permute(0, 2, 3, 1)
        rgbt = self.gmamba(rgbt)
        rgbt = rgbt.permute(0, 3, 1, 2)

        rgbt = self.last_layer_rgbt(rgbt)

        return rgbt


class RGBTCC_src(nn.Module):
    """Earlier / source variant of the RGB-T crowd-counting network.

    Differences from ``RGBTCC``: fusion happens at stride 8 (no extra
    downsampling stage), the two modality streams are concatenated rather
    than summed, and an ``up_conv4`` block precedes a shorter regression head.

    Args:
        pretrained: if True (default), initialize the VGG16-BN backbone with
            ImageNet weights; if False, the backbone is randomly initialized.
    """

    def __init__(self, pretrained=True):
        # BUG FIX: the original called super(RGBTCC, self).__init__(),
        # naming the wrong class; use this class.
        super(RGBTCC_src, self).__init__()

        self.up_conv_rgbt = up_conv4(1024, 512, 256)

        # Regression head: 1x1 conv + two 2x-upsampling transposed convs.
        self.last_layer_rgbt = nn.Sequential(
            nn.Conv2d(
                in_channels=256,
                out_channels=256,
                kernel_size=1,
                stride=1,
                padding=0),
            nn.BatchNorm2d(256, momentum=0.01),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, output_padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(128, 1, 4, stride=2, padding=1, output_padding=0, bias=True),
        )

        # Initialize before loading the backbone so pretrained weights survive.
        self._weight_init_()

        self.gmamba = create_ssm_block()

        # VGG16-BN backbone; `pretrained` was previously ignored — it now
        # actually controls whether ImageNet weights are loaded.
        vgg = models.vgg16_bn(
            weights=models.VGG16_BN_Weights.DEFAULT if pretrained else None
        )

        # Partition VGG16 into encoder blocks (shared between RGB and TIR).
        features = list(vgg.features.children())

        self.rgb1 = nn.Sequential(*features[0:6])
        self.rgb2 = nn.Sequential(*features[6:13])
        self.rgb4 = nn.Sequential(*features[13:23])
        self.rgb8 = nn.Sequential(*features[23:33])

        # Cross-modal state-space fusion at 512 channels (stride-8 features).
        self.jmamba = JointMamba(
            512,
            4,
            rms_norm=True,
            residual_in_fp32=True,
            fused_add_norm=True,
        )

    def _weight_init_(self):
        """Initialize conv layers with N(0, 0.01) and BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, RGBT):
        """Predict a density map from a (rgb, tir) pair.

        Args:
            RGBT: sequence whose first element is the RGB tensor and second
                the thermal tensor, both of shape (B, C, H, W).

        Returns:
            Density map tensor (single channel).
        """
        B, _, H, W = RGBT[0].shape
        device = RGBT[0].device

        rgb = RGBT[0]
        tir = RGBT[1]
        # Stack modalities along the batch axis so one encoder pass serves both.
        rgbt = torch.cat([rgb, tir], 0)

        rgbt = self.rgb1(rgbt)
        rgbt = self.rgb2(rgbt)
        rgbt = self.rgb4(rgbt)
        rgbt = self.rgb8(rgbt)

        # Separate the modality streams and fuse them at stride 8.
        rgb, tir = rgbt.split(B)
        rgb, tir, _ = self.jmamba(rgb, tir, B, H, W, 8, device)

        # Channel-wise concatenation (512 + 512 = 1024), then graph-SSM
        # refinement in NHWC layout.
        rgbt = torch.cat([rgb, tir], 1)
        rgbt = rgbt.permute(0, 2, 3, 1)
        rgbt = self.gmamba(rgbt)
        rgbt = rgbt.permute(0, 3, 1, 2)

        rgbt = self.up_conv_rgbt(rgbt)
        rgbt = self.last_layer_rgbt(rgbt)

        return rgbt
        
        
def get_model(train=False):
    """Factory for the default RGB-T model.

    ``train`` is accepted for API compatibility with sibling factories but
    is currently unused.
    """
    return RGBTCC()


if __name__ == '__main__':
    from torchsummary import summary

    # BUG FIX: the original called the undefined get_seg_model();
    # the factory defined in this file is get_model().
    model = get_model().cuda()
    print(model)
    summary(model, (3, 224, 224))
