import torch
import torch.nn as nn


from defnet.utils.tensor_ops import cus_sample, upsample_add
from defnet.backbone.VGG import (
    Backbone_VGG_in1,
    Backbone_VGG_in3,
)
from defnet.module.MyModules import (
    EDFM,
    IDEM,
    FDM,
)

from util.misc import NestedTensor
import torch.nn.functional as F

class BasicConv2d(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU(inplace) building block.

    The convolution is bias-free by default because the following
    BatchNorm2d already provides a learnable shift.
    """

    def __init__(
        self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
    ):
        super().__init__()

        conv = nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        # Keep the submodule name "basicconv" so state_dict keys stay stable.
        self.basicconv = nn.Sequential(
            conv,
            nn.BatchNorm2d(out_planes),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Run the conv-bn-relu stack over ``x`` (N, C_in, H, W)."""
        return self.basicconv(x)

class DEFNet(nn.Module):
    """Two-stream fusion network with a top-down, guided decoder.

    One VGG encoder processes the RGB batch (``tensor_list.tensors``) and a
    second VGG variant processes the auxiliary modality
    (``tensor_list.tirs`` — presumably thermal/IR; same spatial size as the
    RGB input, per the shapes exercised in ``__main__``).  At the 4x/8x/16x
    scales the two streams are fused by IDEM, and the decoder walks
    top-down (16x -> 8x -> 4x) using EDFM-modulated ``upsample_add`` merges.
    The 4x and 8x fused feature maps are returned as ``NestedTensor``s.
    """

    def __init__(self, pretrained=True):
        """Build encoders and decoder modules.

        Args:
            pretrained: forwarded to both VGG backbones to load
                pretrained weights.
        """
        super(DEFNet, self).__init__()
        self.num_channels = 256
        # Upsampling helpers shared by the decoder stages.
        self.upsample_add = upsample_add
        self.upsample = cus_sample

        # RGB encoder split into 5 stages (named by output stride).
        self.encoder1, self.encoder2, self.encoder4, self.encoder8, self.encoder16 = Backbone_VGG_in3(
            pretrained=pretrained
        )
        # Auxiliary-modality encoder, same 5-stage split.
        (
            self.depth_encoder1,
            self.depth_encoder2,
            self.depth_encoder4,
            self.depth_encoder8,
            self.depth_encoder16,
        ) = Backbone_VGG_in1(pretrained=pretrained)

        # 1x1 lateral projections applied to the RGB features before they
        # are merged into the top-down pathway.
        self.trans16 = nn.Conv2d(512, 512, 1)
        self.trans8 = nn.Conv2d(512, 512, 1)
        self.trans4 = nn.Conv2d(256, 256, 1)

        # Cross-modal fusion (RGB + auxiliary) at each used scale.
        self.t_trans16 = IDEM(512, 512)
        self.t_trans8 = IDEM(512, 256)
        self.t_trans4 = IDEM(256, 256)

        # 3x3 smoothing conv after each decoder merge.
        self.upconv16 = BasicConv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.upconv8 = BasicConv2d(512, 256, kernel_size=3, stride=1, padding=1)
        self.upconv4 = BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1)

        # EDFM modules guiding each top-down step with the fused features.
        # NOTE(review): selfdc_4 is registered but never used in forward()
        # — it belonged to the (disabled) 2x/1x decoder tail and FDM head.
        self.selfdc_16 = EDFM(512, 512)
        self.selfdc_8 = EDFM(256, 256)
        self.selfdc_4 = EDFM(256, 256)

    def forward(self, tensor_list: NestedTensor):
        """Encode both modalities, fuse, and decode to 4x/8x features.

        Args:
            tensor_list: ``NestedTensor`` carrying ``.tensors`` (RGB batch,
                (B, 3, H, W)), ``.tirs`` (auxiliary batch, same H, W) and
                ``.mask`` ((B, H, W) padding mask; must not be None).

        Returns:
            dict mapping ``'4x'`` and ``'8x'`` to ``NestedTensor``s whose
            tensors are (B, 256, H/4, W/4) and (B, 256, H/8, W/8) fused
            features, each paired with the padding mask resized to match.
        """
        in_data = tensor_list.tensors
        in_depth = tensor_list.tirs

        # Run the two encoders stage by stage, in lockstep.
        in_data_1 = self.encoder1(in_data)
        in_data_1_d = self.depth_encoder1(in_depth)
        in_data_2 = self.encoder2(in_data_1)
        in_data_2_d = self.depth_encoder2(in_data_1_d)
        in_data_4 = self.encoder4(in_data_2)
        in_data_4_d = self.depth_encoder4(in_data_2_d)
        in_data_8 = self.encoder8(in_data_4)
        in_data_8_d = self.depth_encoder8(in_data_4_d)
        in_data_16 = self.encoder16(in_data_8)
        in_data_16_d = self.depth_encoder16(in_data_8_d)

        # Cross-modal fusion at the three used scales.
        in_data_4_aux = self.t_trans4(in_data_4, in_data_4_d)
        in_data_8_aux = self.t_trans8(in_data_8, in_data_8_d)
        in_data_16_aux = self.t_trans16(in_data_16, in_data_16_d)

        # Lateral 1x1 projections of the RGB stream.
        in_data_4 = self.trans4(in_data_4)
        in_data_8 = self.trans8(in_data_8)
        in_data_16 = self.trans16(in_data_16)

        # Top-down decoding: modulate with EDFM, upsample-add the lateral,
        # then smooth with a 3x3 conv.  Stops at the 4x scale.
        out_data_16 = in_data_16
        out_data_16 = self.upconv16(out_data_16)
        out_data_8 = self.upsample_add(self.selfdc_16(out_data_16, in_data_16_aux), in_data_8)
        out_data_8 = self.upconv8(out_data_8)
        out_data_4 = self.upsample_add(self.selfdc_8(out_data_8, in_data_8_aux), in_data_4)
        out_data_4 = self.upconv4(out_data_4)

        # Resize the padding mask to each output resolution.  F.interpolate
        # defaults to mode='nearest', which is what a boolean mask needs.
        m = tensor_list.mask
        assert m is not None
        mask_4x = F.interpolate(m[None].float(), size=out_data_4.shape[-2:]).to(torch.bool)[0]
        mask_8x = F.interpolate(m[None].float(), size=out_data_8.shape[-2:]).to(torch.bool)[0]

        # No annotation here: the original annotated with `typing.Dict`,
        # which was never imported (undefined name).
        out = {}
        out['4x'] = NestedTensor(out_data_4, mask_4x)
        out['8x'] = NestedTensor(out_data_8, mask_8x)

        return out


def fusion_model():
    """Factory: return a freshly constructed DEFNet (default pretrained backbones)."""
    return DEFNet()


if __name__ == "__main__":
    # Smoke test: one forward pass over random inputs at 256x256.
    net = DEFNet()
    rgb = torch.randn(2, 3, 256, 256)
    tir = torch.randn(2, 3, 256, 256)
    pad_mask = torch.randn(2, 256, 256)
    batch = NestedTensor(rgb, pad_mask, tir)
    fused = net(batch)
