import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from functools import partial
from typing import List
from torch import Tensor
import copy
import os


class PIIConv(nn.Module):
    """Partial Information Interaction convolution for RGB-T features.

    The input is the channel-wise concatenation of an RGB half and a TIR
    half.  Channels are partitioned as [rgb-partial | shared | tir-partial]:
    each modality's partial group gets its own 3x3 conv, the shared middle
    group is compressed by a 1x1 conv and injected into BOTH modalities,
    and each modality is then projected to out_dim/2 channels before the
    halves are re-concatenated.

    Args:
        in_dim: total input channels (RGB half + TIR half).
        out_dim: total output channels (RGB half + TIR half).
        n_div: split ratio; each modality's 3x3-conv group has
            (in_dim/2) // n_div channels.
        forward: 'split_cat' (default) or 'slicing'; both compute the same
            result and differ only in how the input is partitioned.
    """

    def __init__(self, in_dim, out_dim, n_div, forward='split_cat'):
        super().__init__()
        self.in_dim = int(in_dim / 2)            # per-modality input width
        self.out_dim = int(out_dim / 2)          # per-modality output width
        self.dim_conv3 = self.in_dim // n_div    # per-modality 3x3-conv group width
        # middle "shared" channels, untouched by the 3x3 convs
        self.dim_untouched = in_dim - 2 * self.dim_conv3

        # Compress shared group to half its width so that
        # dim_conv3 + dim_untouched/2 == in_dim/2 (input width of *_conv1).
        self.rgbt_conv1 = nn.Conv2d(self.dim_untouched, int(self.dim_untouched / 2), 1, 1, bias=False)
        self.rgb_partial_conv3 = nn.Conv2d(self.dim_conv3, self.dim_conv3, 3, 1, 1, bias=False)
        self.rgb_conv1 = nn.Conv2d(self.in_dim, self.out_dim, 1, 1, bias=False)
        self.tir_partial_conv3 = nn.Conv2d(self.dim_conv3, self.dim_conv3, 3, 1, 1, bias=False)
        self.tir_conv1 = nn.Conv2d(self.in_dim, self.out_dim, 1, 1, bias=False)

        if forward == 'slicing':
            self.forward = self.forward_slicing
        elif forward == 'split_cat':
            self.forward = self.forward_split_cat
        else:
            raise NotImplementedError

    def forward_slicing(self, x: Tensor) -> Tensor:
        """Index-slicing variant of the forward pass (intended for inference).

        BUGFIX: the original body referenced ``self.partial_conv3``, which is
        never created (AttributeError at runtime), and its clone/in-place
        pattern cannot work here anyway because the output channel count
        differs from the input.  This now partitions the input by slicing and
        performs exactly the same computation as :meth:`forward_split_cat`.
        """
        rgb = x[:, :self.dim_conv3]
        tmp = x[:, self.dim_conv3:self.dim_conv3 + self.dim_untouched]
        tir = x[:, self.dim_conv3 + self.dim_untouched:]

        tmp = self.rgbt_conv1(tmp)
        rgb = self.rgb_conv1(torch.cat((self.rgb_partial_conv3(rgb), tmp), 1))
        tir = self.tir_conv1(torch.cat((tmp, self.tir_partial_conv3(tir)), 1))
        return torch.cat((rgb, tir), 1)

    def forward_split_cat(self, x: Tensor) -> Tensor:
        """torch.split-based forward pass (training and inference)."""
        rgb, tmp, tir = torch.split(x, [self.dim_conv3, self.dim_untouched, self.dim_conv3], dim=1)
        tmp = self.rgbt_conv1(tmp)  # compressed shared branch, reused by both modalities

        rgb = self.rgb_partial_conv3(rgb)
        rgb = torch.cat((rgb, tmp), 1)
        rgb = self.rgb_conv1(rgb)

        tir = self.tir_partial_conv3(tir)
        tir = torch.cat((tmp, tir), 1)
        tir = self.tir_conv1(tir)

        out = torch.cat((rgb, tir), 1)
        return out
        

class GCFusion(nn.Module):
    """Global-context cross-modal fusion.

    Splits the input into RGB and TIR halves, builds a 1-channel spatial
    attention map per modality from local statistics (channel mean + max)
    plus a shared global branch over the summed features, then exchanges
    attended features across modalities with residual connections.
    """

    def __init__(self, dim):
        super().__init__()
        # One attention branch per modality over the 2-channel [mean, max] map.
        self.local_att_rgb = self._make_att_branch(2)
        self.local_att_tir = self._make_att_branch(2)

        # Shared global branch operates on the per-modality feature width.
        self.dim = int(dim / 2)
        self.global_att = self._make_att_branch(self.dim)

        self.sigmoid = nn.Sigmoid()

    @staticmethod
    def _make_att_branch(in_channels):
        # Downsample-upsample bottleneck producing a 1-channel attention logit map.
        return nn.Sequential(
            nn.Conv2d(in_channels, 1, 3, stride=2, padding=1, dilation=1),
            nn.BatchNorm2d(1),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.BatchNorm2d(1),
        )

    def forward(self, x):
        rgb, tir = x.chunk(2, dim=1)
        summed = rgb + tir

        # Per-modality local statistics: channel-wise mean and max.
        rgb_stats = torch.cat(
            [rgb.mean(dim=1, keepdim=True), rgb.amax(dim=1, keepdim=True)], dim=1)
        tir_stats = torch.cat(
            [tir.mean(dim=1, keepdim=True), tir.amax(dim=1, keepdim=True)], dim=1)

        shared = self.global_att(summed)
        att_rgb = self.sigmoid(self.local_att_rgb(rgb_stats) + shared)
        att_tir = self.sigmoid(self.local_att_tir(tir_stats) + shared)

        # Cross-modal exchange: each modality receives the OTHER's attended map.
        fused_rgb = rgb + tir * att_tir
        fused_tir = tir + rgb * att_rgb
        return torch.cat((fused_rgb, fused_tir), 1)
        
class PIIStage(nn.Module):
    """One backbone stage: `depth` PIIConv+ReLU units followed by GCFusion.

    The first unit maps in_dim -> out_dim; subsequent units keep out_dim.
    When `sample` is True, the last unit additionally halves the spatial
    resolution with a stride-2 max-pool.
    """

    def __init__(self, in_dim, out_dim, depth, n_div, sample=True):
        super().__init__()

        self.in_dim = in_dim
        units = []
        for idx in range(depth):
            layers = [PIIConv(self.in_dim, out_dim, n_div), nn.ReLU(inplace=True)]
            self.in_dim = out_dim  # only the first unit changes the width
            if sample and idx == depth - 1:
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
            units.append(nn.Sequential(*layers))
        units.append(GCFusion(out_dim))
        self.blocks = nn.Sequential(*units)

    def forward(self, x: Tensor) -> Tensor:
        return self.blocks(x)
        
        
class ConvStage(nn.Module):
    """Stem stage: two parallel 3-channel conv branches (RGB and TIR).

    Each branch maps its 3-channel image to dim/2 features and halves the
    spatial resolution; the outputs are concatenated along channels.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = int(dim / 2)  # per-modality feature width
        self.rgb_block = self._make_branch(self.dim)
        self.tir_block = self._make_branch(self.dim)

    @staticmethod
    def _make_branch(width):
        # Two 3x3 convs with ReLU, then a stride-2 max-pool.
        return nn.Sequential(
            nn.Conv2d(3, width, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(width, width, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
        )

    def forward(self, x: Tensor) -> Tensor:
        rgb, tir = x.chunk(2, dim=1)
        return torch.cat((self.rgb_block(rgb), self.tir_block(tir)), 1)
        
        
class PIIGCNet(nn.Module):
    """RGB-T density-map regression network.

    A conv stem plus four PIIStage blocks extract multi-scale RGB-T
    features; the deepest feature map is upsampled and fused (by addition)
    with a 1x1-projected stage-3 feature, then decoded by a small conv
    head into a single-channel non-negative density map.
    """

    def __init__(self,
                 embed_dims=(128, 256, 512, 1024, 1024),
                 depths=(0, 2, 4, 4, 4),
                 n_div=4,
                 ):
        super().__init__()

        self.stage1 = ConvStage(dim=embed_dims[0])
        self.stage2 = PIIStage(embed_dims[0], embed_dims[1], depths[1], n_div)
        self.stage3 = PIIStage(embed_dims[1], embed_dims[2], depths[2], n_div)
        self.stage4 = PIIStage(embed_dims[2], embed_dims[3], depths[3], n_div)
        # Final stage keeps spatial resolution (no pooling).
        self.stage5 = PIIStage(embed_dims[3], embed_dims[4], depths[4], n_div, sample=False)

        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        # Projects stage-3 features to the stage-5 width for the skip addition.
        self.conv1_1 = nn.Conv2d(embed_dims[2], embed_dims[4], 1, 1, bias=False)

        # Regression head: progressively narrows to a 1-channel density map.
        self.reg_layer = nn.Sequential(
            nn.Conv2d(embed_dims[4], embed_dims[2], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(embed_dims[2], embed_dims[1], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(embed_dims[1], embed_dims[0], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(embed_dims[0], 1, 1)
        )

        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for convs; unit-gain init for batch norms."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, rgbt):
        # rgbt is a pair: [rgb image, thermal image]; concatenate on channels.
        x = torch.cat((rgbt[0], rgbt[1]), 1)

        feat1 = self.stage1(x)
        feat2 = self.stage2(feat1)
        feat3 = self.stage3(feat2)
        feat4 = self.stage4(feat3)
        feat5 = self.stage5(feat4)

        # Fuse the deepest features with the projected stage-3 skip.
        fused = self.upsample(feat5) + self.conv1_1(feat3)
        density = self.reg_layer(fused)
        return torch.abs(density)


def piigcnet_(n_div):
    """Factory: build a PIIGCNet with the given partial-conv split ratio."""
    return PIIGCNet(n_div=n_div)
    
    
if __name__ == "__main__":
    rgb = torch.randn(1, 3, 256, 256)
    tir = torch.randn(1, 3, 256, 256)
    inputs = [rgb, tir]
    model = piigcnet_(4)
    
    densi_map = model(inputs)
    
    print(f"densi_map.shape={densi_map.shape}")
