import math
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import torchvision
import torch.utils.model_zoo as model_zoo
from torch.nn import init
from torchvision.models import ResNet
from models.base_model import init_net

def define_network(opt):
    """Build the architecture named by ``opt.arch_name`` and initialise it.

    The class is looked up in this module's namespace instead of ``eval``,
    so an arbitrary expression in ``opt.arch_name`` cannot be executed.

    Args:
        opt: options object providing ``arch_name``, ``gpu_ids`` and
            ``init_type`` (plus whatever the chosen architecture reads).

    Returns:
        The network wrapped by :func:`models.base_model.init_net`.

    Raises:
        ValueError: if ``opt.arch_name`` does not name a class in this module.
    """
    try:
        net_cls = globals()[opt.arch_name]
    except KeyError:
        raise ValueError('Unsupported architecture: ' + opt.arch_name) from None
    net = net_cls(opt)
    return init_net(net, gpu_ids=opt.gpu_ids, init_type=opt.init_type)

class Conv2d_cd(nn.Module):
    """Central-Difference Convolution (CDC).

    Computes ``conv(x) - theta * diff(x)`` where ``diff`` convolves ``x``
    with each kernel's spatial sum as a 1x1 kernel, so the output emphasises
    local gradient information over absolute intensity (Yu et al.,
    "Searching Central Difference Convolutional Networks for Face
    Anti-Spoofing", CVPR 2020).

    Args:
        theta: weight of the central-difference term; ``0`` recovers a
            plain convolution. Remaining arguments follow ``nn.Conv2d``.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, dilation=1, groups=1, bias=False, theta=0.7):
        super(Conv2d_cd, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        self.theta = theta

    def forward(self, x):
        out_normal = self.conv(x)

        # theta == 0 degenerates to a vanilla convolution; skip the extra pass.
        if math.fabs(self.theta) < 1e-8:
            return out_normal

        # Collapse each kernel to its spatial sum -> (C_out, C_in/groups, 1, 1)
        # and apply it as a 1x1 conv with the same stride/dilation/groups.
        # (Replaces the old double .sum(2).sum(2) plus an unused shape unpack
        # that shadowed `kernel_size`.)
        kernel_diff = self.conv.weight.sum(dim=(2, 3), keepdim=True)
        out_diff = F.conv2d(input=x, weight=kernel_diff, bias=self.conv.bias,
                            stride=self.conv.stride, padding=0,
                            dilation=self.conv.dilation, groups=self.conv.groups)
        return out_normal - self.theta * out_diff


class SpatialAttention(nn.Module):
    """CBAM-style spatial attention: a sigmoid map from pooled channel stats."""

    def __init__(self, kernel=3):
        super(SpatialAttention, self).__init__()
        # Two input planes: channel-wise mean and channel-wise max of the input.
        self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel, padding=kernel // 2,
                               bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Pool across channels, stack the two statistics, convolve, squash.
        pooled = torch.cat(
            (x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True)[0]),
            dim=1,
        )
        return self.sigmoid(self.conv1(pooled))

class cdcn_multi2(nn.Module):
    """Two-stream (RGB + IR, per the original comments) CDC classifier.

    Each modality runs through its own stem and three CDC blocks; each
    block's output is resized to 32x32 and the three are concatenated.
    The per-modality features are reduced, fused across modalities, mapped
    to a single-channel 32x32 map (1024 values) and classified by two
    fully connected layers into 2 classes.

    The builder helpers below produce module trees identical (names and
    Sequential child order) to the original hand-unrolled version, so
    existing state dicts remain loadable. Dead commented-out "M3" third
    stream code was removed.

    Args:
        basic_conv: convolution class used throughout (default Conv2d_cd).
        theta: central-difference weight forwarded to ``basic_conv``.
    """

    def __init__(self, basic_conv=Conv2d_cd, theta=0.7):
        super(cdcn_multi2, self).__init__()

        def stem():
            # 3 -> 64 entry convolution.
            return nn.Sequential(
                basic_conv(3, 64, kernel_size=3, stride=1, padding=1,
                           bias=False, theta=theta),
                nn.BatchNorm2d(64),
                nn.ReLU(),
            )

        def block(in_ch):
            # in_ch -> 128 -> 196 -> 128, then 2x spatial downsampling.
            return nn.Sequential(
                basic_conv(in_ch, 128, kernel_size=3, stride=1, padding=1,
                           bias=False, theta=theta),
                nn.BatchNorm2d(128),
                nn.ReLU(),
                basic_conv(128, 196, kernel_size=3, stride=1, padding=1,
                           bias=False, theta=theta),
                nn.BatchNorm2d(196),
                nn.ReLU(),
                basic_conv(196, 128, kernel_size=3, stride=1, padding=1,
                           bias=False, theta=theta),
                nn.BatchNorm2d(128),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            )

        def fuse(in_ch, out_ch):
            # 3x3 CDC conv + BN + ReLU used by all fusion stages.
            return nn.Sequential(
                basic_conv(in_ch, out_ch, kernel_size=3, stride=1, padding=1,
                           bias=False, theta=theta),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(),
            )

        # Modality 1 (RGB) stream.
        self.conv1_M1 = stem()
        self.Block1_M1 = block(64)
        self.Block2_M1 = block(128)
        self.Block3_M1 = block(128)

        # Modality 2 (IR) stream.
        self.conv1_M2 = stem()
        self.Block1_M2 = block(64)
        self.Block2_M2 = block(128)
        self.Block3_M2 = block(128)

        # Per-modality reduction of the three concatenated block outputs.
        self.lastconv1_M1 = fuse(128 * 3, 128)
        self.lastconv1_M2 = fuse(128 * 3, 128)
        # Cross-modality fusion.
        self.lastconv2 = fuse(128 * 2, 128)
        # Final single-channel map (no batch norm, matching the original).
        self.lastconv3 = nn.Sequential(
            basic_conv(128, 1, kernel_size=3, stride=1, padding=1,
                       bias=False, theta=theta),
            nn.ReLU(),
        )

        self.fc1 = nn.Linear(1024, 1024)
        self.fc2 = nn.Linear(1024, 2)

        # All block outputs are resized to 32x32 before concatenation, so
        # the flattened single-channel map has 32*32 = 1024 features.
        self.downsample32x32 = nn.Upsample(size=(32, 32), mode='bilinear')

    def _stream(self, x, stem, block1, block2, block3):
        """Run one modality; return its concatenated multi-scale feature."""
        x = stem(x)
        x = block1(x)
        f1 = self.downsample32x32(x)
        x = block2(x)
        f2 = self.downsample32x32(x)
        x = block3(x)
        f3 = self.downsample32x32(x)
        return torch.cat((f1, f2, f3), dim=1)

    def forward(self, x1, x2):
        """x1: modality-1 (RGB) image batch; x2: modality-2 (IR) batch."""
        x_M1 = self._stream(x1, self.conv1_M1, self.Block1_M1,
                            self.Block2_M1, self.Block3_M1)
        x_M2 = self._stream(x2, self.conv1_M2, self.Block1_M2,
                            self.Block2_M2, self.Block3_M2)

        x_M1 = self.lastconv1_M1(x_M1)
        x_M2 = self.lastconv1_M2(x_M2)

        x = torch.cat((x_M1, x_M2), dim=1)
        x = self.lastconv2(x)
        x = self.lastconv3(x)

        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        return {'prediction': x}
class cdcn(nn.Module):
    """Single-stream CDC network with per-block spatial attention.

    Three CDC blocks produce multi-scale features; each is gated by a
    SpatialAttention map, resized to 32x32 and concatenated, reduced to a
    single-channel map (1024 values) and classified by two FC layers.

    Args:
        opt: options providing ``input_nc`` and ``num_classes``.
        basic_conv: convolution class used throughout (default Conv2d_cd).
        theta: central-difference weight forwarded to ``basic_conv``.
    """

    def __init__(self, opt, basic_conv=Conv2d_cd, theta=0.7):
        super(cdcn, self).__init__()

        def cdc(in_ch, out_ch):
            # 3x3 CDC conv -> BN -> ReLU, stride 1, 'same' padding.
            return [
                basic_conv(in_ch, out_ch, kernel_size=3, stride=1, padding=1,
                           bias=False, theta=theta),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(),
            ]

        base = 160  # base channel width; block widths are multiples of it

        self.conv1 = nn.Sequential(*cdc(opt.input_nc, 80))

        self.Block1 = nn.Sequential(
            *cdc(80, base),
            *cdc(base, int(base * 1.6)),
            *cdc(int(base * 1.6), base),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        self.Block2 = nn.Sequential(
            *cdc(base, int(base * 1.2)),
            *cdc(int(base * 1.2), base),
            *cdc(base, int(base * 1.4)),
            *cdc(int(base * 1.4), base),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        self.Block3 = nn.Sequential(
            *cdc(base, base),
            *cdc(base, int(base * 1.2)),
            *cdc(int(base * 1.2), base),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # Reduce the three concatenated 160-channel maps to one channel.
        self.lastconv1 = nn.Sequential(
            *cdc(base * 3, base),
            basic_conv(base, 1, kernel_size=3, stride=1, padding=1,
                       bias=False, theta=theta),
            nn.ReLU(),
        )

        # Larger attention kernels for shallower (higher-resolution) blocks.
        self.sa1 = SpatialAttention(kernel=7)
        self.sa2 = SpatialAttention(kernel=5)
        self.sa3 = SpatialAttention(kernel=3)

        self.fc1 = nn.Linear(1024, 1024)
        self.fc2 = nn.Linear(1024, opt.num_classes)

        # All attended maps are resized to 32x32 (so 32*32 = 1024 features).
        self.downsample32x32 = nn.Upsample(size=(32, 32), mode='bilinear')

    def forward(self, x):
        # Original author's note: expects x of shape (B, input_nc, 256, 256).
        x = self.conv1(x)

        x = self.Block1(x)
        att1 = self.downsample32x32(self.sa1(x) * x)

        x = self.Block2(x)
        att2 = self.downsample32x32(self.sa2(x) * x)

        x = self.Block3(x)
        att3 = self.downsample32x32(self.sa3(x) * x)

        fused = torch.cat((att1, att2, att3), dim=1)

        out = self.lastconv1(fused)          # (B, 1, 32, 32)
        fea = out.view(out.size(0), -1)      # 1024-d feature vector
        pred = self.fc2(self.fc1(fea))
        return {'feature': fea, 'prediction': pred}

class vgg(nn.Module):
    """VGG-BN feature extractor with a replaced first conv + linear head.

    The backbone is chosen by the text before the first '_' in
    ``opt.arch_name + opt.arch_type``; the first conv is rebuilt for
    ``opt.input_nc`` channels and Kaiming-initialised. A trailing 'd'
    segment selects the dense (two-layer) classifier head.
    """

    def __init__(self, opt):
        super(vgg, self).__init__()
        parts = (opt.arch_name + opt.arch_type).split('_')
        builders = {
            'vgg16bn': torchvision.models.vgg16_bn,
            'vgg11bn': torchvision.models.vgg11_bn,
        }
        if parts[0] not in builders:
            raise ValueError('Unsupported model: ' + parts[0])
        model = builders[parts[0]](opt.pretrained)
        feat_ch = 512  # both supported variants end with 512 channels

        self.features = model.features
        # Swap in a first conv matching the requested input channel count.
        self.features[0] = nn.Conv2d(opt.input_nc, 64, 3, 1, 1)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))

        init.kaiming_normal_(self.features[0].weight, a=0, mode='fan_in')

        self.classifier = Linear(feat_ch, opt.num_classes, parts[-1] == 'd')

    def forward(self, input):
        feature = self.avgpool(self.features(input))
        feature = feature.view(-1, feature.shape[1])
        return {'feature': feature, 'prediction': self.classifier(feature)}


class resnet(nn.Module):
    """(SE-)ResNet feature extractor with a replaced stem conv + linear head.

    The backbone is chosen by the text before the first '_' in
    ``opt.arch_name + opt.arch_type``. The stem conv is rebuilt for
    ``opt.input_nc`` channels and Kaiming-initialised; bn1 through layer4
    are kept from the (possibly pretrained) backbone. A trailing 'd'
    segment selects the dense (two-layer) classifier head.
    """

    def __init__(self, opt):
        super(resnet, self).__init__()
        parts = (opt.arch_name + opt.arch_type).split('_')
        arch = parts[0]
        # Lazy builders so only the requested backbone is constructed.
        builders = {
            'resnet18': (lambda: torchvision.models.resnet18(opt.pretrained), 512),
            'resnet34': (lambda: torchvision.models.resnet34(opt.pretrained), 512),
            'resnet50': (lambda: torchvision.models.resnet50(opt.pretrained), 2048),
            'resnet50wide': (lambda: torchvision.models.wide_resnet50_2(opt.pretrained), 2048),
            'resnet101': (lambda: torchvision.models.resnet101(opt.pretrained), 2048),
            'senet18': (lambda: se_resnet18(2), 512),
            'senet50': (lambda: se_resnet50(2), 2048),
            'senet101': (lambda: se_resnet101(2), 2048),
        }
        if arch not in builders:
            raise ValueError('Unsupported model: ' + arch)
        build, feat_ch = builders[arch]
        model = build()

        self.features = nn.Sequential(
            nn.Conv2d(opt.input_nc, 64, kernel_size=7, stride=2, padding=3),
            model.bn1,
            model.relu,
            model.maxpool,
            model.layer1,
            model.layer2,
            model.layer3,
            model.layer4,
        )
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))

        self.classifier = Linear(feat_ch, opt.num_classes, parts[-1] == 'd')

        init.kaiming_normal_(self.features[0].weight, a=0, mode='fan_in')

    def forward(self, input):
        feature = self.avgpool(self.features(input))
        feature = feature.view(-1, feature.shape[1])
        return {'feature': feature, 'prediction': self.classifier(feature)}


class densenet(nn.Module):
    """DenseNet-161 feature extractor with a replaced stem conv + linear head.

    The stem conv is rebuilt for ``opt.input_nc`` input channels and
    Kaiming-initialised; the head maps the 2208-d pooled feature to
    ``opt.num_classes``.
    """

    def __init__(self, opt):
        super(densenet, self).__init__()
        # BUGFIX: opt.input_nc was being passed as torchvision's first
        # positional argument (``pretrained``). Pass opt.pretrained instead,
        # consistent with the vgg/resnet wrappers (and with this class's
        # original commented-out signature).
        model = torchvision.models.densenet161(opt.pretrained)

        self.features = model.features
        self.features.conv0 = nn.Conv2d(opt.input_nc,
                                        96,
                                        kernel_size=(7, 7),
                                        stride=(2, 2),
                                        padding=(3, 3),
                                        bias=False)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.classifier = Linear(2208, opt.num_classes)

        init.kaiming_normal_(self.features.conv0.weight, a=0, mode='fan_in')

    def forward(self, input):
        """Return pooled feature and class prediction for a batch."""
        feature = self.features(input)
        feature = self.avgpool(feature)
        feature = feature.view(-1, feature.shape[1])
        prediction = self.classifier(feature)
        return {'feature': feature, 'prediction': prediction}


class mobilenet(nn.Module):
    """MobileNetV2 feature extractor with a replaced first conv + linear head.

    The first conv is rebuilt for ``opt.input_nc`` input channels and
    Kaiming-initialised; the head maps the 1280-d pooled feature to
    ``opt.num_classes``.
    """

    def __init__(self, opt):
        super(mobilenet, self).__init__()

        # BUGFIX: opt.input_nc was being passed as torchvision's first
        # positional argument (``pretrained``). Pass opt.pretrained instead,
        # consistent with the vgg/resnet wrappers.
        model = torchvision.models.mobilenet_v2(opt.pretrained)

        self.features = model.features
        self.features[0] = nn.Conv2d(opt.input_nc,
                                     32,
                                     kernel_size=3,
                                     stride=2,
                                     padding=1)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.classifier = Linear(1280, opt.num_classes)

        init.kaiming_normal_(self.features[0].weight, a=0, mode='fan_in')

    def forward(self, input):
        """Return pooled feature and class prediction for a batch."""
        feature = self.features(input)
        feature = self.avgpool(feature)
        feature = feature.view(-1, feature.shape[1])
        prediction = self.classifier(feature)
        return {'feature': feature, 'prediction': prediction}


class inception(torchvision.models.Inception3):
    """Inception-v3 classifier with a rebuilt, truncnorm-initialised stem.

    Optionally loads the Google pretrained weights (non-strict, so the
    replaced stem and classifier are skipped), then rebuilds the first
    conv for ``opt.input_nc`` channels and adds a ``Linear`` head mapping
    the 2048-d pooled feature to ``opt.num_classes``.
    """

    def __init__(self, opt):
        super(inception, self).__init__(num_classes=1000,
                                        aux_logits=False,
                                        transform_input=False)
        url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
        # BUGFIX: the guard previously tested opt.input_nc (always truthy for
        # a valid channel count, so weights were always downloaded). Gate on
        # opt.pretrained, as in the other backbone wrappers.
        if opt.pretrained:
            self.load_state_dict(model_zoo.load_url(url), strict=False)
        import scipy.stats as stats

        # Rebuild the stem for opt.input_nc channels.
        self.Conv2d_1a_3x3 = nn.Sequential(
            nn.Conv2d(opt.input_nc, 32, kernel_size=3, stride=2),
            nn.BatchNorm2d(32, eps=0.001),
            nn.ReLU(True),
        )

        self.classifier = Linear(2048, opt.num_classes)

        # Truncated-normal init for the new stem conv (Inception convention).
        X = stats.truncnorm(-2, 2, scale=0.1)
        values = torch.Tensor(X.rvs(self.Conv2d_1a_3x3[0].weight.numel()))
        values = values.view(self.Conv2d_1a_3x3[0].weight.size())
        self.Conv2d_1a_3x3[0].weight.data.copy_(values)

        nn.init.constant_(self.Conv2d_1a_3x3[1].weight, 1)
        nn.init.constant_(self.Conv2d_1a_3x3[1].bias, 0)

    def forward(self, x):
        """Run the Inception-v3 trunk and return feature + prediction."""
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        feature = self.avgpool(x)
        feature = torch.flatten(feature, 1)

        prediction = self.classifier(feature)
        return {'feature': feature, 'prediction': prediction}

class Init_Module(nn.Module):
    """Abstract base providing Kaiming initialisation for child modules."""

    def __init__(self):
        super(Init_Module, self).__init__()

    def forward(self, *input):
        # Subclasses must implement the actual computation.
        raise NotImplementedError

    def init_params(self, scale=1):
        """Kaiming-init conv/linear weights (times ``scale``), zero their
        biases, and reset BatchNorm2d to identity (weight 1, bias 0)."""
        for module in self.modules():
            # Conv2d and Linear receive identical treatment.
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                init.kaiming_normal_(module.weight, a=0, mode='fan_in')
                module.weight.data *= scale  # damped init for residual blocks
                if module.bias is not None:
                    init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                init.constant_(module.weight, 1)
                init.constant_(module.bias.data, 0.0)


# #######################################################################
class Linear(Init_Module):
    """Linear classification head supporting multi-head and dense variants.

    ``out_ch`` may be an int (single head) or an iterable of ints (one head
    per entry; forward then returns a list of outputs). ``dense`` inserts a
    hidden in_ch->in_ch layer with ReLU in front of the output layer.
    """

    def __init__(self, in_ch=512, out_ch=2, dense=False):
        super(Linear, self).__init__()
        if isinstance(out_ch, int):
            self.classifier = self._get_block(in_ch, out_ch, dense)
        else:
            heads = [self._get_block(in_ch, ch, dense) for ch in out_ch]
            self.classifier = nn.ModuleList(heads)

        self.init_params()

    def _get_block(self, in_ch, out_ch, dense):
        # One head: either a single linear layer or a small two-layer MLP.
        if not dense:
            return nn.Linear(in_ch, out_ch, bias=True)
        return nn.Sequential(
            nn.Linear(in_ch, in_ch, bias=True),
            nn.ReLU(inplace=False),
            nn.Linear(in_ch, out_ch, bias=True),
        )

    def forward(self, input):
        if isinstance(self.classifier, (list, nn.ModuleList)):
            return [head(input) for head in self.classifier]
        return self.classifier(input)


# #######################################################################
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (ResNet building block)."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1,
        bias=False,
    )


class Resnet18_encoder(nn.Module):
    """ResNet-18 stages 1-4 exposed as a multi-scale feature encoder.

    NOTE(review): the stem (conv1/bn1/relu) is not part of this module —
    forward starts at maxpool, so it looks like the caller feeds an
    already-stemmed 64-channel map; confirm against the call site.
    """

    def __init__(self, pretrained=True):
        super(Resnet18_encoder, self).__init__()
        backbone = torchvision.models.resnet18(pretrained)

        self.maxpool = backbone.maxpool
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4

    def forward(self, input):
        # Return the output of every residual stage (shallow -> deep).
        out2 = self.layer1(self.maxpool(input))
        out3 = self.layer2(out2)
        out4 = self.layer3(out3)
        out5 = self.layer4(out4)
        return out2, out3, out4, out5


# #######################################################################
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel gate (Hu et al., CVPR 2018)."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck MLP: channel -> channel/reduction -> channel, sigmoid gate.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # Squeeze: global average pool; excite: per-channel sigmoid weights.
        gate = self.avg_pool(x).view(batch, channels)
        gate = self.fc(gate).view(batch, channels, 1, 1)
        return x * gate.expand_as(x)


class SEBasicBlock(nn.Module):
    """ResNet basic block with an SE gate applied before the residual add."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 *,
                 reduction=16):
        # groups/base_width/dilation/norm_layer are accepted only for
        # torchvision ResNet constructor compatibility; this block does
        # not use them.
        super(SEBasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, 1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.se = SELayer(planes, reduction)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.se(self.bn2(self.conv2(out)))

        out += shortcut
        return self.relu(out)


class SEBottleneck(nn.Module):
    """ResNet bottleneck block with an SE gate applied before the residual add."""

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 *,
                 reduction=16):
        # groups/base_width/dilation/norm_layer exist only for torchvision
        # ResNet constructor compatibility; this block ignores them.
        super(SEBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se = SELayer(planes * 4, reduction)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.se(self.bn3(self.conv3(out)))

        out += shortcut
        return self.relu(out)


def se_resnet18(num_classes=1_000):
    """SE-ResNet-18: torchvision ResNet skeleton built from SEBasicBlock."""
    net = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes)
    # Adaptive pooling so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net


def se_resnet34(num_classes=1_000):
    """SE-ResNet-34: torchvision ResNet skeleton built from SEBasicBlock."""
    net = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)
    # Adaptive pooling so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net


def se_resnet50(num_classes=1_000):
    """SE-ResNet-50: torchvision ResNet skeleton built from SEBottleneck."""
    net = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes)
    # Adaptive pooling so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net


def se_resnet101(num_classes=1_000):
    """SE-ResNet-101: torchvision ResNet skeleton built from SEBottleneck."""
    net = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes)
    # Adaptive pooling so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net


def se_resnet152(num_classes=1_000):
    """SE-ResNet-152: torchvision ResNet skeleton built from SEBottleneck."""
    net = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes)
    # Adaptive pooling so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net


# #######################################################################
class _NonLocalBlockND(nn.Module):
    def __init__(self,
                 in_channels,
                 inter_channels=None,
                 dimension=3,
                 sub_sample=True,
                 bn_layer=True):
        super(_NonLocalBlockND, self).__init__()

        assert dimension in [1, 2, 3]

        self.dimension = dimension
        self.sub_sample = sub_sample

        self.in_channels = in_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        if dimension == 3:
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif dimension == 2:
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=(2))
            bn = nn.BatchNorm1d

        self.g = conv_nd(in_channels=self.in_channels,
                         out_channels=self.inter_channels,
                         kernel_size=1,
                         stride=1,
                         padding=0)

        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels,
                        out_channels=self.in_channels,
                        kernel_size=1,
                        stride=1,
                        padding=0), bn(self.in_channels))
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels,
                             out_channels=self.in_channels,
                             kernel_size=1,
                             stride=1,
                             padding=0)
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)

        self.theta = conv_nd(in_channels=self.in_channels,
                             out_channels=self.inter_channels,
                             kernel_size=1,
                             stride=1,
                             padding=0)

        self.phi = conv_nd(in_channels=self.in_channels,
                           out_channels=self.inter_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)

        if sub_sample:
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

    def forward(self, x):
        '''
        :param x: (b, c, t, h, w)
        :return:
        '''

        batch_size = x.size(0)

        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        f = torch.matmul(theta_x, phi_x)
        N = f.size(-1)
        f_div_C = f / N

        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        z = W_y + x

        return z


class NONLocalBlock1D(_NonLocalBlockND):
    """Non-local block specialised for 1-D (b, c, t) inputs."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True,
                 bn_layer=True):
        super(NONLocalBlock1D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=1,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )


class NONLocalBlock2D(_NonLocalBlockND):
    """Non-local block specialised for 2-D (b, c, h, w) inputs."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True,
                 bn_layer=True):
        super(NONLocalBlock2D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=2,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )


class NONLocalBlock3D(_NonLocalBlockND):
    """Non-local block specialised for 3-D (b, c, t, h, w) inputs."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True,
                 bn_layer=True):
        super(NONLocalBlock3D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=3,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
