import torch
import torchvision
import torch.nn as nn
from .attn_branch import RGA_Module


class AttnCls(nn.Module):
    """Image classifier: ResNet backbone with an RGA attention block
    inserted after each of the four residual stages.

    The ResNet's stock fully-connected head is replaced with a fresh
    ``num_classes``-way linear layer; the rest of the backbone is reused
    stage-by-stage in :meth:`forward`.
    """

    # Maps supported ResNet depths to their torchvision constructors.
    __resnet_factory = {
        18: torchvision.models.resnet18,
        34: torchvision.models.resnet34,
        50: torchvision.models.resnet50,
        101: torchvision.models.resnet101,
        152: torchvision.models.resnet152,
    }

    def __init__(self, num_classes, convs_depth, convs_pretrained, img_size=64):
        """Build the backbone and the four RGA attention modules.

        Args:
            num_classes: number of output classes for the final FC layer.
            convs_depth: ResNet depth; one of {18, 34, 50, 101, 152}.
            convs_pretrained: passed to torchvision as ``pretrained=``.
            img_size: side length of the (square) input images; must be
                divisible by 32. Defaults to 64, which reproduces the
                original hard-coded attention sizes (16*16 ... 2*2).

        Raises:
            ValueError: if ``convs_depth`` is unsupported or ``img_size``
                is not divisible by 32.
        """
        super().__init__()

        if convs_depth not in self.__resnet_factory:
            raise ValueError(
                f'Unknown resnet depth {convs_depth!r}; '
                f'expected one of {sorted(self.__resnet_factory)}')
        if img_size % 32 != 0:
            # Each RGA module needs the exact spatial size of its stage;
            # the backbone halves the resolution 5 times (stem + 4 stages).
            raise ValueError(f'img_size must be divisible by 32, got {img_size}')

        self.model1 = self.__resnet_factory[convs_depth](pretrained=convs_pretrained)
        # Swap the ImageNet head for a task-specific classifier.
        in_feat = self.model1.fc.in_features
        self.model1.fc = nn.Linear(in_features=in_feat, out_features=num_classes, bias=True)

        # Channel count after layer1: BasicBlock nets (18/34) emit 64,
        # Bottleneck nets (50/101/152) emit 256; it doubles each stage.
        in_ch = 64 if convs_depth in (18, 34) else 256

        # Spatial side after conv1 (stride 2) + maxpool (stride 2).
        side = img_size // 4

        self.rga_att1 = RGA_Module(in_ch, side * side,
                                   use_spatial=True, use_channel=True,
                                   cha_ratio=2, spa_ratio=2, down_ratio=2)

        self.rga_att2 = RGA_Module(in_ch * 2, (side // 2) * (side // 2),
                                   use_spatial=True, use_channel=True,
                                   cha_ratio=2, spa_ratio=2, down_ratio=2)

        self.rga_att3 = RGA_Module(in_ch * 4, (side // 4) * (side // 4),
                                   use_spatial=True, use_channel=True,
                                   cha_ratio=2, spa_ratio=2, down_ratio=2)

        self.rga_att4 = RGA_Module(in_ch * 8, (side // 8) * (side // 8),
                                   use_spatial=True, use_channel=True,
                                   cha_ratio=2, spa_ratio=2, down_ratio=2)

    def forward(self, x):
        """Run the backbone stage-by-stage, applying RGA attention
        after each residual stage.

        Args:
            x: image batch of shape ``(N, 3, img_size, img_size)``.

        Returns:
            Class logits of shape ``(N, num_classes)``.
        """
        # Stem: conv -> BN -> ReLU -> maxpool; spatial size drops to 1/4.
        h = self.model1.conv1(x)
        h = self.model1.bn1(h)
        h = self.model1.relu(h)
        h = self.model1.maxpool(h)

        # Residual stages interleaved with their matching RGA modules.
        h = self.model1.layer1(h)
        h = self.rga_att1(h)

        h = self.model1.layer2(h)
        h = self.rga_att2(h)

        h = self.model1.layer3(h)
        h = self.rga_att3(h)

        h = self.model1.layer4(h)
        h = self.rga_att4(h)

        # Global average pool -> flatten -> classifier head.
        h = self.model1.avgpool(h)
        h = torch.flatten(h, 1)
        y = self.model1.fc(h)

        return y


if __name__ == '__main__':
    # Smoke test: push a dummy two-image batch through the smallest
    # backbone (ResNet-18, no pretrained weights) and show the output shape.
    dummy = torch.ones((2, 3, 64, 64))

    net = AttnCls(11, 18, False)
    print(net)

    logits = net(dummy)
    print(logits.shape)
