import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models

from modules.retinaface.nets.common import FPN as FPN
from modules.retinaface.nets.common import SSH as SSH
from modules.retinaface.nets.mobilenetv1 import MobileNetV1


# <Class: ClassHead/>
class ClassHead(nn.Module):
    """1x1 conv head emitting a 2-way (face / background) score per anchor."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        # One pair of logits per anchor at every spatial location.
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors*2, kernel_size=(1,1), stride=1, padding=0)

    def forward(self, x):
        """Return class scores reshaped to (batch, total_anchors, 2)."""
        scores = self.conv1x1(x)
        # NCHW -> NHWC so the per-location anchor predictions stay contiguous.
        scores = scores.permute(0, 2, 3, 1).contiguous()
        return scores.view(scores.size(0), -1, 2)
# <Class: /ClassHead>


# <Class: BboxHead/>
class BboxHead(nn.Module):
    """1x1 conv head emitting a 4-value box regression per anchor."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        # Four regression targets (box offsets) per anchor per location.
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors*4, kernel_size=(1,1), stride=1, padding=0)

    def forward(self, x):
        """Return box regressions reshaped to (batch, total_anchors, 4)."""
        deltas = self.conv1x1(x)
        # NCHW -> NHWC before flattening the spatial/anchor axes together.
        deltas = deltas.permute(0, 2, 3, 1).contiguous()
        return deltas.view(deltas.size(0), -1, 4)
# <Class: /BboxHead>


# <Class: LandmarkHead/>
class LandmarkHead(nn.Module):
    """1x1 conv head emitting 10 landmark coordinates (5 x/y pairs) per anchor."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        # Ten landmark values per anchor per location.
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors*10, kernel_size=(1,1), stride=1, padding=0)

    def forward(self, x):
        """Return landmark regressions reshaped to (batch, total_anchors, 10)."""
        lms = self.conv1x1(x)
        # NCHW -> NHWC before flattening the spatial/anchor axes together.
        lms = lms.permute(0, 2, 3, 1).contiguous()
        return lms.view(lms.size(0), -1, 10)
# <Class: /LandmarkHead>


# <Class: RetinaFace/>
class RetinaFace(nn.Module):
    """RetinaFace face detector.

    Pipeline: backbone (torchvision ResNet-* or local MobileNetV1) taps three
    intermediate feature levels -> FPN -> one SSH context module per level ->
    per-level class / bbox / landmark heads whose outputs are concatenated
    along the anchor dimension.
    """
    # <Method: __init__/>
    def __init__(
        self, 
        backone_name = 'resnet50', 
        if_pretrained = True, 
        in_channels = 32, 
        out_channels = 64
        ):
        """
        Args:
            backone_name: torchvision resnet name (e.g. 'resnet50') or
                'mobilenetv1'. (Parameter name kept as-is, typo included,
                for backward compatibility with existing callers.)
            if_pretrained: load ImageNet weights for the resnet backbones.
            in_channels: currently unused; kept for interface compatibility.
            out_channels: channel width of the FPN / SSH feature maps.

        Raises:
            ValueError: if `backone_name` is not recognized.
        """
        super(RetinaFace, self).__init__()
        if backone_name.find('resnet') >= 0:
            # NOTE(review): models._utils is a private torchvision API;
            # consider torchvision.models.feature_extraction if the pinned
            # torchvision version supports it.
            self._body = models._utils.IntermediateLayerGetter(getattr(models, backone_name)(pretrained=if_pretrained), {'layer2': 0, 'layer3': 1, 'layer4': 2})
        elif backone_name == 'mobilenetv1':
            self._body = models._utils.IntermediateLayerGetter(MobileNetV1(), {'stage1': 1, 'stage2': 2, 'stage3': 3})
        else:
            # Was `print(...); assert(0)` — asserts are stripped under
            # `python -O`, so raise an explicit, catchable error instead.
            raise ValueError("Unknown backbone name: %r" % (backone_name,))
        # end-if
        # Probe the backbone with a dummy batch to discover the per-level
        # channel counts. Run the probe in eval mode under no_grad so it
        # neither updates BatchNorm running statistics of the pretrained
        # weights nor builds an autograd graph; then restore the mode.
        was_training = self._body.training
        self._body.eval()
        with torch.no_grad():
            body_outputs = self._body(torch.rand(1, 3, 512, 512))
        self._body.train(was_training)
        in_channels_list = [outp.size()[1] for outp in body_outputs.values()]
        print("in_channels_list: ", in_channels_list)
        # 
        self._fpn = FPN(in_channels_list, out_channels)
        self._ssh1 = SSH(out_channels, out_channels)
        self._ssh2 = SSH(out_channels, out_channels)
        self._ssh3 = SSH(out_channels, out_channels)
        # 
        self._class_head = self._make_class_head(fpn_num=3, inchannels=out_channels)
        self._boxes_head = self._make_bbox_head(fpn_num=3, inchannels=out_channels)
        self._landm_head = self._make_landmark_head(fpn_num=3, inchannels=out_channels)
    # <Method: /__init__>

    # <Method: _make_class_head/>
    def _make_class_head(self, fpn_num=3, inchannels=64, anchor_num=2):
        """Build one ClassHead per FPN level."""
        return nn.ModuleList(ClassHead(inchannels, anchor_num) for _ in range(fpn_num))
    # <Method: /_make_class_head>

    # <Method: _make_bbox_head/>
    def _make_bbox_head(self, fpn_num=3, inchannels=64, anchor_num=2):
        """Build one BboxHead per FPN level."""
        return nn.ModuleList(BboxHead(inchannels, anchor_num) for _ in range(fpn_num))
    # <Method: /_make_bbox_head>

    # <Method: _make_landmark_head/>
    def _make_landmark_head(self, fpn_num=3, inchannels=64, anchor_num=2):
        """Build one LandmarkHead per FPN level."""
        return nn.ModuleList(LandmarkHead(inchannels, anchor_num) for _ in range(fpn_num))
    # <Method: /_make_landmark_head>

    # <Method: forward/>
    def forward(self, inputs):
        """Run the detector.

        Args:
            inputs: image batch tensor of shape (N, 3, H, W).

        Returns:
            Tuple of (bbox_regressions, classifications, ldm_regressions),
            each concatenated across the three feature levels along dim 1:
            shapes (N, A, 4), (N, A, 2), (N, A, 10) with A = total anchors.
        """
        out = self._body(inputs)
        # FPN ...
        fpn = self._fpn(out)
        # SSH ...
        feature1 = self._ssh1(fpn[0])
        feature2 = self._ssh2(fpn[1])
        feature3 = self._ssh3(fpn[2])
        features = [feature1, feature2, feature3]
        # Concatenate per-level head outputs along the anchor dimension.
        bbox_regressions = torch.cat([self._boxes_head[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications  = torch.cat([self._class_head[i](feature) for i, feature in enumerate(features)], dim=1)
        ldm_regressions  = torch.cat([self._landm_head[i](feature) for i, feature in enumerate(features)], dim=1)
        # output ...
        return bbox_regressions, classifications, ldm_regressions
    # <Method: /forward>
# <Class: /RetinaFace>


# python -m modules.retinaface.retinaface
if __name__ == "__main__":
    # Smoke test: build a resnet34-backed detector and push one dummy batch.
    model = RetinaFace('resnet34')
    print(model)
    predictions = model(torch.rand(1, 3, 256, 256))
    print([pred.size() for pred in predictions])