from models.resnet import BasicBlock
import torch
import torch.nn as nn
import torch.nn.functional as F


class OurModel(nn.Module):
    """Multi-task model: shared backbone, per-task PPM necks, location-adaptive
    feature re-weighting, and per-task FPN decoder heads.

    Args:
        p: experiment config; must provide ``TASKS.NAMES``,
            ``AUXILARY_TASKS.NAMES`` and ``AUXILARY_TASKS.NUM_OUTPUT``
            (mapping: task name -> number of output channels).
        backbone: feature extractor returning a list of multi-scale feature
            maps, coarsest (deepest) last.
        backbone_channels: channel count of each backbone stage; the last
            entry feeds the PPM necks.
        task_channels: width of the per-task neck features (default 256).
    """

    def __init__(self, p, backbone, backbone_channels, task_channels=256):
        super(OurModel, self).__init__()
        # General
        self.tasks = p.TASKS.NAMES
        self.auxilary_tasks = p.AUXILARY_TASKS.NAMES
        self.n_tasks = len(self.auxilary_tasks)

        # Backbone (shared by all tasks)
        self.backbone = backbone

        # One PPM neck per auxiliary task, applied to the deepest backbone map.
        self.task_convs = nn.ModuleDict(
            {t: PPM(backbone_channels[-1], task_channels)
             for t in self.auxilary_tasks})

        # One adaptive learner per target task: predicts per-location weights
        # over the concatenated (fused) task features.
        self.ada_learners = nn.ModuleDict(
            {t: LocationAdaptiveLearner(n_tasks=self.n_tasks,
                                        in_channels=self.n_tasks * task_channels,
                                        out_channels=self.n_tasks * task_channels)
             for t in self.tasks})

        # One FPN decoder head per target task.
        self.heads = nn.ModuleDict(
            {t: FPN(backbone_channels, p.AUXILARY_TASKS.NUM_OUTPUT[t], task_channels)
             for t in self.tasks})

    def forward(self, x):
        """Return a dict {task name: prediction map}.

        Upsampling to the final resolution happens inside each FPN head.
        """
        # Backbone: list of multi-scale feature maps, deepest last.
        x = self.backbone(x)

        # Per-auxiliary-task neck features from the deepest backbone map,
        # concatenated along channels: (N, task_channels * t, H, W).
        auxilary_task_features = {t: self.task_convs[t](x[-1])
                                  for t in self.auxilary_tasks}
        fuse_feature = torch.cat(list(auxilary_task_features.values()), 1)

        # Per-task adaptive weights over the fused feature: (N, C, t, H, W).
        ada_weights = {t: self.ada_learners[t](fuse_feature) for t in self.tasks}

        n, _, h, w = fuse_feature.size()
        fuse_feature = fuse_feature.view(n, -1, self.n_tasks, h, w)  # (N, C, t, H, W)
        # One (N, C, 1, H, W) chunk per auxiliary task, in auxilary_tasks order.
        fuse_feature_split = torch.chunk(fuse_feature, chunks=self.n_tasks, dim=2)

        # NOTE(review): pairing fuse_feature_split[i] with self.tasks[i] assumes
        # TASKS.NAMES and AUXILARY_TASKS.NAMES are index-aligned — confirm.
        task_features = {}
        for i, t in enumerate(self.tasks):
            weighted = torch.mul(fuse_feature, ada_weights[t])  # (N, C, t, H, W)
            weighted = torch.sum(weighted, 2)                   # (N, C, H, W)
            # Residual connection to the task's own (unweighted) feature.
            weighted = weighted + fuse_feature_split[i].squeeze(2)
            task_features[t] = self.heads[t](x, weighted)       # (N, out, H', W')

        return task_features


class LocationAdaptiveLearner(nn.Module):
    """Predict per-location fusion weights from concatenated task features.

    Three 1x1 conv stages map ``(N, n_tasks*C, H, W)`` to the same channel
    count; the result is reshaped to ``(N, C, n_tasks, H, W)``. The last
    stage has no ReLU, so weights may be negative.
    """

    def __init__(self, n_tasks, in_channels, out_channels, norm_layer=nn.BatchNorm2d):
        super(LocationAdaptiveLearner, self).__init__()
        self.n_tasks = n_tasks

        def _stage(cin, cout, with_relu):
            # 1x1 conv + norm (+ optional ReLU) building block.
            layers = [nn.Conv2d(cin, cout, 1, bias=True), norm_layer(cout)]
            if with_relu:
                layers.append(nn.ReLU(inplace=True))
            return nn.Sequential(*layers)

        self.conv1 = _stage(in_channels, out_channels, with_relu=True)
        self.conv2 = _stage(out_channels, out_channels, with_relu=True)
        self.conv3 = _stage(out_channels, out_channels, with_relu=False)

    def forward(self, x):
        # Run the three stages in order; spatial size is unchanged throughout.
        for stage in (self.conv1, self.conv2, self.conv3):
            x = stage(x)
        # Split the channel axis into (C, n_tasks): (N, C, n_tasks, H, W).
        n, _, h, w = x.size()
        return x.view(n, -1, self.n_tasks, h, w)


class PPM(nn.Module):
    """Pyramid Pooling Module.

    Pools the input at several scales, upsamples each pooled map back to the
    input resolution, projects it to 512 channels, concatenates everything
    with the original input, and reduces to ``out_channels``.
    """

    def __init__(self, input_channels, out_channels, pool_scales=(1, 2, 3, 6)):
        super(PPM, self).__init__()
        # One adaptive average pool per pyramid scale.
        self.ppm_pooling = nn.ModuleList(
            [nn.AdaptiveAvgPool2d(scale) for scale in pool_scales])
        # Matching 1x1 projection (conv + BN + ReLU) per scale.
        self.ppm_conv = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(input_channels, 512, kernel_size=1, bias=False),
                nn.BatchNorm2d(512),
                nn.ReLU(inplace=True))
            for _ in pool_scales])
        # Fuse input + all pyramid branches, then map to out_channels.
        fused_channels = input_channels + len(pool_scales) * 512
        self.ppm_last_conv = nn.Sequential(
            nn.Conv2d(fused_channels, input_channels, kernel_size=3,
                      stride=1, padding=1, bias=False),
            nn.BatchNorm2d(input_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(input_channels, out_channels, kernel_size=1))

    def forward(self, x, activation=None):
        # `activation` is accepted for interface compatibility but unused.
        height, width = x.size(2), x.size(3)
        branches = [x]
        for pool, project in zip(self.ppm_pooling, self.ppm_conv):
            upsampled = nn.functional.interpolate(
                pool(x), (height, width), mode='bilinear', align_corners=False)
            branches.append(project(upsampled))
        return self.ppm_last_conv(torch.cat(branches, 1))


class FPN(nn.Module):
    """Feature Pyramid Network decoder head.

    Builds a top-down pathway over the backbone features, fuses all pyramid
    levels at the finest resolution, and predicts ``num_class`` channels.
    """

    def __init__(self, backbone_channels, num_class, fpn_dim=256):
        super(FPN, self).__init__()

        # Lateral 1x1 projections for every backbone level except the top one.
        laterals = []
        for in_planes in backbone_channels[:-1]:
            laterals.append(nn.Sequential(
                nn.Conv2d(in_planes, fpn_dim, kernel_size=1, bias=False),
                nn.BatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)))
        self.fpn_in = nn.ModuleList(laterals)

        # Post-merge 3x3 smoothing convs, one per merged level.
        smoothers = []
        for _ in backbone_channels[:-1]:
            smoothers.append(nn.Sequential(conv3x3_bn_relu(fpn_dim, fpn_dim, 1)))
        self.fpn_out = nn.ModuleList(smoothers)

        # Fuses the concatenation of all levels and predicts the output map.
        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(backbone_channels) * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, num_class, kernel_size=1))

    def forward(self, conv_out, f):
        """conv_out: backbone feature list (finest first); f: fused top feature."""
        pyramid = [f]
        # Top-down pass: upsample, add lateral projection, smooth.
        for level in reversed(range(len(conv_out) - 1)):
            lateral = self.fpn_in[level](conv_out[level])
            f = nn.functional.interpolate(
                f, size=lateral.size()[2:], mode='bilinear', align_corners=False)
            f = lateral + f
            pyramid.append(self.fpn_out[level](f))

        pyramid.reverse()  # finest level first: [P2 .. P5]

        # Upsample every level to the finest resolution and concatenate.
        target_size = pyramid[0].size()[2:]
        fused = [pyramid[0]]
        for feat in pyramid[1:]:
            fused.append(nn.functional.interpolate(
                feat, target_size, mode='bilinear', align_corners=False))
        x = self.conv_last(torch.cat(fused, 1))

        # NOTE(review): fixed 4x upsample with align_corners=True, unlike the
        # align_corners=False used elsewhere — presumably matches a stride-4
        # finest backbone level; confirm against the caller.
        return nn.functional.interpolate(
            x, scale_factor=4, mode='bilinear', align_corners=True)


def conv3x3_bn_relu(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution (padding 1, no bias) followed by BatchNorm and ReLU."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_planes), nn.ReLU(inplace=True))



