import torch
import pickle
import numpy as np
from .resnet import *
from model.roi_align import RoIAlignFunction, preprocess_rois
from model.generate_proposals import GenerateProposals
from model.collect_and_distribute_fpn_rpn_proposals import CollectAndDistributeFpnRpnProposals
import torch.nn as nn
from collections import OrderedDict


class fpn_body(nn.Module):  # NOTE: original author flagged this as "not work yet"
    """Feature Pyramid Network wrapper around a convolutional backbone.

    Runs ``conv_body`` once, taps the intermediate activations whose child
    names appear in ``fpn_layers``, and merges them top-down into a pyramid
    of 256-channel feature maps (finest resolution first).
    """

    def __init__(self, conv_body, fpn_layers):
        super(fpn_body, self).__init__()
        self.conv_body = conv_body
        # 1x1 lateral convs: project each tapped backbone output to 256
        # channels; the input width is read off the last block's bn3 of
        # each tapped stage.
        self.fpn_lateral = nn.ModuleList(
            nn.Conv2d(in_channels=conv_body._modules[name][-1].bn3.num_features,
                      out_channels=256, kernel_size=1, stride=1, padding=0)
            for name in fpn_layers)
        # 3x3 output convs: smooth each merged map after the top-down add.
        self.fpn_output = nn.ModuleList(
            nn.Conv2d(in_channels=256, out_channels=256,
                      kernel_size=3, stride=1, padding=1)
            for name in fpn_layers)
        # Nearest-neighbour 2x upsampling for the top-down pathway
        # (nearest is what the FPN paper uses).
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        # names of backbone children whose outputs feed the pyramid
        self.fpn_layers = fpn_layers

    def forward(self, x):
        # Bottom-up pass: run the full backbone, capturing tapped activations.
        taps = []
        for name, module in self.conv_body.named_children():
            x = module(x)
            if name in self.fpn_layers:
                taps.append(x)
        # Lateral 1x1 projections to a common 256-channel width.
        taps = [conv(t) for conv, t in zip(self.fpn_lateral, taps)]
        # Top-down pass: add each upsampled coarser level into the next finer
        # one, walking from the second-coarsest level down to the finest.
        for i in reversed(range(len(taps) - 1)):
            taps[i] = taps[i] + self.upsample(taps[i + 1])
        # Final 3x3 smoothing convs, one per pyramid level.
        return [conv(t) for conv, t in zip(self.fpn_output, taps)]


class Reshape(nn.Module):
    """Reshape layer: views the input as ``(batch, *size)``.

    The leading (batch) dimension is preserved; the remaining elements are
    viewed with shape ``size`` (which may contain ``-1`` to infer one dim).
    """

    def __init__(self, *size):
        # Initialise nn.Module FIRST so its attribute registries exist before
        # any attribute assignment. The original assigned self.size before
        # super().__init__(), which only works by accident of
        # nn.Module.__setattr__'s fallback path for plain values.
        super(Reshape, self).__init__()
        self.size = size

    def forward(self, input):
        """Return a view of ``input`` with shape (input.shape[0], *self.size)."""
        return input.view(input.shape[0], *self.size)


class four_layer_conv(nn.Module):
    """Mask-head trunk: four 3x3/256-channel conv layers, each followed by ReLU.

    Spatial size and channel width are preserved throughout.
    """

    def __init__(self):
        super(four_layer_conv, self).__init__()
        self.relu = nn.ReLU()
        self.fcn1 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.fcn2 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.fcn3 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.fcn4 = nn.Conv2d(256, 256, 3, stride=1, padding=1)

    def forward(self, x):
        # conv -> ReLU, four times in sequence.
        for conv in (self.fcn1, self.fcn2, self.fcn3, self.fcn4):
            x = self.relu(conv(x))
        return x


class mask_head(nn.Module):
    """Mask prediction head: RoIAlign -> conv trunk -> 2x deconv -> per-class map.

    When ``roi_spatial_scale`` is a list the head runs in FPN mode and expects
    per-level feature maps and per-level roi tensors (``None`` for empty levels).
    """

    def __init__(self, conv_head, roi_spatial_scale, roi_sampling_ratio, output_prob):
        super(mask_head, self).__init__()
        # When True, forward() returns sigmoid probabilities instead of logits.
        self.output_prob = output_prob
        self.conv_head = conv_head
        # A four_layer_conv trunk emits 256 channels; the shared ResNet
        # layer4 trunk emits 2048.
        in_ch = 256 if isinstance(conv_head, four_layer_conv) else 2048
        self.transposed_conv = nn.ConvTranspose2d(in_ch, 256, 2, stride=2, padding=0)
        # 81 output maps — presumably COCO's 80 classes + background.
        self.fcn_logits = nn.Conv2d(256, 81, 1, stride=1, padding=0)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        # FPN mode is signalled by a per-level list of spatial scales.
        self.use_fpn = isinstance(roi_spatial_scale, list)
        self.roi_spatial_scale = roi_spatial_scale
        self.roi_sampling_ratio = roi_sampling_ratio
        self.roi_height = 14
        self.roi_width = 14

    def forward(self, x, rois, roi_original_idx=None):
        if self.use_fpn:
            # Pool each FPN level separately (skipping levels with no rois),
            # concatenate, then restore the pre-distribution roi order.
            pooled = []
            for level, level_rois in enumerate(rois):
                if level_rois is None:
                    continue
                pooled.append(RoIAlignFunction.apply(
                    x[level], preprocess_rois(level_rois),
                    self.roi_height, self.roi_width,
                    self.roi_spatial_scale[level], self.roi_sampling_ratio))
            x = torch.cat(tuple(pooled), 0)
            x = x[roi_original_idx, :]
        else:
            # Single feature map: one RoIAlign yields a 14x14 crop per roi.
            x = RoIAlignFunction.apply(x, preprocess_rois(rois),
                                       self.roi_height, self.roi_width,
                                       self.roi_spatial_scale, self.roi_sampling_ratio)
        x = self.relu(self.transposed_conv(self.conv_head(x)))
        x = self.fcn_logits(x)
        return self.sigmoid(x) if self.output_prob else x


class rpn_head(nn.Module):
    """Region Proposal Network head.

    A shared 3x3 conv followed by two sibling 1x1 convs: one producing a
    per-anchor objectness probability (via sigmoid) and one producing four
    box-regression values per anchor.
    """

    def __init__(self, in_channels=1024, out_channels=1024, n_anchors=15):
        super(rpn_head, self).__init__()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)
        self.cls_logits = nn.Conv2d(out_channels, n_anchors, 1, stride=1, padding=0)
        self.bbox_pred = nn.Conv2d(out_channels, 4 * n_anchors, 1, stride=1, padding=0)

    def forward(self, x):
        """Return ``(objectness_prob, bbox_deltas)`` for feature map ``x``."""
        hidden = self.relu(self.conv(x))
        objectness_prob = self.sigmoid(self.cls_logits(hidden))
        bbox_deltas = self.bbox_pred(hidden)
        return objectness_prob, bbox_deltas


class detector(nn.Module):
    """Two-stage object detector (Faster/Mask R-CNN style) on a ResNet backbone.

    The backbone is split into a conv body (everything before ``layer4``) and a
    conv head (``layer4`` + avgpool, or an optional two-layer MLP). The body
    may be wrapped in an FPN; an RPN head may generate proposals; a mask head
    may predict instance masks.
    """

    def __init__(self,
                 arch='resnet50',
                 # When True, the RoI conv head is Reshape + two Linear layers
                 # instead of the backbone's layer4 + avgpool.
                 use_two_layer_mlp_head=False,
                 # Wrap the conv body in a Feature Pyramid Network over
                 # layer1/layer2/layer3.
                 use_fpn=False,
                 fpn_extra_lvl=True,  # add an extra FPN level by 2x subsampling the last level
                 use_rpn_head=False,
                 use_mask_head=False,
                 mask_head_type='upshare',
                 roi_feature_channels=2048,
                 num_classes=81,
                 roi_height=14,
                 roi_width=14,
                 roi_spatial_scale=0.0625,
                 roi_sampling_ratio=0):
        """Build the detector.

        Args:
            arch: name of a ResNet constructor from ``.resnet`` ('resnet*' only).
            use_two_layer_mlp_head: use an MLP RoI head instead of layer4+avgpool.
            use_fpn: wrap the conv body in an FPN.
            fpn_extra_lvl: add one coarser FPN level for proposal generation.
            use_rpn_head: attach an RPN to generate rois in forward().
            use_mask_head: attach a mask prediction head.
            mask_head_type: 'upshare' (share layer4 with the box branch) or
                '1up4convs' (dedicated four_layer_conv trunk).
            roi_feature_channels: width of the 1x1 RoI feature fed to the
                box/class heads.
            num_classes: number of classes (incl. background).
            roi_height, roi_width: RoIAlign output size.
            roi_spatial_scale: feature-map scale(s); a list enables FPN-style
                per-level pooling.
            roi_sampling_ratio: RoIAlign sampling ratio.

        Raises:
            NotImplementedError: for non-ResNet ``arch``.
            ValueError: for an unknown ``mask_head_type``.
        """
        super(detector, self).__init__()
        # RoI parameters; spatial scale is a per-level list when using FPN.
        self.roi_height = int(roi_height)
        self.roi_width = int(roi_width)
        if isinstance(roi_spatial_scale, list):
            self.roi_spatial_scale = [float(s) for s in roi_spatial_scale]
        else:
            self.roi_spatial_scale = float(roi_spatial_scale)
        self.roi_sampling_ratio = int(roi_sampling_ratio)
        # Configuration flags
        self.mask_head_type = mask_head_type
        self.use_fpn = use_fpn
        self.fpn_extra_lvl = fpn_extra_lvl
        self.use_rpn_head = use_rpn_head
        self.use_mask_head = use_mask_head
        self.use_two_layer_mlp_head = use_two_layer_mlp_head

        # Main conv model, looked up by name among the .resnet constructors.
        if arch.startswith('resnet'):
            backbone = globals()[arch]()
        else:
            raise NotImplementedError
        # conv body = all backbone children before layer4
        self.backbone = nn.Sequential()
        for name, module in backbone.named_children():
            if name == 'layer4':
                break
            self.backbone.add_module(name=name, module=module)

        # Optionally wrap the conv body in an FPN over layers 1-3.
        if self.use_fpn:
            self.backbone = fpn_body(self.backbone, fpn_layers=['layer1', 'layer2', 'layer3'])  # possibly not correct
        # conv head applied to the pooled per-RoI features
        if self.use_two_layer_mlp_head:
            self.conv_head = nn.Sequential(
                Reshape(-1),
                nn.Linear(256 * 7 * 7, 1024),  # NOTE(review): assumes 256x7x7 RoI features — confirm against roi_height/width
                nn.Linear(1024, 1024)
            )
        else:
            self.conv_head = nn.Sequential(OrderedDict([
                ('layer4', backbone.layer4),
                ('avgpool', backbone.avgpool)
            ]))

        # RPN head
        if self.use_rpn_head:
            if self.use_fpn:
                # One RPN head shared across levels, 3 anchors per location.
                self.rpn = rpn_head(in_channels=256, out_channels=256, n_anchors=3)
                spatial_scales = self.roi_spatial_scale
                if self.fpn_extra_lvl:
                    spatial_scales = spatial_scales + [spatial_scales[-1] / 2.]
                # One proposal generator per level; anchor size doubles per level.
                self.proposal_generator = nn.ModuleList([GenerateProposals(spatial_scale=spatial_scales[i],
                                                                           anchor_sizes=(32 * 2 ** i,),
                                                                           rpn_pre_nms_top_n=(1000, 12000),
                                                                           rpn_post_nms_top_n=(1000, 2000))
                                                         for i in range(len(spatial_scales))])
                # Note: even when using the extra FPN level, proposals are NOT
                # collected at that level.
                # BUGFIX: the original passed the bound method self.train
                # (always truthy) as the `train` flag; self.training is the
                # boolean flag. NOTE(review): evaluated once at construction —
                # it will not track later .train()/.eval() switches.
                self.collect_and_distr_rois = CollectAndDistributeFpnRpnProposals(spatial_scales=self.roi_spatial_scale,
                                                                                  train=self.training)
            else:
                self.rpn = rpn_head()
                self.proposal_generator = GenerateProposals()

        # Bounding-box regression and classification heads on the 1x1 RoI feature.
        self.bbox_head = nn.Linear(roi_feature_channels, 4 * num_classes)
        self.cls_score = nn.Linear(roi_feature_channels, num_classes)
        # Mask prediction head
        if self.use_mask_head:
            if self.mask_head_type == 'upshare':
                # Share the first conv-head module (layer4) with the box branch.
                mask_head_conv = self.conv_head[0]
            elif self.mask_head_type == '1up4convs':
                mask_head_conv = four_layer_conv()
            else:
                # Previously an unknown type crashed later with NameError.
                raise ValueError('unknown mask_head_type: %s' % mask_head_type)
            self.mask_head = mask_head(mask_head_conv,
                                       self.roi_spatial_scale,
                                       self.roi_sampling_ratio, output_prob=True)

    def forward(self, image, rois=None, scaling_factor=None, roi_original_idx=None):
        """Run detection on an image batch.

        Args:
            image: input tensor of shape (N, C, H, W).
            rois: externally supplied rois (tensor, or per-level list with
                FPN); overwritten when an RPN head is attached.
            scaling_factor: image scaling factor forwarded to proposal
                generation.
            roi_original_idx: ordering indices used to restore roi order after
                per-level FPN pooling when rois are supplied externally.

        Returns:
            Tuple ``(cls_score, bbox_pred, rois, img_features)``.
        """
        h, w = image.size(2), image.size(3)
        # dense backbone features (single map, or per-level list with FPN)
        img_features = self.backbone(image)  # equivalent to gpu_0/res4_5_sum

        # generate rois if equipped with RPN head
        if self.use_rpn_head and not self.use_fpn:
            # case without FPN: proposal generation in a single step
            rpn_cls_prob, rpn_bbox_pred = self.rpn(img_features)
            rois, rpn_roi_probs = self.proposal_generator(rpn_cls_prob, rpn_bbox_pred, h, w, scaling_factor)
        elif self.use_rpn_head and self.use_fpn:
            # case with FPN: proposal generation for each FPN level
            assert isinstance(img_features, list) and isinstance(self.roi_spatial_scale, list)
            img_features_tmp = img_features
            if self.fpn_extra_lvl:
                # add an extra feature level by 2x subsampling the last one
                img_features_tmp = img_features_tmp + [nn.functional.max_pool2d(img_features[-1], 1, stride=2)]
            cls_and_bbox = [self.rpn(img_features_tmp[i]) for i in range(len(img_features_tmp))]
            rois_and_probs = [self.proposal_generator[i](cls_and_bbox[i][0], cls_and_bbox[i][1], h, w, scaling_factor)
                              for i in range(len(img_features_tmp))]
            rois = [item[0] for item in rois_and_probs]
            rpn_roi_probs = [item[1] for item in rois_and_probs]
            # combine rois from all FPN levels and re-assign each to the
            # correct FPN level for later RoI pooling
            rois, roi_original_idx = self.collect_and_distr_rois(rois, rpn_roi_probs)

        # compute dense roi features
        if not self.use_fpn:
            roi_features = RoIAlignFunction.apply(img_features, preprocess_rois(rois), self.roi_height, self.roi_width,
                                                  self.roi_spatial_scale,
                                                  self.roi_sampling_ratio)  # 14x14 feature per proposal
        else:
            assert isinstance(img_features, list) and isinstance(rois, list) and isinstance(self.roi_spatial_scale,
                                                                                            list)
            roi_features = [
                RoIAlignFunction.apply(img_features[i], preprocess_rois(rois[i]), self.roi_height, self.roi_width,
                                       self.roi_spatial_scale[i], self.roi_sampling_ratio) for i in
                range(len(self.roi_spatial_scale))]
            # concatenate roi features from all levels of the FPN
            roi_features = torch.cat(tuple(roi_features), 0)
            rois = torch.cat(tuple(rois), 0)
            # restore the original (pre-distribution) roi order
            roi_features = roi_features[roi_original_idx, :]
            rois = rois[roi_original_idx, :]

        # compute 1x1 roi features
        roi_features = self.conv_head(roi_features)  # 1x1 feature per proposal
        roi_features = roi_features.view(roi_features.size(0), -1)

        # classification scores per roi
        cls_score = self.cls_score(roi_features)

        # bounding-box regression parameters per roi
        bbox_pred = self.bbox_head(roi_features)

        return cls_score, bbox_pred, rois, img_features
