from __future__ import division

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from torch.nn.modules import loss

from utils.parse_config import *
from utils.utils import build_targets_meta

from resnet import resnet18, resnet50

class Upsample(nn.Module):
    """Functional stand-in for the deprecated ``nn.Upsample`` module.

    Wraps ``F.interpolate`` so a fixed-factor resize can be composed as a
    layer inside a model definition.
    """

    def __init__(self, scale_factor, mode="nearest"):
        super(Upsample, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        # Delegate directly to the functional API with the stored settings.
        return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)


class YOLOHead(nn.Module):
    """Class-conditional YOLO detection head.

    Decodes raw per-class, per-anchor predictions into boxes in image
    pixels and, when targets are supplied, accumulates the YOLO loss one
    class at a time (averaged over classes).
    """

    def __init__(self, anchors, img_dim=416):
        # anchors: (width, height) pairs in input-image pixels.
        # img_dim: network input size; overwritten on every forward call.
        super(YOLOHead, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.ignore_thres = 0.5  # IoU threshold forwarded to build_targets_meta
        self.mse_loss = nn.MSELoss()
        self.bce_loss = nn.BCELoss()
        self.obj_scale = 1      # weight of the objectness loss where an object exists
        self.noobj_scale = 100  # weight of the objectness loss where no object exists
        self.metrics = {}
        self.img_dim = img_dim
        self.grid_size = 0  # grid size of the last decoded map (0 = offsets not computed yet)

    def compute_grid_offsets(self, grid_size, cuda=True):
        """Cache per-cell offsets and stride-scaled anchors for `grid_size`.

        Recomputed lazily by forward() whenever the feature-map size changes.
        """
        self.grid_size = grid_size
        g = self.grid_size
        FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        # Pixels per grid cell; assumes a square input of self.img_dim pixels.
        self.stride = self.img_dim / self.grid_size
        # Calculate offsets for each grid.  Trailing singleton dim broadcasts
        # over the class dimension of the (N, A, g, g, C) decoded tensors.
        self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g, 1]).type(FloatTensor)
        self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g, 1]).type(FloatTensor)
        # Anchors expressed in grid-cell units so they match the decoded w/h.
        self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
        self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1, 1))
        self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1, 1))

    def forward(self, x, targets=None, img_dim=None):
        """Decode predictions; compute the per-class loss when targets are given.

        x is reshaped below as (batch, num_classes, num_anchors, 6, grid,
        grid): for each class, every anchor predicts 6 channels
        (center x, center y, width, height, objectness, class score).

        Returns (output, total_loss):
          output     -- (batch, num_anchors*grid*grid*num_classes,
                        4 + 1 + num_classes); boxes scaled back to image
                        pixels, then confidence, then a per-class score row.
          total_loss -- scalar tensor averaged over classes, or the int 0
                        when targets is None.
        """

        # Tensors for cuda support
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor

        self.img_dim = img_dim
        num_samples = x.size(0)
        num_classes = x.size(1)
        grid_size = x.size(-2)

        # (N, C, A*6, g, g) -> (N, A, g, g, C, 6): box channels last.
        prediction = (
            x.view(num_samples, num_classes, self.num_anchors, 6, grid_size, grid_size)
            .permute(0, 2, 4, 5, 1, 3)
            .contiguous()
        )

        # Get outputs
        x = torch.sigmoid(prediction[..., 0])  # Center x
        y = torch.sigmoid(prediction[..., 1])  # Center y
        w = prediction[..., 2]  # Width
        h = prediction[..., 3]  # Height
        pred_conf = torch.sigmoid(prediction[..., 4])  # Conf

        pred_cls = torch.sigmoid(prediction[..., 5:])  # Cls pred. (one channel per class slot)

        # Normalise scores across the class dimension (dim=-2) for the loss.
        # NOTE(review): this is a softmax over already-sigmoided scores --
        # presumably intentional for the meta formulation; confirm against
        # the training code.
        pred_cls_loss = F.softmax(pred_cls, dim=-2)

        # Scatter each class's scalar score onto the diagonal of a
        # (num_classes, num_classes) block so each output row carries a
        # one-hot-style class score vector.
        pred_cls_output = torch.zeros(pred_cls.shape[:-1] + (num_classes,)).type(FloatTensor)

        for c in range(num_classes):
            pred_cls_output[..., c, c] = pred_cls[..., c, 0]

        # If grid size does not match current we compute new offsets
        if grid_size != self.grid_size:
            self.compute_grid_offsets(grid_size, cuda=x.is_cuda)

        # Add offset and scale with anchors.  FloatTensor(shape) is
        # uninitialised memory; every element is assigned before use.
        pred_boxes = FloatTensor(prediction[..., :4].shape)
        pred_boxes[..., 0] = x.data + self.grid_x
        pred_boxes[..., 1] = y.data + self.grid_y
        pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
        pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h

        output = torch.cat(
            (
                pred_boxes.view(num_samples, -1, 4) * self.stride,  # grid units -> pixels
                pred_conf.view(num_samples, -1, 1),
                pred_cls_output.view(num_samples, -1, num_classes),
            ),
            -1,
        )

        if targets is None:
            return output, 0
        else:
            # Accumulate the loss one class at a time: each class has its
            # own prediction slice and its own subset of the targets.
            total_loss = FloatTensor([0])
            for c in range(num_classes):
                pred_boxes_c = pred_boxes[..., c, :]
                pred_cls_c = pred_cls_loss[..., c, :]
                target_c = targets[targets[:, 1] == c, :]  # rows whose class id (column 1) equals c
                obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets_meta(
                    pred_boxes=pred_boxes_c,
                    pred_cls=pred_cls_c,
                    target=target_c,
                    anchors=self.scaled_anchors,
                    ignore_thres=self.ignore_thres,
                )
                # Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
                _x = x[..., c]
                _y = y[..., c]
                _w = w[..., c]
                _h = h[..., c]
                _pred_conf = pred_conf[..., c]
                _pred_cls = pred_cls_c
                if len(target_c) == 0:
                    # No ground truth for this class: only penalise false positives.
                    total_loss += self.noobj_scale * self.bce_loss(_pred_conf[noobj_mask], tconf[noobj_mask])
                else:
                    loss_x = self.mse_loss(_x[obj_mask], tx[obj_mask])
                    loss_y = self.mse_loss(_y[obj_mask], ty[obj_mask])
                    loss_w = self.mse_loss(_w[obj_mask], tw[obj_mask])
                    loss_h = self.mse_loss(_h[obj_mask], th[obj_mask])
                    loss_conf_obj = self.bce_loss(_pred_conf[obj_mask], tconf[obj_mask])
                    loss_conf_noobj = self.bce_loss(_pred_conf[noobj_mask], tconf[noobj_mask])
                    loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
                    loss_cls = self.bce_loss(_pred_cls[obj_mask], tcls[obj_mask])
                    total_loss += (loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls)
            total_loss /= num_classes
            return output, total_loss


class MetaModel(nn.Module):
    """Reweighting network: encodes a support image into a feature vector.

    Runs a 4-channel ResNet-18 over the support image and global-average-
    pools its deepest feature map down to spatial size 1x1.
    """

    def __init__(self):
        super(MetaModel, self).__init__()
        self.backbone = resnet18(in_channels=4)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, support_image):
        feature_maps = self.backbone(support_image)
        deepest = feature_maps[-1]  # last (highest-level) feature map
        return self.avgpool(deepest)


class Darknet(nn.Module):
    """Meta YOLO detector.

    Extracts query-image features, reweights them with one vector per
    class produced by the meta model from support images, and runs the
    stacked per-class maps through a shared YOLO head.
    """

    def __init__(self, num_classes=18, img_size=416):
        super(Darknet, self).__init__()
        anchors = [(23, 27), (37, 58), (81, 82)]

        self.feature_extractor = resnet18()
        self.meta_model = MetaModel()
        # One 1x1 conv shared across classes: 6 channels per anchor.
        self.prediction_layer = nn.Conv2d(512, len(anchors) * 6, kernel_size=1, stride=1, padding=0)
        self.yolo_head = YOLOHead(anchors=anchors, img_dim=None)

        self.img_size = img_size
        self.seen = 0  # images seen so far (Darknet weight-file header field)
        self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)

    def forward(self, x, support_image, targets=None):
        img_dim = x.shape[2]
        features = self.feature_extractor(x)[-1]
        class_weights = self.meta_model(support_image)
        # Channel-wise reweighting, one prediction map per class.
        per_class = [
            self.prediction_layer(features * cw).unsqueeze(dim=1)
            for cw in class_weights
        ]
        stacked = torch.cat(per_class, dim=1)
        yolo_output, total_loss = self.yolo_head(stacked, targets, img_dim)
        if targets is None:
            return yolo_output
        return total_loss, yolo_output
