import math

import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F

from models.transformer_fusion import av_transformer_module
from modules.nonLocal_module import NonLocal_Module_Pos
from modules.temporal_se import TempSELayer, TempAvgLayer

# Momentum used by every BatchNorm2d layer created in this module.
BN_MOMENTUM = 0.1

def center_crop(x, height, width):
    """Center-crop (or zero-pad) a NCHW tensor to (height, width).

    Relies on ``F.pad`` accepting negative pad sizes, which crop instead of
    pad: when the target is smaller than the input the computed deltas are
    negative and the tensor is cropped symmetrically (the extra row/column of
    an odd difference is removed from the right/bottom, matching the original
    ceil/floor split).

    Args:
        x: 4-D tensor of shape (N, C, H, W).
        height: target output height.
        width: target output width.

    Returns:
        Tensor of shape (N, C, height, width).
    """
    # Half the signed size difference; ceil goes to the left/top pad,
    # floor to the right/bottom pad (identical to the old FloatTensor
    # .sub().div(-2) arithmetic, but without allocating tensors).
    dh = (height - x.size(2)) / 2.0
    dw = (width - x.size(3)) / 2.0
    return F.pad(x, [math.ceil(dw), math.floor(dw), math.ceil(dh), math.floor(dh)])


class dsam_fusion(nn.Module):
    """Top-down decoder that fuses a pyramid of feature maps.

    One upscaling head is registered per level as attribute ``upscale_{i}``
    (kept as plain attributes, not a ModuleList, so existing checkpoint
    state_dict keys remain valid).  Depending on ``opt.no_use_transposeConv``
    each head is either a transposed convolution + BN, or a (1x1 conv +) BN +
    ``nn.Upsample``.
    """

    def __init__(self, num_layers, nfilters, dst_nfilters, opt, scale_factor=(2, 2, 2, 2)):
        """Build the per-level upscaling heads.

        Args:
            num_layers: number of pyramid levels to fuse.
            nfilters: per-level input channel counts.
            dst_nfilters: per-level output channel counts (used on the
                upsample path).
            opt: options object; reads ``use_transformer``,
                ``no_use_transposeConv`` and ``upsample_func``.
            scale_factor: per-level spatial upscaling factor.  Default is a
                tuple (not a list) to avoid the mutable-default pitfall; it
                is only indexed, so callers passing lists are unaffected.
        """
        super(dsam_fusion, self).__init__()
        self.num_layers = num_layers
        self.relu = nn.ReLU(inplace=True)
        self.use_transformer = opt.use_transformer
        for i in range(num_layers):
            if not opt.no_use_transposeConv:
                upscale = nn.Sequential(
                    nn.ConvTranspose2d(nfilters[i], nfilters[i],
                                       kernel_size=2 * scale_factor[i],
                                       stride=scale_factor[i], bias=False),
                    nn.BatchNorm2d(nfilters[i], momentum=BN_MOMENTUM),
                )
            elif i == num_layers - 1:
                # Deepest level: no channel projection, only BN + upsample.
                upscale = nn.Sequential(
                    nn.BatchNorm2d(dst_nfilters[i], momentum=BN_MOMENTUM),
                    nn.Upsample(scale_factor=scale_factor[i], mode=opt.upsample_func),
                )
            else:
                upscale = nn.Sequential(
                    nn.Conv2d(nfilters[i], dst_nfilters[i],
                              kernel_size=1, padding=0, bias=False),
                    nn.BatchNorm2d(dst_nfilters[i], momentum=BN_MOMENTUM),
                    nn.Upsample(scale_factor=scale_factor[i], mode=opt.upsample_func),
                )
            # Register under the historical attribute name 'upscale_{i}'.
            setattr(self, 'upscale_{}'.format(i), upscale)

    def forward(self, inputs, crop_h=None, crop_w=None):
        """Fuse the pyramid coarse-to-fine.

        Args:
            inputs: list of ``num_layers`` feature maps; the addition in the
                loop implies inputs[i] matches the resolution of the map
                upscaled from level i+1 (presumably coarsest last — TODO
                confirm against the caller).
            crop_h, crop_w: unused; kept for interface compatibility.

        Returns:
            List of upscaled maps, deepest level first.
        """
        outputs = []
        out = None
        for i in range(self.num_layers - 1, -1, -1):
            upscale = getattr(self, 'upscale_{}'.format(i))
            if i == self.num_layers - 1:
                # Deepest level: nothing to add yet.
                out = upscale(inputs[i])
            else:
                # Add the lateral skip at this resolution, then upscale.
                out = self.relu(out + inputs[i])
                out = upscale(out)
            outputs.append(out)
        return outputs


class dsam_score_dsn(nn.Module):
    """Temporal pooling + optional spatial attention + side-output scoring.

    Collapses the temporal axis of a 5-D (N, C, T, H, W) feature map, then
    optionally applies non-local spatial attention, a 1-channel score head,
    and a multi-scale transposed-conv upscaling of that score map.
    """

    def __init__(self, prev_layer, prev_nfilters, prev_nsamples, opt):
        """Configure the submodules for one backbone stage.

        Args:
            prev_layer: index of the backbone stage; stage 4 additionally
                projects channels from prev_nfilters[0] to prev_nfilters[1].
            prev_nfilters: (input_channels, projected_channels) pair.
            prev_nsamples: temporal extent pooled away by the pooling layer.
            opt: options object; reads ``pool_layer``, ``no_use_spatio_att``,
                ``no_use_dsam_att``, ``no_use_dsam_multiScale`` and, for the
                'tempAvg' pooling variant, ``temp_reduction`` /
                ``use_tempAvg_fc``.
        """
        super(dsam_score_dsn, self).__init__()
        self.prev_layer = prev_layer
        self.opt = opt
        # NOTE(review): an unrecognized opt.pool_layer leaves self.avgpool
        # undefined and only fails later in forward() — presumably the
        # option is validated upstream; confirm against the caller.
        if opt.pool_layer == 'avgpool':
            self.avgpool = nn.AvgPool3d((prev_nsamples, 1, 1), stride=1)
        elif opt.pool_layer == 'tempSE':
            self.avgpool = TempSELayer(prev_nsamples, prev_nfilters[0])
        elif opt.pool_layer == 'tempAvg':
            self.avgpool = TempAvgLayer(prev_nsamples, prev_nfilters[0],
                                        reduction=opt.temp_reduction, use_fc=opt.use_tempAvg_fc)

        # Stage 4 first projects to prev_nfilters[1] channels; everything
        # downstream then operates on that count, otherwise on
        # prev_nfilters[0].  (This replaces two duplicated branches that
        # differed only in the channel index.)
        if prev_layer == 4:
            self.side_prep = nn.Conv2d(prev_nfilters[0], prev_nfilters[1],
                                       kernel_size=1, padding=0, bias=False)
            n_channels = prev_nfilters[1]
        else:
            n_channels = prev_nfilters[0]
        if not opt.no_use_spatio_att:
            self.spatial_att = NonLocal_Module_Pos(n_channels)
        if not opt.no_use_dsam_att:
            self.score_dsn = nn.Conv2d(n_channels, 1, kernel_size=1, padding=0)
        if not opt.no_use_dsam_multiScale:
            self.upscale_ = nn.ConvTranspose2d(1, 1, kernel_size=2 ** (1 + prev_layer),
                                               stride=2 ** prev_layer, bias=False)

    def forward(self, x, crop_h, crop_w):
        """Pool, attend and score one stage's features.

        Args:
            x: 5-D feature map whose temporal axis (dim 2) is pooled away.
            crop_h, crop_w: target size for the upscaled side output.

        Returns:
            (side, side_out, side_out_tmp): the processed feature map, the
            list with the cropped upscaled score map (if multi-scale is on),
            and the list with the raw score map (if the score head is on).
        """
        side_out = []
        side_out_tmp = []
        self.crop_h = crop_h
        self.crop_w = crop_w
        # (N, C, T, H, W) -> (N, C, 1, H, W) -> (N, C, H, W)
        x = self.avgpool(x).squeeze(2)
        if self.prev_layer == 4:
            x = self.side_prep(x)

        if not self.opt.no_use_spatio_att:
            x = self.spatial_att(x)

        if not self.opt.no_use_dsam_att:
            side_out_tmp.append(self.score_dsn(x))
        # NOTE(review): this assumes the score head is enabled whenever
        # multi-scale is enabled; if no_use_dsam_att is True while
        # no_use_dsam_multiScale is False, side_out_tmp[0] raises IndexError.
        if not self.opt.no_use_dsam_multiScale:
            side_out.append(center_crop(self.upscale_(side_out_tmp[0]), self.crop_h, self.crop_w))

        side = x

        return side, side_out, side_out_tmp


def upsample_filt(size):
    """Return a (size, size) bilinear-upsampling kernel as float64 ndarray.

    The kernel is separable: the outer product of a 1-D triangular tap
    vector with itself, peaking at the (possibly fractional) center.
    """
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    taps = 1 - abs(np.arange(size) - center) / factor
    return np.outer(taps, taps)

def spatial_softmax(x):
    x = torch.exp(x)
    sum_batch = torch.sum(torch.sum(x, 2, keepdim=True), 3, keepdim=True)
    x_soft = torch.div(x,sum_batch)
    return x_soft

def interp_surgery(lay):
    """Set `lay`'s weights so the deconvolution computes bilinear upsampling.

    Intended for ungrouped transposed convolutions: the bilinear kernel is
    copied onto the diagonal weight[i, i] for every channel i; off-diagonal
    (cross-channel) taps are left untouched, so callers presumably start
    from zero-initialized weights — TODO confirm at the call site.

    Args:
        lay: a conv/deconv layer with a square weight of equal in/out
            channel counts.

    Returns:
        The updated ``lay.weight.data`` tensor.

    Raises:
        ValueError: if the channel counts differ or the filter is not
            square (message now carried by the exception instead of being
            printed; callers catching ValueError are unaffected).
    """
    m, k, h, w = lay.weight.data.size()
    if m != k:
        raise ValueError('input + output channels need to be the same')
    if h != w:
        raise ValueError('filters need to be square')
    # Build the kernel once and hoist the numpy->torch conversion out of
    # the loop; copy_ casts float64 -> weight dtype per channel.
    filt = torch.from_numpy(upsample_filt(h))
    for i in range(m):
        lay.weight.data[i, i, :, :].copy_(filt)

    return lay.weight.data
