import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
import numpy as np
from models.dsam_layers import center_crop
from modules.transformer import TransformerEncoder

class av_attention_fusion(nn.Module):
    """Multiplicative audio-visual attention with a gated residual.

    Projects the RGB feature map and the audio embedding into a shared
    hidden space, fuses them elementwise, maps the result back to the RGB
    channel count, and adds it to the input scaled by a learned gate
    ``gamma`` (zero-initialised, so the module starts as an identity).
    """

    def __init__(self, rgb_nfilters, audio_nfilters, img_size, hidden_layers):
        super(av_attention_fusion, self).__init__()

        self.rgb_nfilters = rgb_nfilters
        self.audio_nfilters = audio_nfilters
        self.hidden_layers = hidden_layers
        self.img_size = img_size

        # Residual gate; zero init means the block initially passes rgb through.
        self.gamma = Parameter(torch.zeros(1))
        self.relu = nn.ReLU(inplace=True)
        # Per-modality projections into the shared hidden space.
        self.affine_rgb = nn.Linear(rgb_nfilters, hidden_layers)
        self.affine_audio = nn.Linear(audio_nfilters, hidden_layers)
        # Maps fused features back to the RGB channel count.
        self.affine_feat_back = nn.Linear(hidden_layers, rgb_nfilters)

    def forward(self, rgb, audio):
        """Return ``rgb + gamma * attention(rgb, audio)``; shape matches rgb."""
        residual = rgb
        assert rgb.shape[1] == self.rgb_nfilters, "input rgb shape {} should match {}.".format(rgb.shape[1], self.rgb_nfilters)

        # (B, C, H, W) -> (B, H*W, C) token sequence in the hidden space.
        vis = rgb.permute(0, 2, 3, 1)
        vis = self.relu(self.affine_rgb(vis.reshape(vis.size(0), -1, self.rgb_nfilters)))

        # NOTE(review): squeeze() with no dim would also drop a batch of
        # size 1 — confirm callers never pass B == 1 here.
        aud = self.relu(self.affine_audio(audio[0].squeeze()))
        # Broadcast the single audio vector across every spatial token.
        aud = aud.unsqueeze(1).expand(-1, vis.shape[1], -1).contiguous()

        fused = self.affine_feat_back(vis * aud)
        # (B, H*W, C) -> (B, C, H, W), matching the input layout.
        fused = fused.permute(0, 2, 1)
        fused = fused.reshape(fused.shape[0], -1, residual.shape[2], residual.shape[3]).contiguous()

        return fused * self.gamma + residual

class av_transformer_module(nn.Module):
    """Cross-modal transformer that fuses RGB features with an audio
    embedding and decodes the result into a spatial saliency map.

    RGB tokens act as queries and the (spatially broadcast) audio embedding
    supplies keys/values for a cross-modal ``TransformerEncoder``; the fused
    tokens are projected to ``out_layers`` channels, reshaped onto the
    ``img_size`` grid, upsampled x8 and center-cropped.
    """

    def __init__(self, rgb_nfilters, audio_nfilters, img_size, temp_size, hidden_layers, hyp_params):
        """
        Args:
            rgb_nfilters: channel count of the incoming RGB feature map.
            audio_nfilters: dimensionality of the audio embedding.
            img_size: (H, W) token grid of the RGB features.
            temp_size: temporal kernel size used to pool 5-D video inputs.
            hidden_layers: shared embedding width of both modalities.
            hyp_params: namespace carrying transformer hyper-parameters
                (num_heads, nlevels, *_dropout, attn_mask, use_selfAtt).
        """
        super(av_transformer_module, self).__init__()

        self.rgb_nfilters = rgb_nfilters
        self.audio_nfilters = audio_nfilters
        self.hidden_layers = hidden_layers
        self.out_layers = 64
        self.img_size = img_size
        # Collapses the temporal axis of 5-D inputs down to a single frame.
        self.avgpool_rgb = nn.AvgPool3d((temp_size, 1, 1), stride=1)
        self.relu = nn.ReLU()
        self.use_relu = True

        # Cross-modal attention: RGB queries over audio keys/values.
        self.transformer = TransformerEncoder(embed_dim=hidden_layers,
                                              num_heads=hyp_params.num_heads,
                                              layers=hyp_params.nlevels,
                                              attn_dropout=hyp_params.attn_dropout,
                                              relu_dropout=hyp_params.relu_dropout,
                                              res_dropout=hyp_params.res_dropout,
                                              embed_dropout=hyp_params.embed_dropout,
                                              attn_mask=hyp_params.attn_mask)
        self.use_att = hyp_params.use_selfAtt

        if self.use_att:
            # Optional self-attention refinement over the fused tokens.
            self.transformer_self_att = TransformerEncoder(embed_dim=hidden_layers,
                                                           num_heads=hyp_params.num_heads,
                                                           layers=3,
                                                           attn_dropout=hyp_params.attn_dropout,
                                                           relu_dropout=hyp_params.relu_dropout,
                                                           res_dropout=hyp_params.res_dropout,
                                                           embed_dropout=hyp_params.embed_dropout,
                                                           attn_mask=hyp_params.attn_mask)

        self.out_layer = nn.Linear(hidden_layers, self.out_layers)

        # Per-modality projections into the shared embedding space.
        self.affine_rgb = nn.Linear(rgb_nfilters, hidden_layers)
        self.affine_audio = nn.Linear(audio_nfilters, hidden_layers)
        self.upscale_ = nn.Upsample(scale_factor=8, mode='bilinear')

    def forward(self, rgb, audio, crop_h, crop_w):
        """Fuse rgb and audio into a (B, out_layers, crop_h, crop_w) map.

        Args:
            rgb: (B, C, H, W) features, or (B, C, T, H, W) video features
                whose two centre frames are averaged away first.
            audio: indexable whose first element is the audio embedding fed
                to ``affine_audio``.
            crop_h, crop_w: spatial size of the center-cropped output.
        """
        # Kept on self for backward compatibility with any external reader.
        self.crop_w = crop_w
        self.crop_h = crop_h
        if len(rgb.size()) > 4:
            # Keep the two centre frames, then average the temporal axis.
            # NOTE(review): AvgPool3d kernel is (temp_size, 1, 1) but only two
            # frames are kept here — this assumes temp_size == 2; confirm.
            mid_frames = rgb[:, :, rgb.shape[2] // 2 - 1:rgb.shape[2] // 2 + 1, :, :]
            rgb = self.avgpool_rgb(mid_frames).squeeze(2)
        # (B, C, H, W) -> (B, H*W, C) token sequence.
        rgb = rgb.permute(0, 2, 3, 1)
        rgb = rgb.view(rgb.size(0), -1, self.rgb_nfilters).contiguous()

        rgb = self.affine_rgb(rgb)
        if self.use_relu:
            rgb = self.relu(rgb)

        # NOTE(review): squeeze() with no dim would also drop a batch of size 1.
        audio1 = self.affine_audio(audio[0].squeeze())
        if self.use_relu:
            audio1 = self.relu(audio1)
        # Broadcast the audio vector to every spatial position of the grid.
        audio1 = audio1.unsqueeze(1).expand(-1, self.img_size[0] * self.img_size[1], -1).contiguous()

        # TransformerEncoder expects (seq_len, batch, embed_dim).
        rgb = rgb.permute(1, 0, 2)
        audio1 = audio1.permute(1, 0, 2)
        fusion_out = self.transformer(rgb, audio1, audio1)
        if self.use_att:
            fusion_out = self.transformer_self_att(fusion_out)
        # Some encoder variants return (output, attention); keep the output.
        if isinstance(fusion_out, tuple):
            fusion_out = fusion_out[0]
        fusion_out = fusion_out.permute(1, 0, 2)  # back to (B, seq, embed)
        fusion_out = self.out_layer(fusion_out)
        # (B, H*W, out_layers) -> (B, out_layers, H, W)
        sal_bilin = fusion_out.view(-1, self.img_size[0], self.img_size[1], self.out_layers)
        sal_bilin = sal_bilin.permute(0, 3, 1, 2).contiguous()
        # Upsample x8 and crop to the requested output size.
        sal_bilin = center_crop(self.upscale_(sal_bilin), self.crop_h, self.crop_w)

        return sal_bilin


class NonLocal_Module_av(nn.Module):
    """Audio-guided non-local attention block (SAGAN-style position attention,
    similar to DANet CVPR2019).

    Queries and keys are computed from the ``audio`` feature map, values
    from the visual map ``x``; the attended result is added back to ``x``
    through a learned gate ``gamma`` that starts at zero (identity at init).
    """

    def __init__(self, in_dim):
        super(NonLocal_Module_av, self).__init__()
        self.chanel_in = in_dim

        # 1x1 convs: query/key at half the channels, value at full channels.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Zero-initialised residual gate.
        self.gamma = Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, audio):
        """
        Args:
            x: visual feature map (B x C x H x W); supplies the values.
            audio: feature map supplying queries and keys — assumed to share
                x's batch and spatial dims (TODO confirm against callers).
        Returns:
            gamma-gated attention output added to x, same shape as x.
        """
        batch, channels, h, w = x.size()
        n_pos = h * w

        # (B, n_pos, C/2) queries and (B, C/2, n_pos) keys from the audio map.
        queries = self.query_conv(audio).view(batch, -1, n_pos).permute(0, 2, 1)
        keys = self.key_conv(audio).view(batch, -1, n_pos)
        # (B, n_pos, n_pos) position-to-position attention.
        attn = self.softmax(torch.bmm(queries, keys))

        values = self.value_conv(x).view(batch, -1, n_pos)
        attended = torch.bmm(values, attn.permute(0, 2, 1))
        attended = attended.view(batch, channels, h, w)

        return self.gamma * attended + x