import copy
from typing import Optional, List

import torch
import torch.nn.functional as F
from torch import nn, Tensor
from .utils import _get_clones, _get_activation_fn
import math

class InteractionTransformerDecoder(nn.Module):
    """Transformer decoder with two parallel query streams.

    At each depth ``i`` an *instance* (detection) stream and an
    *interaction* (relation) stream each run their own decoder layer over
    the shared encoder ``memory``; an optional per-layer interaction
    module then refines the relation stream using the instance stream's
    output.
    """

    def __init__(self,
                 decoder_layer,
                 rel_decoder_layer,
                 num_layers,
                 interaction_layer=None,
                 norm=None,
                 rel_norm=None,
                 return_intermediate=False):
        """
        Args:
            decoder_layer: instance-stream decoder layer, cloned ``num_layers`` times.
            rel_decoder_layer: relation-stream decoder layer, cloned likewise.
            num_layers: decoder depth (layers per stream).
            interaction_layer: optional instance-aware attention module,
                cloned per layer; the refinement step is skipped when None.
            norm: optional final norm for the instance stream.
            rel_norm: optional final norm for the relation stream.
            return_intermediate: if True, forward returns per-layer outputs
                stacked along a new leading dim (for auxiliary losses).
        """
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.rel_layers = _get_clones(rel_decoder_layer, num_layers)
        self.num_layers = num_layers
        if interaction_layer is not None:
            self.rel_interaction_layers = _get_clones(interaction_layer, num_layers)
        else:
            self.rel_interaction_layers = None
        self.norm = norm
        self.rel_norm = rel_norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, rel_tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                rel_query_pos: Optional[Tensor] = None):
        """Run both decoder streams over ``memory``.

        Returns:
            ``(output, rel_output)`` — final instance / relation features,
            or, when ``return_intermediate`` is set, the per-layer outputs
            of both streams stacked along a new leading dimension.
        """
        output = tgt
        rel_output = rel_tgt

        intermediate = []
        rel_intermediate = []

        for i in range(self.num_layers):
            # instance decoder layer
            output = self.layers[i](output, memory, tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos, query_pos=query_pos)
            # interaction decoder layer
            rel_output = self.rel_layers[i](rel_output, memory, tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos, query_pos=rel_query_pos)
            # instance-aware attention module
            if self.rel_interaction_layers is not None:
                rel_output = self.rel_interaction_layers[i](output, rel_output)
            # Collect per-layer outputs for the aux loss.
            # BUG FIX: the original called self.norm / self.rel_norm here
            # unconditionally, crashing whenever return_intermediate was
            # set with either norm left as None.
            if self.return_intermediate:
                intermediate.append(
                    self.norm(output) if self.norm is not None else output)
                rel_intermediate.append(
                    self.rel_norm(rel_output) if self.rel_norm is not None else rel_output)

        # Final norms, each stream guarded independently.
        # BUG FIX: the original applied rel_norm only inside the
        # `self.norm is not None` branch, so a provided rel_norm was
        # silently skipped when norm was None (and a missing rel_norm
        # crashed when norm was set).
        if self.norm is not None:
            output = self.norm(output)
        if self.rel_norm is not None:
            rel_output = self.rel_norm(rel_output)

        if self.return_intermediate:
            # The last appended entries already equal the final (normed)
            # outputs; replace them so the stack stays consistent.
            intermediate[-1] = output
            rel_intermediate[-1] = rel_output
            return torch.stack(intermediate), torch.stack(rel_intermediate)

        return output, rel_output


class InteractionLayer(nn.Module):
    """Instance-aware attention: refines relation queries by attending over
    instance (detection) queries.

    Projects both streams to ``d_feature``, computes a relation->instance
    attention map, and adds the attended instance values back into the
    relation stream with a residual + LayerNorm.

    NOTE(review): the residual add of ``rel_add`` (last dim ``d_feature``)
    onto ``rel_in`` (last dim ``d_model``) requires d_feature == d_model
    (the "256" comments suggest both are 256) — confirm at call sites.
    """

    def __init__(self, d_model, d_feature, dropout=0.1):
        """
        Args:
            d_model: width of the incoming query features.
            d_feature: width of the attention projection space.
            dropout: dropout probability, or None to disable dropout.
        """
        super().__init__()
        self.d_feature = d_feature

        self.det_tfm = nn.Linear(d_model, d_feature)
        self.rel_tfm = nn.Linear(d_model, d_feature)
        self.det_value_tfm = nn.Linear(d_model, d_feature)

        self.rel_norm = nn.LayerNorm(d_model)

        if dropout is not None:
            self.dropout = dropout
            self.det_dropout = nn.Dropout(dropout)
            self.rel_add_dropout = nn.Dropout(dropout)
        else:
            self.dropout = None

    def forward(self, det_in, rel_in):
        """Inputs/outputs are (num_queries, batch, d_model), e.g. 100, N, 256."""
        Q = self.det_tfm(det_in).transpose(0, 1)          # N, 100, d_feature
        K = self.rel_tfm(rel_in).permute(1, 2, 0)         # N, d_feature, 100
        V = self.det_value_tfm(det_in).transpose(0, 1)    # N, 100, d_feature
        # N, 100, 100 — scaled dot-product scores
        scores = torch.matmul(Q, K) / math.sqrt(self.d_feature)
        # transpose so each relation query distributes attention over instances
        det_weight = F.softmax(scores.transpose(1, 2), dim=-1)
        if self.dropout is not None:
            det_weight = self.det_dropout(det_weight)
        # N, 100, d_feature — instance values attended per relation query
        rel_add = torch.matmul(det_weight, V)
        # BUG FIX: the original referenced self.rel_add_dropout
        # unconditionally, raising AttributeError whenever the layer was
        # constructed with dropout=None.
        if self.dropout is not None:
            rel_add = self.rel_add_dropout(rel_add)
        rel_out = rel_add + rel_in.transpose(0, 1)
        rel_out = self.rel_norm(rel_out).transpose(0, 1)
        # 100, N, d_model
        return rel_out