# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:   dab_encoder
Description: Transformer encoder stack for DAB-DETR with
             content-conditioned positional-embedding scaling.
Author:      'li'
Date:        2022/7/4
Change Activity:
    2022/7/4: created
-------------------------------------------------
"""
from typing import Optional

from torch import nn, Tensor

from .dab_encoder_layer import DABEncoderLayer
from ..mlp import MLP
from ....model.base import BaseModule


class DABEncoder(BaseModule):
    """Stack of DAB-DETR encoder layers.

    Before each layer, the positional embedding is re-scaled element-wise by
    a factor produced by a small MLP over the current content features
    (the conditional position scaling used in DAB-DETR).
    """

    def __init__(self, num_layers=6, norm=None, embed_dim=256, num_heads=6,
                 hidden_channels=2048, dropout=0.1, activation='ReLU',
                 normalize_before=False):
        """
        Args:
            num_layers (int): number of stacked encoder layers.
            norm (nn.Module | None): optional normalization applied to the
                final output; skipped when ``None``.
            embed_dim (int): feature dimension of the encoder.
            num_heads (int): attention heads per layer.
                NOTE(review): the default (6) does not divide the default
                ``embed_dim`` (256), which ``nn.MultiheadAttention`` rejects —
                confirm the intended defaults against callers.
            hidden_channels (int): hidden size of each layer's feed-forward block.
            dropout (float): dropout probability forwarded to each layer.
            activation (str): activation name forwarded to each layer.
            normalize_before (bool): pre-norm vs. post-norm layer variant.
        """
        super().__init__()
        self.num_layers = num_layers
        self.norm = norm
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.hidden_channels = hidden_channels
        self.dropout = dropout
        self.activation = activation
        self.normalize_before = normalize_before
        # MLP producing the per-token scaling factor applied to `pos` in forward().
        self.query_scale = MLP(embed_dim, embed_dim, embed_dim, 2)
        self.layers = self._build_layers()

    def _build_layers(self) -> nn.ModuleList:
        """Create ``num_layers`` identically-configured encoder layers."""
        return nn.ModuleList(
            DABEncoderLayer(embed_dim=self.embed_dim,
                            num_heads=self.num_heads,
                            hidden_channels=self.hidden_channels,
                            dropout=self.dropout,
                            activation=self.activation,
                            normalize_before=self.normalize_before)
            for _ in range(self.num_layers)
        )

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Run the input through every encoder layer.

        Args:
            src (Tensor): input feature sequence.
            mask (Tensor | None): attention mask forwarded as ``src_mask``.
            src_key_padding_mask (Tensor | None): key padding mask.
            pos (Tensor | None): positional embedding; re-scaled per layer by
                ``query_scale``. May be ``None``, in which case no positional
                embedding is passed to the layers.

        Returns:
            Tensor: encoded features, normalized by ``self.norm`` if set.
        """
        output = src
        for layer in self.layers:
            # Rescale the positional embedding with a content-conditioned
            # factor. Guard against pos=None (annotated Optional): the
            # original unconditional `pos * pos_scales` raised TypeError.
            if pos is None:
                scaled_pos = None
            else:
                scaled_pos = pos * self.query_scale(output)  # mlp
            output = layer(output, src_mask=mask,
                           src_key_padding_mask=src_key_padding_mask,
                           pos=scaled_pos)

        if self.norm is not None:
            output = self.norm(output)

        return output
