# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch.nn as nn
from mmcv.runner import ModuleList

from mmocr.models.builder import ENCODERS
from mmocr.models.common import TFEncoderLayer,PositionalEncoding
from .base_encoder import BaseEncoder


def get_pad_mask(seq, pad_idx):
    """Return a boolean mask that is True at non-padding positions.

    Args:
        seq (Tensor): Integer token tensor of shape ``(N, T)``.
        pad_idx (int): Token index that marks padding.

    Returns:
        Tensor: Boolean mask of shape ``(N, 1, T)`` — an extra axis is
        inserted so the mask broadcasts over attention heads.
    """
    non_pad = seq.ne(pad_idx)
    return non_pad.unsqueeze(-2)

@ENCODERS.register_module()
class TextEncoder(BaseEncoder):
    """Transformer Encoder block with self attention mechanism.

    Args:
        n_layers (int): The number of sub-encoder-layers
            in the encoder (default=6).
        n_head (int): The number of heads in the
            multiheadattention models (default=8).
        d_k (int): Total number of features in key.
        d_v (int): Total number of features in value.
        d_model (int): The number of expected features
            in the decoder inputs (default=512).
        d_inner (int): The dimension of the feedforward
            network model (default=256).
        num_classes (int): Vocabulary size of the target-token
            embedding table (default=100).
        padding_idx (int): Token index treated as padding; its embedding
            is frozen at zero and these positions are masked out of
            self-attention (default=10).
        max_seq_len (int): Number of positions precomputed by the
            positional encoding (default=55).
        dropout (float): Dropout layer on attn_output_weights.
        init_cfg (dict or list[dict], optional): Initialization configs.
    """

    def __init__(self,
                 n_layers=6,
                 n_head=8,
                 d_k=64,
                 d_v=64,
                 d_model=512,
                 d_inner=256,
                 num_classes=100,
                 padding_idx=10,
                 max_seq_len=55,
                 dropout=0.1,
                 init_cfg=None,
                 **kwargs):
        super().__init__(init_cfg=init_cfg)
        self.d_model = d_model
        self.padding_idx = padding_idx
        # Stack of identical transformer encoder layers applied in order.
        self.layer_stack = ModuleList([
            TFEncoderLayer(
                d_model, d_inner, n_head, d_k, d_v, dropout=dropout, **kwargs)
            for _ in range(n_layers)
        ])
        # Target-token embedding; rows at ``padding_idx`` stay zero.
        self.trg_word_emb = nn.Embedding(
            num_classes, d_model, padding_idx=padding_idx)
        self.position_enc = PositionalEncoding(
            d_hid=d_model, n_position=max_seq_len, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        # Final normalization applied after the last encoder layer.
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, targets_dict, **kwargs):
        """Encode the padded target sequence with self attention.

        Args:
            targets_dict (dict): Must contain key ``'padded_targets'``, a
                ``(N, T)`` integer tensor of target token indices.
            **kwargs: Must contain key ``'out_enc'``; only its ``.device``
                is read, to place the target tensor.

        Returns:
            dict: ``output`` — encoded features of shape ``(N, T-1, C)``
            after layer norm; ``pad_mask`` — boolean ``(N, 1, T-1)`` mask
            that is True at non-padding positions.
        """
        # Drop the first column before embedding.
        # NOTE(review): presumably position 0 holds a start token — confirm
        # against the targets pipeline that produces 'padded_targets'.
        trg_seq = targets_dict['padded_targets'][:, 1:].contiguous().to(
            kwargs['out_enc'].device)
        trg_embedding = self.trg_word_emb(trg_seq)
        trg_pos_encoded = self.position_enc(trg_embedding)
        tgt = self.dropout(trg_pos_encoded)

        # Mask padding positions so attention ignores them.
        trg_mask = get_pad_mask(trg_seq, pad_idx=self.padding_idx)
        output = tgt
        for enc_layer in self.layer_stack:
            output = enc_layer(output, trg_mask)
        output = self.layer_norm(output)
        outputs = dict(output=output, pad_mask=trg_mask)
        return outputs
