from typing import Tuple

from wenet.hotword.attention import MultiHeadedAttention, MultiHeadedAttention2
import torch
import torch.nn as nn
from gxl_ai_utils.utils import utils_file

from wenet.utils.mask import make_pad_mask


class CrossDecoder(torch.nn.Module):
    """Single-layer cross-attention module.

    Wraps one ``MultiHeadedAttention2`` layer whose query feature size
    (``q_dim``) may differ from the key/value feature size (``kv_dim``),
    e.g. attending from acoustic embeddings onto hot-word embeddings.
    """

    def __init__(self,
                 q_dim: int,
                 kv_dim: int,
                 n_head: int = 4,
                 dropout_rate: float = 0.1,
                 ) -> None:
        """Build the cross-attention layer.

        Args:
            q_dim: feature dimension of the query embeddings.
            kv_dim: feature dimension of the key/value embeddings.
            n_head: number of attention heads (default 4, the previous
                hard-coded value).
            dropout_rate: attention dropout probability (default 0.1, the
                previous hard-coded value).
        """
        super().__init__()
        self.cross_decoder = MultiHeadedAttention2(
            n_head=n_head,
            q_feat=q_dim,
            n_feat=kv_dim,
            dropout_rate=dropout_rate,
        )

    def forward(self,
                q_embed: torch.Tensor,
                k_embed: torch.Tensor,
                v_embed: torch.Tensor,
                kv_mask: torch.Tensor,
                ) -> torch.Tensor:
        """Attend from ``q_embed`` onto ``k_embed``/``v_embed``.

        Args:
            q_embed: queries, shape (batch, q_len, q_dim) — see the
                ``__main__`` smoke test below for example shapes.
            k_embed: keys, shape (batch, kv_len, kv_dim).
            v_embed: values, shape (batch, kv_len, kv_dim).
            kv_mask: mask over key/value positions, shape
                (batch, 1, kv_len); True marks valid positions
                (built as ``~make_pad_mask(...)`` by callers).

        Returns:
            The attention output tensor only — the first element of the
            tuple returned by ``MultiHeadedAttention2`` (the original
            ``Tuple[...]`` annotation was wrong: ``[0]`` yields a single
            tensor).
        """
        return self.cross_decoder(q_embed, k_embed, v_embed, kv_mask)[0]



if __name__ == '__main__':
    # Smoke test: report parameter counts for two configurations, then run
    # one forward pass through the mixed-dimension model.
    model = CrossDecoder(3584, 3584)
    model2 = CrossDecoder(1024, 3584)
    utils_file.print_model_size(model)
    utils_file.print_model_size(model2)
    wav_embed = torch.randn(2, 10, 1024)  # used as the QUERY in the call below (q_dim=1024 of model2)
    wav_lens = torch.tensor([10, 7])
    hot_word_embed = torch.randn(2, 4, 3584)  # used as KEY and VALUE below (kv_dim=3584 of model2)
    hot_word_len = torch.tensor([3,4])
    # Masks: ~make_pad_mask -> True marks valid positions; unsqueeze(1) gives (batch, 1, len).
    hot_word_mask = ~make_pad_mask(hot_word_len).unsqueeze(1)
    wav_embed_mask = ~make_pad_mask(wav_lens).unsqueeze(1)  # NOTE(review): computed but never used
    # kv_mask must cover the key/value sequence, so hot_word_mask is passed here.
    res = model2(wav_embed, hot_word_embed, hot_word_embed, hot_word_mask)
    print(res.shape)


