import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from gxl_ai_utils.utils import utils_file

from wenet.transformer.encoder import TransformerEncoder
# from wenet.utils.init_model import WENET_ENCODER_CLASSES


class SpeechEncoder2(nn.Module):
    """Lightweight speech encoder: project 80-dim features to ``hidden_size``,
    run a 4-layer Transformer encoder, and return only the first time step
    (the query slot when a query vector is prepended).
    """

    def __init__(self, hidden_size=3584, dropout=0.1):
        super().__init__()
        self.hidden_size = hidden_size
        # Transformer stack: captures global dependencies across frames.
        layer = nn.TransformerEncoderLayer(
            d_model=hidden_size,
            nhead=8,
            dim_feedforward=int(hidden_size * 1.5),
            dropout=dropout,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(layer, num_layers=4)
        self.input_fc = nn.Linear(80, hidden_size)
        self.dropout = nn.Dropout(dropout)

    def _generate_mask(self, x_lens, max_len):
        # Key-padding mask: True at padded positions (index >= valid length).
        positions = torch.arange(max_len).to(x_lens.device)
        return positions.unsqueeze(0) >= x_lens.unsqueeze(1)

    def forward(self, x, x_lens, query_for_speech=None):
        """Encode a padded batch of feature sequences.

        Args:
            x: (B, T, 80) input features.
            x_lens: (B,) valid length of each sequence.
            query_for_speech: optional (1, 1, H) query vector, broadcast over
                the batch and prepended to every sequence before encoding.

        Returns:
            (B, 1, H) — the encoded leading position.
        """
        hidden = self.dropout(self.input_fc(x))  # (B, T, H)
        if query_for_speech is not None:
            query = query_for_speech.expand(hidden.size(0), -1, -1)
            hidden = torch.cat([query, hidden], dim=1)  # (B, T+1, H)
            x_lens = x_lens + 1

        # Build the padding mask for the (possibly extended) sequence.
        pad_mask = self._generate_mask(x_lens, hidden.size(1))
        encoded = self.transformer(hidden, src_key_padding_mask=pad_mask)

        # Keep only the leading position (the query slot when one was prepended).
        return encoded[:, :1, :]


class SpeechEncoder(nn.Module):
    """Bridges a speech encoder running at a smaller hidden size to an LLM.

    The LLM-sized query vector is projected down to the wrapped encoder's
    hidden size, run through the encoder together with the features, and the
    encoder outputs at the query positions are projected back up to LLM size.

    Args:
        encoder: the OSUM speech encoder; called as ``encoder(x, x_lens, q)``
            and expected to return ``(embeddings, mask)`` with the query
            positions first along the time axis.
        llm_dim: hidden size of the LLM / query vectors (default 3584).
        encoder_dim: hidden size of the wrapped encoder (default 1024).
    """

    def __init__(self, encoder, llm_dim=3584, encoder_dim=1024):
        super().__init__()
        self.encoder = encoder  # OSUM encoder
        self.down_fn = nn.Linear(llm_dim, encoder_dim)
        self.up_fn = nn.Linear(encoder_dim, llm_dim)

    def forward(self, x, x_lens, query_for_speech):
        """Forward pass.

        Args:
            x: (B, T, 80) input features.
            x_lens: (B,) valid lengths.
            query_for_speech: (B, num_query, llm_dim) query vectors.

        Returns:
            (B, num_query, llm_dim) query embeddings for LLM input.
        """
        query_down = self.down_fn(query_for_speech)
        x_embed, _x_mask = self.encoder(x, x_lens, query_down)
        # The encoder prepends the query positions; slice them back out.
        query_after_encoder = x_embed[:, :query_for_speech.size(1), :]
        return self.up_fn(query_after_encoder)


def init_speech_encoder(configs, wihsper_ckpt_path=None):
    """Build a SpeechEncoder from a wenet-style config and optionally load
    pretrained (Whisper) encoder weights.

    Args:
        configs: dict with 'input_dim' and 'encoder_conf' (which may contain
            an optional 'efficient_conf' sub-dict of extra kwargs).
        wihsper_ckpt_path: optional path to a state-dict checkpoint.
            NOTE(review): parameter name kept misspelled ("wihsper") for
            backward compatibility with existing keyword callers.

    Returns:
        SpeechEncoder wrapping a freshly built TransformerEncoder.
    """
    encoder_conf = configs['encoder_conf']
    encoder = TransformerEncoder(
        configs['input_dim'],
        global_cmvn=None,
        **encoder_conf,
        **encoder_conf.get('efficient_conf', {}))
    speech_encoder = SpeechEncoder(encoder)
    if wihsper_ckpt_path is not None:
        # Log only on rank 0 so distributed runs don't print duplicates
        # (previously only the key lists were rank-guarded).
        rank = int(os.environ.get('RANK', 0))
        if rank == 0:
            print(f'load whisper encoder from {wihsper_ckpt_path}')
        ckpt = torch.load(wihsper_ckpt_path, map_location='cpu')
        # strict=False: the checkpoint covers only the wrapped encoder, not
        # the down/up projections.
        missing_keys, unexpected_keys = speech_encoder.load_state_dict(
            ckpt, strict=False)
        if rank == 0:
            for key in missing_keys:
                print("missing tensor: {}".format(key))
            for key in unexpected_keys:
                print("unexpected tensor: {}".format(key))
            print(f'load whisper encoder success')
    return speech_encoder

if __name__ == '__main__':
    # Smoke test: build the model from a local config and run one dummy batch.
    config_path = "../../examples/osum/conf/config_llm_huawei_base-version.yaml"
    configs = utils_file.load_dict_from_yaml(config_path)
    speech_encoder = init_speech_encoder(configs)
    utils_file.print_model_size(speech_encoder)

    feats = torch.rand(4, 100, 80)
    feat_lens = torch.randint(1, 100, (4,))
    query = torch.rand(4, 1, 3584)
    out = speech_encoder(feats, feat_lens, query)
    print(out.shape)