import torch
import fairseq
from fairseq.models.hubert import HubertModel

class HUBERT(torch.nn.Module):
    def __init__(self, input_dim=-1,
        global_cmvn=None,hubert_path=""):
        super().__init__()
        # 从预训练模型中加载Hubert模型及配置（这里假设使用默认的预训练设置，你可以根据实际情况替换路径等）
        model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([
            hubert_path
        ])
        self.hubert = model[0]
        print(self.hubert)
        self.dropout = torch.nn.Dropout(0.1)
    def forward(self, x, x_lens):
        batch_size, max_sequence_length = x.size()
        padding_mask = torch.arange(max_sequence_length, device=x.device).expand(batch_size, -1).type_as(
            x_lens) >= x_lens.unsqueeze(1)
        x, other_features = self.hubert.extract_features(x,
                                            padding_mask=padding_mask,
                                            mask=False,
                                           )
        x = self.dropout(x)
        return x, other_features

if __name__ == '__main__':
    # Smoke test for the HuBERT wrapper.
    # NOTE: the checkpoint path must be passed by keyword — the first
    # positional parameter of HUBERT.__init__ is input_dim, not the path.
    hubert_model = HUBERT(
        hubert_path="/home/work_nfs15/asr_data/ckpt/origin_chinese_hubert/chinese_hubert_large.pt"
    )
    x = torch.rand(3, 10000)  # batch of 3 random waveforms, 10000 samples each
    x_lens = torch.randint(6000, 10000, (3,))
    x, other_features = hubert_model(x, x_lens)
    # Expect (batch, frames, feature_dim); exact frames/dim depend on the
    # loaded checkpoint — verify against the model config.
    print(x.shape)
    print(other_features)