import torch.nn as nn
import torch.nn.functional as F
import torch
from librosa.filters import mel as librosa_mel_fn
import librosa

class Audio2Mel(nn.Module):
    """Convert raw audio waveforms into log10-scaled mel spectrograms.

    The STFT uses a Hann window with ``n_fft == win_length`` and manual
    reflect padding with ``center=False`` (MelGAN-style framing), so the
    number of output frames is approximately ``samples // hop_length``.

    Args:
        win_length: FFT size and analysis window length, in samples.
        hop_length: Hop between successive frames, in samples.
        n_mel_channels: Number of mel filterbank channels.
        sampling_rate: Audio sampling rate in Hz.
        fmin: Lowest mel filterbank frequency in Hz.
        fmax: Highest mel filterbank frequency in Hz (clamped to Nyquist).
    """

    def __init__(self, win_length=1024, hop_length=256, n_mel_channels=80,
                sampling_rate=24000, fmin=0.0, fmax=12000.0):
        super().__init__()
        self.n_fft = win_length
        self.hop_length = hop_length
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate

        # Buffers follow the module across .to(device)/.cuda() calls but
        # are not trainable parameters.
        self.register_buffer('window', torch.hann_window(win_length))

        # Use the filterbank function already imported at module top
        # (librosa.filters.mel); clamp fmax to the Nyquist frequency so
        # the filterbank stays valid for any sampling rate.
        mel_basis = librosa_mel_fn(
            sr=sampling_rate,
            n_fft=win_length,
            n_mels=n_mel_channels,
            fmin=fmin,
            fmax=min(fmax, sampling_rate / 2),
        )
        self.register_buffer('mel_basis', torch.from_numpy(mel_basis).float())

    def forward(self, audioin):
        """Compute the log10 mel spectrogram of a waveform.

        Args:
            audioin: Waveform tensor of shape ``[samples]``,
                ``[batch, samples]``, or ``[batch, 1, samples]``.

        Returns:
            Tensor of shape ``[batch, n_mel_channels, frames]`` holding
            ``log10`` of the clamped mel power spectrogram.
        """
        # Normalize input shape to [batch, 1, samples]; a bare 1-D
        # waveform gets an implicit batch of 1.
        if audioin.dim() == 1:
            audioin = audioin.unsqueeze(0)
        if audioin.dim() == 2:
            audioin = audioin.unsqueeze(1)

        # Manual reflect padding replaces center=True so that framing is
        # explicit and frame count stays ~samples // hop_length.
        p = (self.n_fft - self.hop_length) // 2
        audio = F.pad(audioin, (p, p), "reflect").squeeze(1)

        fft = torch.stft(
            audio,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            win_length=self.n_fft,
            window=self.window,
            center=False,
            return_complex=True,
        )
        # Power spectrum -> mel projection; clamp before log10 so silent
        # frames yield log10(1e-5) = -5 instead of -inf.
        magnitude = torch.abs(fft)
        mel_output = torch.matmul(self.mel_basis, magnitude.pow(2))
        log_mel_spec = torch.log10(torch.clamp(mel_output, min=1e-5))
        return log_mel_spec