import torch
import torchaudio
import numpy as np
from model.htsat import HTSAT_Swin_Transformer
import config
import librosa
import torchaudio.transforms as T

def wav2fbank(wav, sr=32000, target_length=1024, n_mels=128):
    """Compute log-mel spectrogram features from a waveform.

    Args:
        wav: 1-D or 2-D waveform (np.ndarray or torch.Tensor); for 2-D
            input only the first channel is used.
        sr: sample rate of the waveform in Hz.
        target_length: number of time frames to pad/crop to.
        n_mels: number of mel frequency bins.

    Returns:
        torch.Tensor of shape [target_length, n_mels] (time-major).
    """
    if isinstance(wav, np.ndarray):
        wav = torch.tensor(wav, dtype=torch.float32)

    # Collapse multi-channel audio to its first channel.
    if wav.ndim == 2:
        wav = wav[0]

    mel_extractor = T.MelSpectrogram(
        sample_rate=sr,
        n_fft=1024,
        win_length=1024,
        hop_length=320,
        f_min=50,
        f_max=14000,
        n_mels=n_mels,
        power=2.0,
    )
    # Log compression; the small epsilon keeps log() finite on silence.
    log_mel = torch.log(mel_extractor(wav) + 1e-6)

    # Pad or crop the time axis (last dim) to exactly target_length frames.
    n_frames = log_mel.shape[1]
    if n_frames < target_length:
        log_mel = torch.nn.functional.pad(log_mel, (0, target_length - n_frames))
    else:
        log_mel = log_mel[..., :target_length]

    # [n_mels, time] -> [time, n_mels]
    return log_mel.transpose(0, 1)

# Key change 1: checkpoint key-name repair. The Lightning wrapper saves
# weights under 'sed_model.<name>'; strip that prefix so the keys match
# the bare HTSAT model.
def fix_state_dict(state_dict):
    """Return a copy of *state_dict* with the leading 'sed_model.' prefix removed.

    Uses str.removeprefix so only a true prefix is stripped; the previous
    str.replace would also mangle keys merely containing 'sed_model.'
    somewhere in the middle.
    """
    return {k.removeprefix('sed_model.'): v for k, v in state_dict.items()}

# Initialize the HTSAT model; all hyperparameters come from the project's
# config module.
model = HTSAT_Swin_Transformer(
    spec_size=config.htsat_spec_size,
    patch_size=config.htsat_patch_size,
    in_chans=1,
    num_classes=config.classes_num,
    window_size=config.htsat_window_size,
    config=config,
    depths=config.htsat_depth,
    embed_dim=config.htsat_dim,
    patch_stride=config.htsat_stride,
    num_heads=config.htsat_num_head
)

# Key change 2: load the Lightning checkpoint and repair its key names.
checkpoint = torch.load(
    r"C:\Users\Admin\Desktop\语音处理\大作业\HTS-Audio-Transformer\results\exp_htsat_pretrain\checkpoint\lightning_logs\version_0\checkpoints\l-epoch=70-acc=0.665.ckpt",
    map_location="cpu",
    weights_only=True  # restricts unpickling to tensors, silencing torch's security warning
)

# Apply the key-name fix so checkpoint keys match the bare model.
fixed_state_dict = fix_state_dict(checkpoint['state_dict'])
# strict=False allows partial loading. NOTE(review): this also silently
# skips missing/unexpected keys — consider inspecting the returned
# IncompatibleKeys to confirm all weights actually loaded.
model.load_state_dict(fixed_state_dict, strict=False)
model.eval()

# Audio preprocessing
def preprocess_audio(file_path, target_sec=10, sr=32000):
    """Load an audio file and pad/crop it to a fixed duration.

    Args:
        file_path: path to the audio file.
        target_sec: target clip length in seconds.
        sr: sample rate to resample to (Hz).

    Returns:
        torch.FloatTensor of shape [1, sr * target_sec] — batch dim plus
        raw samples. (The previous comment claiming [1, 1, time] was
        wrong: unsqueeze(0) on a 1-D array adds only the batch dim.)
    """
    # Load (librosa resamples to sr and returns mono float32).
    waveform, _ = librosa.load(file_path, sr=sr)
    target_len = sr * target_sec
    if len(waveform) < target_len:
        # Zero-pad short clips up to the target length; the branch already
        # guarantees the pad amount is positive, so no max(0, ...) needed.
        waveform = np.pad(waveform, (0, target_len - len(waveform)))
    else:
        # Crop long clips to the target length.
        waveform = waveform[:target_len]

    return torch.tensor(waveform, dtype=torch.float32).unsqueeze(0)  # [1, time]

# Inference on a single ESC-50 clip.
file_path = r"C:\Users\Admin\Desktop\语音处理\大作业\HTS-Audio-Transformer\esc-50\audio_32k\5-263775-B-26.wav"
input_tensor = preprocess_audio(file_path)


with torch.no_grad():
    output = model(input_tensor)
    # The model returns a dict; 'clipwise_output' holds the per-class scores.
    pred_class = torch.argmax(output['clipwise_output'], dim=1).item()

# ESC-50 class labels, indexed by class ID (0-49).
esc50_labels = [
    'dog', 'rooster', 'pig', 'cow', 'frog', 'cat', 'hen', 'insects', 'sheep', 'crow',
    'rain', 'sea_waves', 'crackling_fire', 'crickets', 'chirping_birds', 'water_drops',
    'wind', 'pouring_water', 'toilet_flush', 'thunderstorm', 'crying_baby', 'sneezing',
    'clapping', 'breathing', 'coughing', 'footsteps', 'laughing', 'brushing_teeth',
    'snoring', 'drinking_sipping', 'door_wood_knock', 'mouse_click', 'keyboard_typing',
    'door_wood_creaks', 'can_opening', 'washing_machine', 'vacuum_cleaner', 'clock_alarm',
    'clock_tick', 'glass_breaking', 'helicopter', 'chainsaw', 'siren', 'car_horn',
    'engine', 'train', 'church_bells', 'airplane', 'fireworks', 'hand_saw'
]
print(f"预测结果: 类别ID={pred_class}, 标签={esc50_labels[pred_class]}")