import sys, os
sys.path.append(os.path.dirname(__file__))
import torch
import torchaudio
from CSBSmodel import CSBSModel
import data_processing

# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Target sample rate (Hz) that all audio is resampled to before feature extraction.
SAMPLE_RATE = 44100

def process_single_song(audio_path, audio_format="mp3"):
    """Load one song and convert it to the MFCC tensor expected by the model.

    Args:
        audio_path: Path to the audio file to process.
        audio_format: Container format hint passed to ``torchaudio.load``.
            Defaults to ``"mp3"`` for backward compatibility.

    Returns:
        torch.Tensor with a leading batch dimension added by ``unsqueeze(0)``
        (assumed shape ``[1, n_frames, 30, time_steps]`` — TODO confirm
        against the model's expected input).
    """
    # 1. Load the audio file.
    waveform, sample_rate = torchaudio.load(audio_path, format=audio_format)
    waveform = waveform.to(DEVICE)

    # 2. Resample to the target rate if needed.
    if sample_rate != SAMPLE_RATE:
        resampler = torchaudio.transforms.Resample(
            orig_freq=sample_rate,
            new_freq=SAMPLE_RATE,
        ).to(DEVICE)
        waveform = resampler(waveform)

    # # 3. Skip the first 15 seconds (intentionally disabled).
    # waveform = waveform[:, SAMPLE_RATE*15:]

    # 4. Down-mix to mono by averaging the channels.
    if waveform.shape[0] > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)

    # 5. Peak-normalize. Guard against all-zero (silent) input: dividing by a
    #    zero peak would fill the tensor with NaNs.
    peak = waveform.abs().max()
    if peak > 0:
        waveform = waveform / peak

    # 6. Split into non-overlapping frames (22050 samples = 0.5 s at 44.1 kHz).
    frames = data_processing.frame_audio(
        waveform, frame_length=22050, frame_step=22050
    ).to(DEVICE)

    # 7. Apply the window function to each frame.
    frames_windowed = data_processing.windows(frames, 22050).to(DEVICE)

    # 8. Extract MFCC features per frame.
    transform_mfcc = torchaudio.transforms.MFCC(
        sample_rate=SAMPLE_RATE,
        n_mfcc=30,
        log_mels=True,
        melkwargs={"n_mels": 128, "n_fft": 2048, "hop_length": 512},
    ).to(DEVICE)
    frames_mfcc = transform_mfcc(frames_windowed)  # presumably [n_frames, 30, time_steps] — verify

    # 9. Add the batch dimension for the model input.
    frames_mfcc = frames_mfcc.unsqueeze(0)
    # If the model also needs a channel dimension, add one more:
    # frames_mfcc = frames_mfcc.unsqueeze(0)

    print("最终输入shape:", frames_mfcc.shape)
    return frames_mfcc

if __name__ == "__main__":
    # Use a raw string so the backslashes are taken literally: the original
    # literal relied on invalid escape sequences ("\p", "\D", ...), which
    # emit SyntaxWarning today and become errors in future Python versions.
    audio_path = r"D:\pycharm\PythonProject\DMER\datasets\test\chorus\812.mp3"  # replace with your audio path
    input_tensor = process_single_song(audio_path)

    # Load the trained weights and run inference without gradient tracking.
    model = CSBSModel(1, 32, 3, 128, 2).to(DEVICE)
    state_dict = torch.load('model_logs/CSBSmodel_epoch10_10.pth', weights_only=True)
    model.load_state_dict(state_dict)
    model.eval()
    with torch.no_grad():
        output = model(input_tensor)
        print("模型输出shape:", output.shape)
        print("VA值:", output)