import torch
import numpy as np
import soundfile as sf
from pathlib import Path
from utils import load_config
from train import Demucs
import librosa

# Module-level globals that receive the input/output paths from the GUI;
# separate() overwrites the first two from its arguments on every call.
INPUT_PATH = " "
OUTPUT_DIR = " "
# Trained model weights (must contain a 'model_state_dict' entry).
CHECKPOINT = "./100epochCheckPoints/model_100.pth"
DURATION = 30  # seconds of audio processed from the start of the file
SAMPLE_RATE = 44100  # Hz; input is resampled to this rate and stems are written at it


def load_model(checkpoint_path: str = CHECKPOINT):
    """Load the trained Demucs model onto the best available device.

    Generalized so callers can point at an arbitrary checkpoint while the
    default keeps the original behavior (module-level CHECKPOINT).

    Args:
        checkpoint_path: Path to a torch checkpoint file containing a
            'model_state_dict' entry. Defaults to CHECKPOINT.

    Returns:
        Tuple of (model set to eval mode, torch.device it was moved to).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    config = load_config('config.yaml')
    model = Demucs(config['model']).to(device)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources (consider weights_only=True if the
    # checkpoint format permits it).
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # disable dropout / freeze batch-norm stats for inference
    return model, device


def separate(input_path, output_dir):
    """Split the first DURATION seconds of a mixture into four stems.

    Loads the audio at SAMPLE_RATE, runs the Demucs model on it, and writes
    vocals/drums/bass/other as 16-bit PCM WAV files into output_dir.

    Args:
        input_path: Path to the mixture audio file.
        output_dir: Directory where the separated stems are written
            (created if it does not exist).
    """
    global INPUT_PATH, OUTPUT_DIR
    # Mirror the arguments into the module-level globals shared with the GUI.
    INPUT_PATH = input_path
    OUTPUT_DIR = output_dir

    dest_dir = Path(OUTPUT_DIR)
    dest_dir.mkdir(parents=True, exist_ok=True)

    model, device = load_model()

    # Load at the model's expected rate, keeping all channels (mono=False).
    waveform, _sr = librosa.load(
        INPUT_PATH, sr=SAMPLE_RATE, mono=False, duration=DURATION
    )
    if waveform.ndim == 1:
        # Mono input: duplicate the single channel to fake a stereo pair.
        waveform = np.stack([waveform, waveform])

    batch = torch.as_tensor(waveform).unsqueeze(0).to(device)

    with torch.no_grad():
        estimates = model(batch)

    # Drop the batch dimension; presumably shape is (stems, channels, samples).
    stems = estimates[0].cpu().numpy()

    for idx, stem_name in enumerate(['vocals', 'drums', 'bass', 'other']):
        sf.write(
            dest_dir / f"{stem_name}.wav",
            stems[idx].T,  # soundfile expects (frames, channels)
            SAMPLE_RATE,
            subtype='PCM_16',
        )
    print(f"分离完成！结果保存至：{dest_dir}")