| """ |
| run_smart_sweep.py — Hybrid SPADE (v11 HF + Unrolled LF) · Bayesian sweep |
| =============================================================================== |
| |
| Architettura ibrida |
| ------------------- |
| segnale limitato |
| ↓ |
| LR crossover split a BAND_CROSSOVER_HZ (default 8000 Hz) |
| ├── HF (> 8 kHz) → SPADE v11 S-SPADE H_k (hard thresh, identico, invariato) |
| └── LF (< 8 kHz) → SPADEUnrolled model (soft thresh appreso, context GRU) |
| ↓ |
| somma LF_rec + HF_rec → segnale recuperato |
| |
| Razionale |
| --------- |
| SPADE v11 recupera bene i transienti HF (cymbal snap, hi-hat attack, snap): |
| i coefficienti DCT sopra 8 kHz sono sparsi e H_k li trova in poche iterazioni. |
| Sotto 8 kHz (corpo kick, fondamentale del basso) v11 sotto-recupera perché: |
| • il livello di sparsità k corretto è content-dipendente e il piano fisso |
| s/r/max_iter non lo indovina |
| • i contenuti tonali/sustain non sono globalmente sparsi → H_k spreca |
| budget su coefficienti HF irrilevanti |
| Il modello appreso risolve entrambi i problemi via lambda_lf adattivo e g_max. |
| |
| Pipeline di valutazione (6 tracce, standard run_smart_sweep) |
| ------------------------------------------------------------- |
| 01_orig_with_noise drum + pink noise @0 dBFS (ingresso pipeline) |
| 02_limited uscita limiter (ingresso SPADE) ≈ −LIMITER_THRESHOLD_DB dBFS |
| 03_gt_residual GT residual @RESIDUAL_DBFS (include attenuazione noise) |
| 04_spade_output uscita ibrida (float32, può >0 dBFS) |
| 05_res_iter residual ibrido @RESIDUAL_DBFS (solo componente sparsa) |
| 06_diff_residuals GT_res − res_iter @RESIDUAL_DBFS (silenzio ideale) |
| → annotato con cos_sim, diff/GT dB, noise_floor |
| |
| Sweep Bayesiano |
| --------------- |
| Il modello ML è fisso (pesi caricati da checkpoint). |
| Il TPE ottimizza i parametri classici indipendentemente per ogni banda: |
| LF: lf_delta_db, lf_max_gain_db, lf_release_ms |
| HF: hf_delta_db, hf_win, hf_hop, hf_release_ms, hf_max_gain_db, hf_eps, hf_max_iter |
| |
| USO |
| --- |
| python run_smart_sweep.py --model checkpoints/phase1_best.pt |
| python run_smart_sweep.py --model checkpoints/phase1_best.pt --trials 100 |
| python run_smart_sweep.py --model checkpoints/phase1_best.pt --debug-export 5 |
| python run_smart_sweep.py --model checkpoints/phase1_best.pt --resume |
| python run_smart_sweep.py --model checkpoints/phase1_best.pt --report |
| |
| # baseline: solo v11 broadband (senza modello ML) |
| python run_smart_sweep.py --baseline-v11 |
| |
| DIPENDENZE |
| ---------- |
| pip install numpy scipy soundfile optuna rich torch |
| spade_declip_v11.py — deve essere nel Python path |
| spade_unrolled.py — deve essere nel Python path (per HybridSPADEInference) |
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import logging |
| import sys |
| import time |
| import warnings |
| from dataclasses import asdict |
| from pathlib import Path |
| from typing import Dict, List, Optional, Tuple |
|
|
| import numpy as np |
| import scipy.signal as sig |
| import soundfile as sf |
|
|
| logging.getLogger("optuna").setLevel(logging.WARNING) |
|
|
| |
| try: |
| import optuna |
| from optuna.samplers import TPESampler |
| from optuna.pruners import MedianPruner |
| _HAS_OPTUNA = True |
| except ImportError: |
| _HAS_OPTUNA = False |
| warnings.warn("optuna non trovato — pip install optuna") |
|
|
| |
| try: |
| from rich.console import Console |
| from rich.table import Table |
| _console = Console() |
| _HAS_RICH = True |
| except ImportError: |
| _HAS_RICH = False |
| _console = None |
|
|
| |
| try: |
| from spade_declip_v11 import declip as _v11_declip, DeclipParams as _V11Params |
| _HAS_V11 = True |
| except ImportError: |
| _HAS_V11 = False |
| warnings.warn("spade_declip_v11.py non trovato — processing HF non disponibile") |
|
|
| |
| try: |
| import torch |
| from spade_unrolled import ( |
| SPADEUnrolled, UnrolledConfig, SPADEUnrolledInference, |
| HybridSPADEInference, |
| ) |
| _HAS_UNROLLED = True |
| except ImportError: |
| _HAS_UNROLLED = False |
| warnings.warn("spade_unrolled.py / torch non trovati — modello ML non disponibile") |
|
|
|
|
| |
| |
| |
|
|
# Drum-sample sub-folders (under --base-dir) scanned when building the corpus.
DRUM_DIRS = ["Kicks", "Snares", "Perc", "Tops"]


# Synthetic brickwall limiter that produces the "limited" input signal.
LIMITER_THRESHOLD_DB = 1.5   # dB below full scale (the sign is ignored downstream)
LIMITER_RELEASE_MS = 80.0    # one-pole gain-release time constant, in ms


RESIDUAL_DBFS = -3.0          # peak level all residual tracks are normalized to
PINK_NOISE_LEVEL_DB = -20.0   # pink-noise level relative to the sample's peak


# LF/HF crossover frequency of the hybrid pipeline (Hz).
BAND_CROSSOVER_HZ = 8000.0


# v11 S-SPADE settings held fixed for the HF band (never swept by Optuna).
HF_FIXED = dict(
    algo = "sspade",
    frame = "rdft",
    mode = "soft",
    n_jobs = 1,
    verbose = False,
    show_progress = False,
    use_gpu = True,
)


STUDY_NAME = "hybrid_spade_v1"        # Optuna study name (also the SQLite DB stem)
OUT_CSV = "hybrid_sweep_results.csv"  # destination of the completed-trials CSV dump


# Fallback parameters for --debug-export when no completed trial exists in the DB.
DEBUG_HF = dict(
    hf_delta_db = 1.5,
    hf_window_length = 2048,
    hf_hop_length = 512,
    hf_release_ms = 80.0,
    hf_max_gain_db = 9.0,
    hf_eps = 0.05,
    hf_max_iter = 500,
)
DEBUG_LF = dict(
    lf_delta_db = 1.5,
    lf_max_gain_db = 9.0,
    lf_release_ms = 80.0,
)
|
|
|
| |
| |
| |
|
|
def ensure_2d(a: np.ndarray) -> np.ndarray:
    """Return *a* as (samples, channels); a 1-D input gains a channel axis."""
    if a.ndim == 1:
        return a[:, None]
    return a
|
|
|
|
def normalize_to_0dBFS(a: np.ndarray) -> np.ndarray:
    """Scale *a* so its absolute peak is 1.0; near-silent input is returned unchanged."""
    peak = np.max(np.abs(a))
    if peak <= 1e-12:
        return a
    return a / peak
|
|
|
|
def normalize_peak(a: np.ndarray, target_dbfs: float) -> np.ndarray:
    """Scale *a* so its absolute peak sits at *target_dbfs*; near-silent input is returned unchanged."""
    peak = np.max(np.abs(a))
    if peak <= 1e-12:
        return a
    return a * (10 ** (target_dbfs / 20.0) / peak)
|
|
|
|
def generate_pink_noise(
    n_samples: int, n_channels: int, rng: np.random.Generator
) -> np.ndarray:
    """Generate (n_samples, n_channels) pink noise; each channel is RMS-normalized to 1.

    White Gaussian noise from *rng* is shaped by a fixed IIR filter that
    approximates a 1/f magnitude response.
    """
    num = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    den = np.array([1.0, -2.494956002, 2.017265875, -0.522189400])
    result = np.empty((n_samples, n_channels))
    for ch in range(n_channels):
        pink = sig.lfilter(num, den, rng.standard_normal(n_samples))
        result[:, ch] = pink / (np.sqrt(np.mean(pink ** 2)) + 1e-12)
    return result
|
|
|
|
def mix_pink_noise(
    audio_0dBFS: np.ndarray,
    sr: int,
    level_db: float,
    rng: np.random.Generator,
) -> np.ndarray:
    """Add pink noise at *level_db* relative to the input's absolute peak.

    *sr* is accepted for signature symmetry with the other helpers but is
    not used here. The output keeps the input's dimensionality (1-D stays 1-D).
    """
    a2d = ensure_2d(audio_0dBFS)
    n, ch = a2d.shape
    noise_gain = np.max(np.abs(a2d)) * (10 ** (level_db / 20.0))
    noisy = a2d + generate_pink_noise(n, ch, rng) * noise_gain
    if audio_0dBFS.ndim == 1:
        return noisy[:, 0]
    return noisy
|
|
|
|
def apply_brickwall_limiter(
    audio_0dBFS: np.ndarray,
    sr: int,
    threshold_db: float = LIMITER_THRESHOLD_DB,
    release_ms: float = LIMITER_RELEASE_MS,
) -> np.ndarray:
    """Feed-forward brickwall limiter with instant attack and one-pole release.

    Parameters
    ----------
    audio_0dBFS : mono (N,) or multi-channel (N, C) signal
    sr          : sample rate in Hz (scales the release time constant)
    threshold_db: limiting threshold below full scale, in dB (sign ignored)
    release_ms  : gain-recovery time constant in milliseconds

    Returns
    -------
    Limited signal, same shape as the input. The gain envelope drops
    instantly whenever |x| exceeds the threshold and recovers through a
    first-order smoother with coefficient exp(-1/(release_ms*sr/1000)).
    """
    thr_lin = 10 ** (-abs(threshold_db) / 20.0)
    # One-pole release coefficient; release_ms == 0 degenerates to rc ≈ 0
    # (instant recovery).
    rc = np.exp(-1.0 / max(release_ms * sr / 1000.0, 1e-9))
    # No defensive .copy() needed: channels are only read below and the
    # output is written into a fresh array.
    audio = ensure_2d(audio_0dBFS)
    N, C = audio.shape
    out = np.empty_like(audio)
    for c in range(C):
        ch = audio[:, c]
        env = 1.0
        g = np.empty(N)
        for n in range(N):
            pk = abs(ch[n])
            target = thr_lin / pk if pk > thr_lin else 1.0
            # Instant attack (take a lower gain immediately), smoothed release.
            env = target if target < env else rc * env + (1.0 - rc) * target
            g[n] = env
        out[:, c] = ch * g
    return out[:, 0] if audio_0dBFS.ndim == 1 else out
|
|
|
|
| def cosine_sim_tf( |
| gt: np.ndarray, |
| est: np.ndarray, |
| sr: int, |
| win_samples: int = 1024, |
| hop_samples: int = 256, |
| n_bands: int = 12, |
| ) -> float: |
| L = min(gt.shape[0], est.shape[0]) |
| g = (gt[:L, 0] if gt.ndim == 2 else gt[:L]).copy() |
| e = (est[:L, 0] if est.ndim == 2 else est[:L]).copy() |
| win = min(win_samples, max(32, L // 4)) |
| hop = min(hop_samples, win // 2) |
| if L < win or win < 32: |
| denom = np.linalg.norm(g) * np.linalg.norm(e) + 1e-12 |
| return float(np.dot(g, e) / denom) |
| _, _, Zg = sig.stft(g, fs=sr, window="hann", nperseg=win, |
| noverlap=win - hop, boundary=None, padded=False) |
| _, _, Ze = sig.stft(e, fs=sr, window="hann", nperseg=win, |
| noverlap=win - hop, boundary=None, padded=False) |
| n_freqs, n_frames = Zg.shape |
| if n_frames == 0: |
| return float(np.dot(g, e) / (np.linalg.norm(g) * np.linalg.norm(e) + 1e-12)) |
| edges = np.unique(np.round( |
| np.logspace(0, np.log10(max(n_freqs, 2)), min(n_bands, n_freqs) + 1) |
| ).astype(int)) |
| edges = np.clip(edges, 0, n_freqs) |
| sims = [] |
| for i in range(len(edges) - 1): |
| f0, f1 = int(edges[i]), int(edges[i + 1]) |
| if f1 <= f0: continue |
| Mg = np.abs(Zg[f0:f1, :]) |
| Me = np.abs(Ze[f0:f1, :]) |
| dot = np.sum(Mg * Me, axis=0) |
| norm_g = np.sqrt(np.sum(Mg ** 2, axis=0)) + 1e-12 |
| norm_e = np.sqrt(np.sum(Me ** 2, axis=0)) + 1e-12 |
| sims.extend((dot / (norm_g * norm_e)).tolist()) |
| return float(np.mean(sims)) if sims else 0.0 |
|
|
|
|
| def _pk_dbfs(a: np.ndarray) -> float: |
| pk = float(np.max(np.abs(a))) |
| return 20.0 * np.log10(pk) if pk > 1e-12 else -999.0 |
|
|
|
|
| def _rms_dbfs(a: np.ndarray) -> float: |
| rms = float(np.sqrt(np.mean(np.asarray(a).astype(float) ** 2))) |
| return 20.0 * np.log10(rms) if rms > 1e-12 else -999.0 |
|
|
|
|
def _write_wav(path: Path, audio: np.ndarray, sr: int) -> None:
    """Write *audio* to *path* as 32-bit float WAV; warn if the peak exceeds 0 dBFS."""
    data = ensure_2d(audio).astype(np.float32)
    peak = float(np.max(np.abs(data)))
    if peak > 1.0:
        # Float WAV tolerates >0 dBFS — just flag it for the operator.
        print(f" [WARN] {path.name}: peak={peak:.4f} (+{20*np.log10(peak):.2f} dBFS) — float32")
    sf.write(str(path), data, sr, subtype="FLOAT")
|
|
|
|
| |
| |
| |
|
|
def build_corpus(base_dir: Path, max_files: Optional[int] = None) -> List[Dict]:
    """Build the evaluation corpus from the drum-sample folders.

    For every readable sample: peak-normalize, mix pink noise (seeded
    deterministically per file), re-normalize, apply the synthetic limiter
    and derive the ground-truth residual. Files where the limiter never
    engages are skipped. Each item stores the noisy original, the limited
    signal, and both the raw and the peak-normalized GT residual, so the
    debug exporter does not need to recompute anything.
    """
    valid_ext = {".wav", ".flac", ".aif", ".aiff"}
    corpus: List[Dict] = []
    file_index = 0

    for folder in DRUM_DIRS:
        d = base_dir / folder
        if not d.exists():
            print(f" [WARN] Cartella non trovata: {d}")
            continue
        for f in sorted(d.glob("*")):
            if f.suffix.lower() not in valid_ext:
                continue
            try:
                audio, sr = sf.read(str(f), always_2d=True)
                audio = audio.astype(float)
            except Exception as exc:
                print(f" [WARN] {f.name}: {exc}")
                continue
            if audio.shape[0] < 64:
                continue

            # Deterministic per-file noise: seed with the running index.
            rng = np.random.default_rng(seed=file_index)
            file_index += 1
            mixed = ensure_2d(mix_pink_noise(normalize_to_0dBFS(audio), sr,
                                             PINK_NOISE_LEVEL_DB, rng))
            orig_with_noise = ensure_2d(normalize_to_0dBFS(mixed))
            limited = ensure_2d(apply_brickwall_limiter(orig_with_noise, sr))
            gt_res_raw = orig_with_noise - limited

            if np.max(np.abs(gt_res_raw)) < 1e-6:
                print(f" [SKIP] {f.name} — limiter inattivo")
                continue

            corpus.append({
                "file": f.name,
                "sr": sr,
                "orig_with_noise": orig_with_noise,
                "limited": limited,
                "gt_res": normalize_peak(gt_res_raw, RESIDUAL_DBFS),
                "gt_res_raw": gt_res_raw,
            })
            if max_files and len(corpus) >= max_files:
                return corpus

    return corpus
|
|
|
|
| |
| |
| |
|
|
| def _lr_split_np(x: np.ndarray, crossover_hz: float, sr: int |
| ) -> Tuple[np.ndarray, np.ndarray]: |
| """Phase-perfect LR crossover. lp + hp == x esattamente.""" |
| from scipy.signal import butter, sosfiltfilt |
| fc = float(np.clip(crossover_hz, 1.0, sr / 2.0 - 1.0)) |
| sos = butter(2, fc, btype="low", fs=sr, output="sos") |
| lp = sosfiltfilt(sos, x) |
| hp = x - lp |
| return lp, hp |
|
|
|
|
def process_hybrid(
    limited: np.ndarray,
    sr: int,
    hf_params: dict,
    lf_model: Optional["HybridSPADEInference"],
    lf_params: dict,
    crossover_hz: float = BAND_CROSSOVER_HZ,
) -> np.ndarray:
    """
    Process a signal with the hybrid pipeline:
        HF (> crossover_hz): v11 S-SPADE, unchanged
        LF (< crossover_hz): SPADEUnrolled (or v11 if lf_model is None)

    If lf_model is None → v11 is used for LF as well (baseline mode).

    Returns an array with the same shape as `limited`.
    """
    if not _HAS_V11:
        raise RuntimeError("spade_declip_v11.py non trovato — impossibile processare HF")

    mono = limited.ndim == 1
    if mono:
        limited = limited[:, None]
    _, C = limited.shape
    output = np.zeros_like(limited, dtype=np.float64)

    # Each channel is processed independently.
    for ch in range(C):
        yc = limited[:, ch].astype(np.float64)

        # Phase-perfect split: lf_band + hf_band == yc exactly.
        lf_band, hf_band = _lr_split_np(yc, crossover_hz, sr)

        # --- HF band: classic v11 S-SPADE ------------------------------
        hf_win = hf_params.get("hf_window_length", 2048)
        hf_hop = hf_params.get("hf_hop_length", hf_win // 4)
        hf_p = _V11Params(
            sample_rate = sr,
            delta_db = hf_params.get("hf_delta_db", 1.5),
            window_length = hf_win,
            hop_length = hf_hop,
            s = hf_params.get("hf_s", 1),
            r = hf_params.get("hf_r", 1),
            eps = hf_params.get("hf_eps", 0.05),
            max_iter = hf_params.get("hf_max_iter", 500),
            max_gain_db = hf_params.get("hf_max_gain_db", 9.0),
            release_ms = hf_params.get("hf_release_ms", 0.0),
            **HF_FIXED,
        )
        hf_rec, _ = _v11_declip(hf_band.astype(np.float32), hf_p)

        # --- LF band: learned model, or v11 fallback --------------------
        if lf_model is not None:
            # NOTE(review): a fresh SPADEUnrolledInference wrapper is built
            # per channel per call — presumably cheap; confirm if profiling
            # shows otherwise.
            lf_infer = SPADEUnrolledInference(
                lf_model.model,
                delta_db = lf_params.get("lf_delta_db", 1.5),
                max_gain_db = lf_params.get("lf_max_gain_db", 9.0),
                device = lf_model.device,
            )
            lf_rec = lf_infer.process(lf_band.astype(np.float32), sr)
        else:
            # Baseline: run v11 on the LF band too, reusing the HF
            # window/eps/iter settings and the LF delta/gain/release.
            lf_win = hf_params.get("hf_window_length", 2048)
            lf_hop = lf_win // 4
            lf_p = _V11Params(
                sample_rate = sr,
                delta_db = lf_params.get("lf_delta_db", 1.5),
                window_length = lf_win,
                hop_length = lf_hop,
                eps = hf_params.get("hf_eps", 0.05),
                max_iter = hf_params.get("hf_max_iter", 500),
                max_gain_db = lf_params.get("lf_max_gain_db", 9.0),
                release_ms = lf_params.get("lf_release_ms", 0.0),
                **HF_FIXED,
            )
            lf_rec, _ = _v11_declip(lf_band.astype(np.float32), lf_p)

        # Recombine, trimming to the shorter of the two reconstructions.
        L = min(len(lf_rec), len(hf_rec))
        output[:L, ch] = lf_rec[:L].astype(np.float64) + hf_rec[:L]

    return output[:, 0] if mono else output
|
|
|
|
| |
| |
| |
|
|
def evaluate_one(
    item: Dict,
    hf_params: dict,
    lf_params: dict,
    lf_model: Optional["HybridSPADEInference"],
) -> Optional[float]:
    """Score one corpus item through the hybrid pipeline.

    Returns cosine_sim_tf(gt_res, res_iter) in [0, 1] (1.0 = perfect
    recovery), or None when processing fails (a warning is emitted).
    """
    try:
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]
        recovered = ensure_2d(
            process_hybrid(limited, sr, hf_params, lf_model, lf_params)
        )
        residual = normalize_peak(recovered - limited, RESIDUAL_DBFS)
        return cosine_sim_tf(gt_res, residual, sr)
    except Exception as exc:
        warnings.warn(f"evaluate_one ({item['file']}): {exc}")
        return None
|
|
|
|
| |
| |
| |
|
|
def make_objective(
    corpus: List[Dict],
    lf_model: Optional["HybridSPADEInference"],
):
    """Build the Optuna objective closure over *corpus* and the optional LF model.

    The returned objective samples HF and LF parameters independently,
    evaluates every corpus item, reports the running mean once at the corpus
    midpoint (so the MedianPruner can abort poor trials early), and returns
    the final mean cosine-similarity score (to be maximized).
    """
    def objective(trial: "optuna.Trial") -> float:

        # --- HF (v11 S-SPADE) search space ------------------------------
        hf_delta = trial.suggest_float("hf_delta_db", 0.5, 3.0, step=0.1)
        hf_win_e = trial.suggest_int ("hf_win_exp", 9, 11)
        hf_hop_d = trial.suggest_categorical("hf_hop_div", [4, 8])
        hf_rel = trial.suggest_float("hf_release_ms", 0.0, 150.0, step=5.0)
        hf_gain = trial.suggest_float("hf_max_gain_db", 2.0, 12.0, step=0.5)
        hf_eps = trial.suggest_categorical("hf_eps", [0.03, 0.05, 0.1])
        hf_iter = trial.suggest_categorical("hf_max_iter", [250, 500, 1000])
        # Window is sampled as an exponent (2^9..2^11 = 512..2048); hop as a divisor.
        hf_win = 2 ** hf_win_e
        hf_hop = hf_win // hf_hop_d

        # --- LF (learned model) search space ----------------------------
        lf_delta = trial.suggest_float("lf_delta_db", 0.5, 3.0, step=0.1)
        lf_gain = trial.suggest_float("lf_max_gain_db", 3.0, 12.0, step=0.5)
        lf_rel = trial.suggest_float("lf_release_ms", 0.0, 150.0, step=5.0)

        hf_params = dict(
            hf_delta_db = hf_delta,
            hf_window_length = hf_win,
            hf_hop_length = hf_hop,
            hf_release_ms = hf_rel,
            hf_max_gain_db = hf_gain,
            hf_eps = hf_eps,
            hf_max_iter = hf_iter,
        )
        lf_params = dict(
            lf_delta_db = lf_delta,
            lf_max_gain_db = lf_gain,
            lf_release_ms = lf_rel,
        )

        scores = []
        midpoint = len(corpus) // 2

        for step, item in enumerate(corpus):
            sc = evaluate_one(item, hf_params, lf_params, lf_model)
            if sc is not None:
                scores.append(sc)
            # Single intermediate report at the midpoint feeds the pruner.
            if step == midpoint and scores:
                trial.report(float(np.mean(scores)), step=step)
                if trial.should_prune():
                    raise optuna.TrialPruned()

        # All items failed → neutral (worst) score rather than an exception.
        if not scores:
            return 0.0
        mean_score = float(np.mean(scores))
        trial.report(mean_score, step=len(corpus))
        return mean_score

    return objective
|
|
|
|
| |
| |
| |
|
|
def debug_export(
    corpus: list,
    base_dir: Path,
    out_dir: Path,
    n_files: int,
    hf_params: dict,
    lf_params: dict,
    lf_model: Optional["HybridSPADEInference"],
) -> None:
    """
    Export 6 float32 WAVs for each of the first n_files corpus items.

    Exported tracks
    ---------------
    01_orig_with_noise   drum + pink noise @0 dBFS (before the limiter)
    02_limited           limiter output (SPADE input)
    03_gt_residual       GT residual @RESIDUAL_DBFS
    04_spade_output      hybrid output (may exceed 0 dBFS)
    05_res_iter          hybrid residual @RESIDUAL_DBFS
    06_diff_residuals    GT_res − res_iter @RESIDUAL_DBFS
                         → annotated: cos_sim, diff/GT dB, noise_floor dB

    Ideal metric  : 06 = silence (diff → −∞ dB)
    Physical floor: ~ PINK_NOISE_LEVEL_DB + RESIDUAL_DBFS (unrecoverable noise)
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    items = corpus[:n_files]
    # Column width sized to the longest exported file name.
    col_w = max(len(it["file"]) for it in items) + 2

    HDR = (f" {'file':<{col_w}} {'traccia':<22}"
           f" {'peak dBFS':>10} {'RMS dBFS':>9} note")
    SEP = " " + "─" * (len(HDR) - 2)

    mode_str = "IBRIDO (v11 HF + ML LF)" if lf_model is not None else "BASELINE v11 broadband"

    # --- Header: mode, parameters and expected levels --------------------
    print()
    if _HAS_RICH:
        _console.rule(f"[bold cyan]DEBUG EXPORT — {mode_str}[/]")
    else:
        print("=" * 72)
        print(f"DEBUG EXPORT — {mode_str}")
        print("=" * 72)

    print(f" Output dir : {out_dir}")
    print(f" Modalità : {mode_str}")
    print(f" Crossover : {BAND_CROSSOVER_HZ:.0f} Hz")
    print(f" HF params : delta={hf_params.get('hf_delta_db',1.5):.2f}"
          f" win={hf_params.get('hf_window_length',2048)}"
          f" rel={hf_params.get('hf_release_ms',0):.0f}ms"
          f" gain={hf_params.get('hf_max_gain_db',9):.1f}dB"
          f" eps={hf_params.get('hf_eps',0.05)}"
          f" iter={hf_params.get('hf_max_iter',500)}")
    print(f" LF params : delta={lf_params.get('lf_delta_db',1.5):.2f}"
          f" gain={lf_params.get('lf_max_gain_db',9):.1f}dB"
          f" rel={lf_params.get('lf_release_ms',0):.0f}ms")
    print(f" File esportati: {len(items)}")
    print()
    print(f" Livelli attesi:")
    print(f" 01 ≈ 0.00 dBFS (normalizzato prima del limiter)")
    print(f" 02 ≈ {-LIMITER_THRESHOLD_DB:+.2f} dBFS (uscita limiter)")
    print(f" 03 = {RESIDUAL_DBFS:+.2f} dBFS (GT residual normalizzato)")
    print(f" 04 può >0 dBFS (transiente recuperato)")
    print(f" 05 = {RESIDUAL_DBFS:+.2f} dBFS (residual ibrido normalizzato)")
    print(f" 06 << 0 dBFS (più basso = migliore)")
    print()
    print(HDR)

    # Per-file (diff_vs_gt_db, cos_sim) pairs for the final summary.
    diff_stats = []

    for file_idx, item in enumerate(items):
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]
        gt_res_raw = item["gt_res_raw"]
        orig_with_noise = item["orig_with_noise"]
        stem = Path(item["file"]).stem

        # --- Run the hybrid pipeline; skip the file on failure -----------
        try:
            fixed_2d = ensure_2d(
                process_hybrid(limited.copy(), sr, hf_params, lf_model, lf_params)
            )
        except Exception as exc:
            print(f" [ERRORE] {item['file']}: {exc}")
            continue

        # Raw recovered residual (pre-normalization).
        res_raw = fixed_2d - limited

        # Compare raw residuals over the common length, channel 0.
        gt_arr = gt_res_raw
        est_arr = res_raw
        L = min(gt_arr.shape[0], est_arr.shape[0])

        # Time-domain cosine similarity between GT and recovered residual.
        g_flat = gt_arr[:L, 0] if gt_arr.ndim == 2 else gt_arr[:L]
        e_flat = est_arr[:L, 0] if est_arr.ndim == 2 else est_arr[:L]
        cos_sim_td = float(
            np.dot(g_flat, e_flat) /
            (np.linalg.norm(g_flat) * np.linalg.norm(e_flat) + 1e-12)
        )

        # RMS of the residual difference, expressed relative to the GT RMS.
        diff_raw = gt_arr[:L] - est_arr[:L]
        diff_rms_db = _rms_dbfs(diff_raw)
        gt_rms_db = _rms_dbfs(gt_arr[:L])
        diff_vs_gt_db = diff_rms_db - gt_rms_db

        # Unrecoverable-noise floor estimate used in the annotation.
        noise_floor_db = PINK_NOISE_LEVEL_DB + RESIDUAL_DBFS

        # Normalized export copies (diff left as-is when it is silence).
        res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)
        diff_norm = (normalize_peak(diff_raw, RESIDUAL_DBFS)
                     if np.max(np.abs(diff_raw)) > 1e-12
                     else diff_raw)

        diff_stats.append((diff_vs_gt_db, cos_sim_td))

        # (track name, audio, note shown in the table) for each export.
        tracks = [
            ("01_orig_with_noise",
             orig_with_noise,
             f"drum+noise @0dBFS (input pipeline)"),
            ("02_limited",
             limited,
             f"uscita limiter (input SPADE) atteso: ~{-LIMITER_THRESHOLD_DB:+.2f}dBFS"),
            ("03_gt_residual",
             gt_res,
             f"GT residual @{RESIDUAL_DBFS:.0f}dBFS (include noise attenuation)"),
            ("04_spade_output",
             fixed_2d,
             f"SPADE output (float32, puo' >0dBFS)"),
            ("05_res_iter",
             res_iter,
             f"residual SPADE @{RESIDUAL_DBFS:.0f}dBFS (solo componente sparsa)"),
            ("06_diff_residuals",
             diff_norm,
             f"GT - iter @{RESIDUAL_DBFS:.0f}dBFS "
             f"cos_sim={cos_sim_td:.3f} diff/GT={diff_vs_gt_db:+.1f}dB "
             f"noise_floor≈{noise_floor_db:+.1f}dB"),
        ]

        # --- Print one table row per track and write the WAV --------------
        print(SEP)
        for track_name, audio, note in tracks:
            pk = _pk_dbfs(audio)
            rms = _rms_dbfs(audio)
            flag = ""
            if track_name == "06_diff_residuals":
                if diff_vs_gt_db < -12: flag = "[OK] buona convergenza"
                elif diff_vs_gt_db < -6: flag = "[~] convergenza parziale"
                else: flag = "[WARN] diff elevato rispetto al GT"
            row = (f" {item['file']:<{col_w}} {track_name:<22}"
                   f" {pk:>+10.2f} {rms:>+9.2f} {note} {flag}")
            if _HAS_RICH:
                color = ("green" if "[OK]" in flag else
                         "yellow" if "[~]" in flag else
                         "red" if "[WARN]" in flag else "")
                _console.print(row.replace(flag, f"[{color or 'dim'}]{flag}[/]") if flag else row)
            else:
                print(row)
            _write_wav(out_dir / f"{stem}__{track_name}.wav", audio, sr)

        # --- Per-file spectral analysis: band-wise residual recovery ------
        # (label, f_lo Hz, f_hi Hz)
        BANDS_SPEC = [
            ("Sub-bass ", 20, 80),
            ("Bass ", 80, 250),
            ("Low-mid ", 250, 800),
            ("High-mid ", 800, 4000),
            ("High <8k ", 4000, 8000),
            ("High >8k ", 8000, 20000),
        ]

        def band_energy(audio_2d, sr, f_lo, f_hi):
            # RMS (dBFS) of channel 0 band-passed to [f_lo, f_hi].
            mono = audio_2d[:, 0] if audio_2d.ndim == 2 else audio_2d
            N = len(mono)
            if N < 8: return -999.0
            nyq = sr / 2.0
            lo = max(f_lo / nyq, 1e-4)
            hi = min(f_hi / nyq, 0.9999)
            if lo >= hi: return -999.0
            # Near-DC lower edge → plain low-pass instead of band-pass.
            if lo < 1e-3:
                b2, a2 = sig.butter(4, hi, btype="low")
            else:
                b2, a2 = sig.butter(4, [lo, hi], btype="band")
            filtered = sig.filtfilt(b2, a2, mono)
            return _rms_dbfs(filtered)

        print()
        band_hdr = (f" {'banda':<12} {'GT_res RMS':>10} {'iter rec RMS':>13}"
                    f" {'diff':>6} {'stato'}")
        print(f" Analisi spettrale — {item['file']} (LF/HF split @ {BAND_CROSSOVER_HZ:.0f} Hz)")
        print(f" {'─'*76}")
        print(band_hdr)
        print(f" {'─'*76}")
        for bname, f_lo, f_hi in BANDS_SPEC:
            gt_db = band_energy(gt_res_raw, sr, f_lo, f_hi)
            iter_db = band_energy(res_raw, sr, f_lo, f_hi)
            # Tag which branch of the hybrid handled this band.
            is_hf = f_lo >= BAND_CROSSOVER_HZ
            label = "v11 HF →" if is_hf else "ML LF →"
            if gt_db < -60:
                rec_str = " — (silenzio)"
                status = ""
            else:
                d = iter_db - gt_db
                status = ("OK" if d > -3 else
                          "~ parziale" if d > -9 else
                          "!! sotto")
                rec_str = f"{d:>+6.1f} dB {status}"
            line = f" {bname:<12} {gt_db:>+10.1f} {iter_db:>+13.1f} {rec_str} [{label}]"
            if _HAS_RICH:
                color = ("green" if "OK" in rec_str else
                         "yellow" if "~" in rec_str else
                         "red" if "!!" in rec_str else "dim")
                _console.print(f"[{color}]{line}[/]")
            else:
                print(line)
        print()

    # --- Summary across all exported files --------------------------------
    print(SEP)
    if diff_stats:
        vs_gt = [d[0] for d in diff_stats]
        cosims = [d[1] for d in diff_stats]
        nf_db = PINK_NOISE_LEVEL_DB + RESIDUAL_DBFS

        print(f"\n RIEPILOGO ({len(diff_stats)} file):")
        print(f" diff/GT_rms media : {np.mean(vs_gt):>+7.2f} dB")
        print(f" diff/GT_rms migliore: {np.min(vs_gt):>+7.2f} dB")
        print(f" diff/GT_rms peggiore: {np.max(vs_gt):>+7.2f} dB")
        print(f" cos_sim TD media : {np.mean(cosims):>8.4f} (1.0 = identici)")
        print()
        print(f" Floor fisico (rumore irrecuperabile): ≈ {nf_db:+.1f} dBFS")
        print(f" Soglia 'buona convergenza': diff/GT < −12 dB")
        verdict = ("OK eccellente" if np.mean(vs_gt) < -12 else
                   "~ buona" if np.mean(vs_gt) < -6 else
                   "INFO compatibile con noise floor")
        print(f" Verdetto: {verdict}")

    print(f"\n WAV → {out_dir}/")
    print(f" Formato: float32 (usa editor che supporta >0 dBFS)")
    print(f" Nome: <stem>__<N>_<traccia>.wav")
|
|
|
|
| |
| |
| |
|
|
def print_report(study: "optuna.Study", top_n: int = 20):
    """Print the top-*top_n* completed trials and the best configuration.

    Completed trials are sorted by score (descending; a None score counts
    as 0). Uses a rich table when available, plain text otherwise, and ends
    with a copy-pasteable hf_params/lf_params block for the best trial.
    """
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        key=lambda t: t.value or 0, reverse=True,
    )
    if not trials:
        print("Nessun trial completato.")
        return

    if _HAS_RICH:
        _console.rule("[bold cyan]RISULTATI SWEEP BAYESIANO — HYBRID SPADE[/]")
        tbl = Table(show_header=True, header_style="bold cyan", show_lines=False)
        for col, w in [
            ("#",4),("score",9),
            ("HF_ddb",6),("HF_win",6),("HF_rel",6),("HF_gain",6),("HF_eps",5),("HF_iter",5),
            ("LF_ddb",6),("LF_gain",6),("LF_rel",6),
        ]:
            tbl.add_column(col, justify="right", width=w)
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            # Window/hop are reconstructed from the sampled exponent/divisor.
            win = 2 ** p.get("hf_win_exp", 11)
            hop = win // p.get("hf_hop_div", 4)  # NOTE(review): unused in this branch
            sty = "bold green" if rank == 1 else ("yellow" if rank <= 3 else "")
            tbl.add_row(
                str(rank), f"{t.value:.5f}",
                f"{p['hf_delta_db']:.2f}", str(win),
                f"{p['hf_release_ms']:.0f}", f"{p['hf_max_gain_db']:.1f}",
                str(p['hf_eps']), str(p['hf_max_iter']),
                f"{p['lf_delta_db']:.2f}", f"{p['lf_max_gain_db']:.1f}",
                f"{p['lf_release_ms']:.0f}",
                style=sty,
            )
        _console.print(tbl)
    else:
        # Plain-text fallback table.
        hdr = (f"{'#':>3} {'score':>8} {'HFddb':>5} {'HFwin':>5}"
               f" {'HFrel':>5} {'HFgain':>6} {'HFeps':>5} {'HFiter':>5}"
               f" {'LFddb':>5} {'LFgain':>6} {'LFrel':>5}")
        print(hdr); print("─" * len(hdr))
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            win = 2 ** p.get("hf_win_exp", 11)
            print(f"{rank:>3} {t.value:>8.5f} {p['hf_delta_db']:>5.2f}"
                  f" {win:>5} {p['hf_release_ms']:>5.0f}"
                  f" {p['hf_max_gain_db']:>6.1f} {str(p['hf_eps']):>5}"
                  f" {p['hf_max_iter']:>5}"
                  f" {p['lf_delta_db']:>5.2f} {p['lf_max_gain_db']:>6.1f}"
                  f" {p['lf_release_ms']:>5.0f}")

    # --- Copy-pasteable config block for the best trial --------------------
    best = trials[0]
    p = best.params
    win = 2 ** p.get("hf_win_exp", 11)
    hop = win // p.get("hf_hop_div", 4)
    print("\n" + "═" * 60)
    print("CONFIG OTTIMALE — HYBRID SPADE")
    print("═" * 60)
    print(f"""
hf_params = dict(
    hf_delta_db = {p['hf_delta_db']:.2f},
    hf_window_length = {win},
    hf_hop_length = {hop},
    hf_release_ms = {p['hf_release_ms']:.1f},
    hf_max_gain_db = {p['hf_max_gain_db']:.1f},
    hf_eps = {p['hf_eps']},
    hf_max_iter = {p['hf_max_iter']},
)
lf_params = dict(
    lf_delta_db = {p['lf_delta_db']:.2f},
    lf_max_gain_db = {p['lf_max_gain_db']:.1f},
    lf_release_ms = {p['lf_release_ms']:.1f},
)
""")
    print(f"→ Best score : {best.value:.5f}")
    n_pruned = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.PRUNED)
    print(f" Completed : {len(trials)} Pruned : {n_pruned}")
|
|
|
|
def save_csv(study: "optuna.Study"):
    """Dump every completed trial of *study* (best score first) to OUT_CSV."""
    import csv
    completed = [t for t in study.trials
                 if t.state == optuna.trial.TrialState.COMPLETE]
    completed.sort(key=lambda t: t.value or 0, reverse=True)
    if not completed:
        return
    fieldnames = ["rank", "score"] + list(completed[0].params.keys())
    with open(OUT_CSV, "w", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames)
        writer.writeheader()
        for rank, t in enumerate(completed, 1):
            row = {"rank": rank, "score": f"{t.value:.6f}"}
            for k, v in t.params.items():
                row[k] = f"{v:.4f}" if isinstance(v, float) else v
            writer.writerow(row)
    print(f"\n CSV salvato: {OUT_CSV}")
|
|
|
|
| |
| |
| |
|
|
| def main(): |
| p = argparse.ArgumentParser( |
| description="Hybrid SPADE (v11 HF + Unrolled LF) — Bayesian sweep", |
| formatter_class=argparse.ArgumentDefaultsHelpFormatter, |
| ) |
| p.add_argument("--base-dir", type=Path, default=Path("./Samples"), |
| help="Cartella radice contenente Kicks/, Snares/, ecc.") |
| p.add_argument("--model", type=Path, default=None, |
| dest="model_ckpt", |
| help="Checkpoint SPADEUnrolled (.pt). Ometti per baseline v11.") |
| p.add_argument("--trials", type=int, default=200) |
| p.add_argument("--resume", action="store_true", |
| help="Riprende uno studio Optuna esistente") |
| p.add_argument("--report", action="store_true", |
| help="Stampa solo il report dal DB esistente") |
| p.add_argument("--debug-export", type=int, default=0, |
| metavar="N", |
| help="Esporta le 6 tracce WAV per i primi N file del corpus") |
| p.add_argument("--debug-out", type=Path, default=Path("debug_export"), |
| help="Directory di output per --debug-export") |
| p.add_argument("--baseline-v11", action="store_true", |
| help="Usa solo v11 broadband (nessun modello ML) come baseline") |
| p.add_argument("--crossover-hz", type=float, default=BAND_CROSSOVER_HZ, |
| help="Frequenza di crossover LF/HF in Hz") |
| p.add_argument("--max-files", type=int, default=None, |
| help="Limita il corpus ai primi N file (test rapido)") |
| p.add_argument("--top", type=int, default=20, |
| help="Numero di trial da mostrare nel report") |
| p.add_argument("--db-path", type=str, default=f"sqlite:///{STUDY_NAME}.db", |
| help="SQLite URI per Optuna (default: sqlite:///hybrid_spade_v1.db)") |
| args = p.parse_args() |
|
|
| if not _HAS_V11: |
| print("[ERRORE] spade_declip_v11.py non trovato — uscita.") |
| sys.exit(1) |
|
|
| |
| print(f"\n Caricamento corpus da: {args.base_dir}") |
| corpus = build_corpus(args.base_dir, max_files=args.max_files) |
| if not corpus: |
| print("[ERRORE] Corpus vuoto — controlla --base-dir e le cartelle drum.") |
| sys.exit(1) |
| print(f" Corpus: {len(corpus)} file\n") |
|
|
| |
| lf_model = None |
| if args.model_ckpt is not None and not args.baseline_v11: |
| if not _HAS_UNROLLED: |
| print("[ERRORE] spade_unrolled.py / PyTorch non trovati.") |
| sys.exit(1) |
| ckpt = torch.load(args.model_ckpt, map_location="cpu") |
| cfg = UnrolledConfig(**ckpt["cfg"]) |
| model = SPADEUnrolled(cfg) |
| model.load_state_dict(ckpt["model"]) |
| model.eval() |
| lf_model = HybridSPADEInference( |
| model, |
| crossover_hz = args.crossover_hz, |
| lf_delta_db = DEBUG_LF["lf_delta_db"], |
| lf_max_gain_db = DEBUG_LF["lf_max_gain_db"], |
| device = "auto", |
| ) |
| print(f" Modello caricato: {args.model_ckpt}") |
| print(f" Crossover: {args.crossover_hz:.0f} Hz") |
| print(f" Parametri: {model.parameter_count():,} trainable\n") |
| else: |
| print(f" Modalità: {'baseline v11 broadband' if args.baseline_v11 else 'baseline v11 (nessun modello specificato)'}\n") |
|
|
| |
| if args.report: |
| if not _HAS_OPTUNA: |
| print("[ERRORE] optuna non trovato.") |
| sys.exit(1) |
| study = optuna.load_study(study_name=STUDY_NAME, storage=args.db_path) |
| print_report(study, top_n=args.top) |
| save_csv(study) |
| return |
|
|
| |
| if args.debug_export > 0: |
| |
| best_hf = dict(DEBUG_HF) |
| best_lf = dict(DEBUG_LF) |
| if _HAS_OPTUNA: |
| try: |
| study = optuna.load_study(study_name=STUDY_NAME, storage=args.db_path) |
| complete = [t for t in study.trials |
| if t.state == optuna.trial.TrialState.COMPLETE] |
| if complete: |
| bp = max(complete, key=lambda t: t.value or 0).params |
| win = 2 ** bp.get("hf_win_exp", 11) |
| hop = win // bp.get("hf_hop_div", 4) |
| best_hf = dict( |
| hf_delta_db = bp.get("hf_delta_db", 1.5), |
| hf_window_length = win, |
| hf_hop_length = hop, |
| hf_release_ms = bp.get("hf_release_ms", 0.0), |
| hf_max_gain_db = bp.get("hf_max_gain_db", 9.0), |
| hf_eps = bp.get("hf_eps", 0.05), |
| hf_max_iter = bp.get("hf_max_iter", 500), |
| ) |
| best_lf = dict( |
| lf_delta_db = bp.get("lf_delta_db", 1.5), |
| lf_max_gain_db = bp.get("lf_max_gain_db", 9.0), |
| lf_release_ms = bp.get("lf_release_ms", 0.0), |
| ) |
| print(f" Best trial caricato dal DB ({len(complete)} completati)") |
| except Exception: |
| pass |
|
|
| debug_export(corpus, args.base_dir, args.debug_out, |
| args.debug_export, best_hf, best_lf, lf_model) |
| return |
|
|
| |
# --- Bayesian sweep setup -------------------------------------------------
if not _HAS_OPTUNA:
    print("[ERRORE] optuna non trovato — pip install optuna")
    sys.exit(1)


# Multivariate TPE models correlations between the band parameters; the
# fixed seed keeps the sampling sequence reproducible. The pruner warms
# up for half the corpus before median-pruning underperforming trials.
sampler = TPESampler(multivariate=True, seed=42)
pruner = MedianPruner(n_startup_trials=10, n_warmup_steps=len(corpus)//2)


if args.resume:
    study = optuna.load_study(
        study_name=STUDY_NAME, storage=args.db_path,
        sampler=sampler, pruner=pruner,
    )
    print(f" Studio ripreso: {len(study.trials)} trial esistenti")
else:
    # NOTE(review): load_if_exists=True silently reuses a pre-existing
    # study with the same name even without --resume — confirm intended.
    study = optuna.create_study(
        study_name=STUDY_NAME, storage=args.db_path,
        direction="maximize",
        sampler=sampler, pruner=pruner,
        load_if_exists=True,
    )


# The objective closes over the corpus and the (optional) learned LF model.
objective = make_objective(corpus, lf_model)
|
|
| |
# Mutable progress state shared with the trial-end callbacks below.
_state = {
    "done": 0, "pruned": 0,
    "best": float("-inf"), "best_p": {}, "last": float("-inf"),
    "t0": time.time(),
}


# Probe optional progress-bar backends: rich preferred, tqdm fallback,
# otherwise a bare callback with no visual progress.
try:
    from rich.progress import (
        Progress, BarColumn, TextColumn,
        TimeElapsedColumn, TimeRemainingColumn, MofNCompleteColumn,
    )
    _has_rich_p = True
except ImportError:
    _has_rich_p = False


try:
    # NOTE(review): _tqdm_mod itself is never used (tqdm is re-imported
    # in the loop below) — this probe only sets the availability flag.
    import tqdm as _tqdm_mod
    _has_tqdm = True
except ImportError:
    _has_tqdm = False
|
|
def _on_trial_end(study, trial):
    # Optuna callback fired after every trial: accumulate the
    # completed/pruned counters and track the running best score.
    state = trial.state
    if state == optuna.trial.TrialState.PRUNED:
        _state["pruned"] += 1
        return
    if state != optuna.trial.TrialState.COMPLETE:
        return
    _state["done"] += 1
    score = trial.value or 0.0
    _state["last"] = score
    if score > _state["best"]:
        _state["best"] = score
        _state["best_p"] = dict(study.best_params)
|
|
t0 = time.time()
try:
    if _has_rich_p:
        # Rich progress bar with live score / best / pruned columns.
        progress = Progress(
            TextColumn("[bold cyan]Trial[/] [cyan]{task.completed}/{task.total}[/]"),
            BarColumn(bar_width=32),
            MofNCompleteColumn(),
            TextColumn(" score [green]{task.fields[last]:.5f}[/]"),
            TextColumn(" best [bold green]{task.fields[best]:.5f}[/]"),
            TextColumn(" [dim]pruned {task.fields[pruned]}[/]"),
            TimeElapsedColumn(), TextColumn("ETA"), TimeRemainingColumn(),
            refresh_per_second=4,
        )
        def _on_trial_rich(study, trial):
            # task_id is bound before optimize() starts, so the closure
            # is resolved by the time this callback fires.
            _on_trial_end(study, trial)
            progress.update(task_id, advance=1,
                            last=_state["last"],
                            # clamp so the initial -inf never reaches the format spec
                            best=max(_state["best"], 0.0),
                            pruned=_state["pruned"])
        with progress:
            task_id = progress.add_task(
                "sweep", total=args.trials,
                last=0.0, best=0.0, pruned=0,
            )
            study.optimize(objective, n_trials=args.trials,
                           callbacks=[_on_trial_rich],
                           show_progress_bar=False)
    elif _has_tqdm:
        import tqdm
        pbar = tqdm.tqdm(total=args.trials, unit="trial")
        def _on_trial_tqdm(study, trial):
            _on_trial_end(study, trial)
            pbar.update(1)
            pbar.set_postfix(score=f"{_state['last']:.5f}",
                             best=f"{_state['best']:.5f}",
                             pruned=_state["pruned"])
        study.optimize(objective, n_trials=args.trials,
                       callbacks=[_on_trial_tqdm], show_progress_bar=False)
        pbar.close()
    else:
        # No progress backend: run with only the bare stats callback.
        study.optimize(objective, n_trials=args.trials,
                       callbacks=[_on_trial_end], show_progress_bar=False)
except KeyboardInterrupt:
    # Optuna persists each finished trial to storage as it completes, so
    # Ctrl-C loses at most the in-flight trial.
    print("\n[!] Interrotto — risultati parziali salvati.")
|
|
# --- Final summary: recount from study.trials (authoritative across resumes)
elapsed = time.time() - t0
n_done = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE)
n_prune = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.PRUNED)
print(f"\n Completati: {n_done} | Pruned: {n_prune}"
      f" | Tempo: {elapsed/60:.1f} min"
      f" | Media: {elapsed/max(n_done+n_prune,1):.1f} s/trial")


print_report(study, top_n=args.top)
save_csv(study)
print("\nDone.")
|
|
|
|
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
|
|