# Source path: mymodel/run_smart_sweep_old3.py
# NOTE: the raw lines that preceded this header ("simone00's picture",
# "Add files using upload-large-folder tool", "17d4058 verified") were
# hosting-page artifacts, not Python — kept here as a comment so the module
# docstring below is again the first statement and the file parses.
"""
run_smart_sweep.py — S-SPADE · Bayesian parameter search (v2)
===================================================================
PIPELINE GROUND-TRUTH (Case 1 — threshold-based limiter)
---------------------------------------------------------
Il limiter sintetico è threshold-based:
- Originale normalizzato a 0 dBFS peak
- Limiter: attua solo sui picchi sopra la soglia → output max peak ≈ −threshold_db
- Il CORPO del segnale (loudness percepita) rimane invariato per definizione
- NON si applica nessun gain al segnale limitato dopo il processing
Allineamento per il calcolo residual:
Originale e limited sono già sulla stessa scala (loudness uguale, picchi diversi).
Nessuna normalizzazione LUFS / RMS necessaria o corretta.
GT_res = original_0dBFS − limited (scale identiche)
res_iter = spade_output − limited (idem)
Entrambi vengono poi normalizzati a RESIDUAL_DBFS peak SOLO per rendere
comparabili file con diversi livelli assoluti — non altera la logica.
Metrica ideale:
GT_res ≡ res_iter → cosine_sim = 1.0 → differenza = −∞ dB
Ottimizzatore: Optuna TPE (Bayesian) + MedianPruner
Storage: SQLite (riprendibile con --resume)
Corpus: tutti i drum sample in Kicks / Snares / Perc / Tops
DIPENDENZE
----------
pip install numpy scipy soundfile optuna rich
(pyloudnorm NON necessario)
USO
---
python run_smart_sweep.py # 200 trial
python run_smart_sweep.py --trials 50 # test rapido
python run_smart_sweep.py --resume # riprende da DB
python run_smart_sweep.py --report # solo risultati
python run_smart_sweep.py --base-dir /path/SPADE # cartella custom
"""
import argparse
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Dict, List, Optional
# ── AMD ROCm performance tuning ───────────────────────────────────────────────
# Must be set BEFORE any torch/ROCm import.
#
# HSA_ENABLE_SDMA=0: disables the DMA engine for host↔device transfers.
# On RDNA (RX 6700 XT and similar) the SDMA engine has high latency for
# small batches (<1 MB).  Using compute-shader blits instead, the first
# transfer is 3-5x faster.  No effect on large batches.
#
# GPU_MAX_HW_QUEUES=4: caps the hardware queues at 4 (default=8 on RDNA).
# With 8 queues and a single dispatch stream the driver spreads the waves
# across different queues, causing serialisation.  With 4 they concentrate on
# the same ring buffer and scheduling latency drops.
#
# HSA_OVERRIDE_GFX_VERSION: only if needed (RX 6700 XT = gfx1031 → OK as-is).
# NOTE(review): the performance rationale above restates the original
# author's claims — not independently re-verified here.
os.environ.setdefault("HSA_ENABLE_SDMA", "0")
os.environ.setdefault("GPU_MAX_HW_QUEUES", "4")
import numpy as np
import scipy.signal as sig
import soundfile as sf
logging.getLogger("optuna").setLevel(logging.WARNING)
# ── optuna ───────────────────────────────────────────────────────────────────
try:
import optuna
from optuna.samplers import TPESampler
from optuna.pruners import MedianPruner
_HAS_OPTUNA = True
except ImportError:
_HAS_OPTUNA = False
warnings.warn("optuna non trovato — pip install optuna")
# ── rich ─────────────────────────────────────────────────────────────────────
try:
from rich.console import Console
from rich.table import Table
_console = Console()
_HAS_RICH = True
except ImportError:
_HAS_RICH = False
_console = None
# ── spade_declip ─────────────────────────────────────────────────────────────
try:
from spade_declip_v12 import (
declip, DeclipParams,
# Internals needed for the GPU mega-batch path in evaluate_corpus_gpu_mega:
_compute_masks, _dilate_masks_soft, _macro_expand_pass,
_build_lf_mask, _sspade_batch_gpu,
ClippingMasks,
)
_HAS_SPADE = True
except ImportError:
_HAS_SPADE = False
warnings.warn("spade_declip_v12.py non trovato")
# =============================================================================
# CONFIG
# =============================================================================
DRUM_DIRS = ["Kicks", "Snares", "Perc", "Tops"]
# ── Synthetic limiter ─────────────────────────────────────────────────────────
# Case 1: threshold-based.
# Original @ 0 dBFS peak → the limiter acts only on peaks above the
# threshold → output max peak ≈ −LIMITER_THRESHOLD_DB dBFS, loudness
# unchanged.  The limited signal receives NO gain whatsoever afterwards.
LIMITER_THRESHOLD_DB = 3.0  # dB below the ceiling (positive)
LIMITER_RELEASE_MS = 80.0   # release of the synthetic limiter (ms)
# attack = 1 sample → a true brickwall
# Residual normalisation — ONLY for cross-file comparability.
# GT and iter residuals are scaled identically, so the comparison is unchanged.
RESIDUAL_DBFS = -3.0
# ── Background pink noise ─────────────────────────────────────────────────────
# Simulates a musical bed under the drum transient.
# It is mixed into the sample (already at 0 dBFS peak) BEFORE the limiter.
# This guarantees that:
#   - the limiter acts on a realistic drum + music-background signal
#   - SPADE receives the same mix and must work in realistic conditions
#   - GT_res = (drum+noise) − limiter(drum+noise) reflects the real situation
# Level is relative to the drum sample's peak.  −20 dB = a bed well below the
# transient, audible but not dominant (like a kick over a drum loop).
PINK_NOISE_LEVEL_DB = -20.0  # dB rel. to the drum peak (negative = below)
# Optuna
STUDY_NAME = "spade_smart_v2_thr3db"
OUT_CSV = "smart_sweep_results.csv"
# FIXED SPADE solver parameters (invariant across all trials)
FIXED_SOLVER = dict(
    algo = "sspade",
    frame = "rdft",
    mode = "soft",
    s = 1,
    r = 1,
    n_jobs = 1,
    verbose = False,
    show_progress = False,
    use_gpu = True,
    # multiband and macro_expand belong to the search space
)
# Multiband crossover (fixed for comparability across trials)
# 250 Hz separates: LF = body/punch of the kick | HF = transient/attack
BAND_CROSSOVER_HZ = 250.0
# =============================================================================
# HELPERS
# =============================================================================
def ensure_2d(a: np.ndarray) -> np.ndarray:
    """Return *a* with an explicit channel axis: (N,) becomes (N, 1)."""
    if a.ndim == 1:
        return a[:, None]
    return a
def normalize_to_0dBFS(a: np.ndarray) -> np.ndarray:
    """Scale to 0 dBFS peak — used only on the original as a common reference."""
    peak = np.max(np.abs(a))
    if peak <= 1e-12:
        # Effectively silent input: return unchanged to avoid dividing by ~0.
        return a
    return a / peak
def normalize_peak(a: np.ndarray, target_dbfs: float) -> np.ndarray:
    """
    Scale *a* so that its peak sits at target_dbfs dBFS.

    Used ONLY on residuals for cross-file comparability; the comparison logic
    is unaffected because GT and iter are scaled identically.
    """
    peak = np.max(np.abs(a))
    if peak <= 1e-12:
        # Silent input — nothing meaningful to scale.
        return a
    target_lin = 10.0 ** (target_dbfs / 20.0)
    return a * (target_lin / peak)
def generate_pink_noise(n_samples: int, n_channels: int, rng: np.random.Generator) -> np.ndarray:
    """
    Generate pink (1/f) noise by IIR-filtering white Gaussian noise.

    The third-order filter below is a standard economy approximation of a
    1/f power spectral density over the audio band.  Output has shape
    (n_samples, n_channels); channels are drawn independently and each is
    normalised to RMS = 1.0 (the final level is set later by the mix-in
    gain, e.g. PINK_NOISE_LEVEL_DB).
    """
    # Pinking filter H(z) = B(z)/A(z); all poles real and stable (|p| < 1).
    num = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    den = np.array([1.0, -2.494956002, 2.017265875, -0.522189400])
    out = np.empty((n_samples, n_channels))
    for ch in range(n_channels):
        shaped = sig.lfilter(num, den, rng.standard_normal(n_samples))
        rms = np.sqrt(np.mean(shaped ** 2))
        out[:, ch] = shaped / (rms + 1e-12)  # unit RMS per channel
    return out
def mix_pink_noise(
    audio_0dBFS: np.ndarray,
    sr: int,
    level_db: float,
    rng: np.random.Generator,
) -> np.ndarray:
    """
    Mix pink noise into the signal at a level relative to its own peak.

    level_db < 0 → the noise sits below the drum's peak (e.g. −20 dB).
    The noise lasts as long as the sample; for stereo input the channels are
    independent (decorrelated, like a real musical background).
    The output may exceed 0 dBFS by a fraction of a dB — that is correct:
    the limiter that follows brings it back under the threshold.
    """
    was_1d = audio_0dBFS.ndim == 1
    buf = ensure_2d(audio_0dBFS)
    n_samp, n_ch = buf.shape
    bed = generate_pink_noise(n_samp, n_ch, rng)  # unit RMS per channel
    # Absolute linear gain: the noise level is relative to the drum's peak.
    noise_gain = np.max(np.abs(buf)) * (10 ** (level_db / 20.0))
    blend = buf + bed * noise_gain
    # NO normalisation here: the 0 dBFS normalisation happens in build_corpus
    # right after, on the whole mix (drum + noise), before anything else.
    return blend[:, 0] if was_1d else blend
# =============================================================================
# SYNTHETIC LIMITER (Case 1 — threshold-based, brickwall, 1-sample attack)
# =============================================================================
def apply_brickwall_limiter(
    audio_0dBFS: np.ndarray,
    sr: int,
    threshold_db: float = LIMITER_THRESHOLD_DB,
    release_ms: float = LIMITER_RELEASE_MS,
) -> np.ndarray:
    """
    Threshold-based brickwall limiter.

    Tries the GPU (Hillis-Steele parallel prefix scan, O(log N) depth) if
    PyTorch + CUDA/ROCm are available, then Numba JIT, then an optimised
    numpy loop.  All three paths implement the same gain recurrence:
    instant attack, one-pole release towards the instantaneous target.

    Parameters
    ----------
    audio_0dBFS : already at 0 dBFS peak, shape (N,) or (N, C)
    sr          : sample rate in Hz (converts release_ms to samples)
    threshold_db: threshold below the ceiling in dB (sign ignored via abs())
    release_ms  : release time constant of the gain envelope, in ms

    Returns
    -------
    Limited signal, same shape as the input — NOT boosted, NOT clipped.
    """
    # Linear threshold; abs() makes the sign convention irrelevant.
    thr_lin = 10 ** (-abs(threshold_db) / 20.0)
    # One-pole release coefficient rc = exp(-1 / release_samples).
    rc = np.exp(-1.0 / max(release_ms * sr / 1000.0, 1e-9))
    audio = ensure_2d(audio_0dBFS).copy().astype(np.float32)
    N, C = audio.shape
    # ── GPU path (preferred) ──────────────────────────────────────────────
    try:
        import torch
        if torch.cuda.is_available():
            dev = "cuda"
            out = np.empty_like(audio)
            for c in range(C):
                x_t = torch.from_numpy(audio[:, c]).to(device=dev)
                y_t = _brickwall_limiter_gpu(x_t, thr_lin, rc)
                out[:, c] = y_t.cpu().numpy()
            return out[:, 0] if audio_0dBFS.ndim == 1 else out
    except Exception:
        pass  # fall through to CPU paths
    # ── Numba JIT path ────────────────────────────────────────────────────
    try:
        from numba import njit

        @njit(cache=True)
        def _limiter_loop_nb(ch: np.ndarray, thr: float, rc: float,
                             g_out: np.ndarray) -> None:
            # Sequential gain envelope: instant attack, one-pole release.
            env = 1.0
            for n in range(len(ch)):
                pk = abs(ch[n])
                target = thr / pk if pk > thr else 1.0
                env = target if target < env else rc * env + (1.0 - rc) * target
                g_out[n] = env

        out = np.empty(audio.shape, dtype=np.float32)
        for c in range(C):
            g = np.empty(N, dtype=np.float32)
            _limiter_loop_nb(audio[:, c].astype(np.float64), thr_lin, rc, g)
            out[:, c] = audio[:, c] * g
        return out[:, 0] if audio_0dBFS.ndim == 1 else out
    except ImportError:
        pass
    # ── Pure-numpy fallback ───────────────────────────────────────────────
    out = np.empty_like(audio)
    for c in range(C):
        ch = audio[:, c].astype(np.float64)
        pk = np.abs(ch)
        # Instantaneous gain target: thr/|x| above threshold, else unity.
        g_instant = np.where(pk > thr_lin, thr_lin / np.maximum(pk, 1e-12), 1.0)
        g = np.empty(N)
        env = 1.0
        gi = g_instant
        for n in range(N):
            t = gi[n]
            # Instant attack (jump down to target), one-pole release upwards.
            env = t if t < env else rc * env + (1.0 - rc) * t
            g[n] = env
        out[:, c] = ch * g
    return out[:, 0] if audio_0dBFS.ndim == 1 else out
# =============================================================================
# COSINE SIMILARITY TF
# =============================================================================
def cosine_sim_tf(
    gt: np.ndarray,
    est: np.ndarray,
    sr: int,
    win_samples: int = 1024,
    hop_samples: int = 256,
    n_bands: int = 12,
) -> float:
    """
    Mean cosine similarity over time-frequency micro-windows.

    Both inputs are expected already at RESIDUAL_DBFS peak.  Returns a
    scalar in [0, 1]; the ideal target is 1.0.  Inputs too short for an
    STFT fall back to a plain time-domain cosine similarity.
    """
    n = min(gt.shape[0], est.shape[0])
    a = (gt[:n, 0] if gt.ndim == 2 else gt[:n]).copy()
    b = (est[:n, 0] if est.ndim == 2 else est[:n]).copy()

    def _plain_cos() -> float:
        # Fallback: single cosine similarity on the raw samples.
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

    nper = min(win_samples, max(32, n // 4))
    hop = min(hop_samples, nper // 2)
    if n < nper or nper < 32:
        return _plain_cos()
    stft_kw = dict(fs=sr, window="hann", nperseg=nper,
                   noverlap=nper - hop, boundary=None, padded=False)
    Za = sig.stft(a, **stft_kw)[2]
    Zb = sig.stft(b, **stft_kw)[2]
    n_freqs, n_frames = Za.shape
    if n_frames == 0:
        return _plain_cos()
    # Log-spaced band edges over the frequency bins.
    band_edges = np.unique(np.round(
        np.logspace(0, np.log10(max(n_freqs, 2)), min(n_bands, n_freqs) + 1)
    ).astype(int))
    band_edges = np.clip(band_edges, 0, n_freqs)
    frame_sims: list = []
    for lo, hi in zip(band_edges[:-1], band_edges[1:]):
        lo, hi = int(lo), int(hi)
        if hi <= lo:
            continue
        Ma = np.abs(Za[lo:hi, :])
        Mb = np.abs(Zb[lo:hi, :])
        num = np.sum(Ma * Mb, axis=0)
        den = ((np.sqrt(np.sum(Ma ** 2, axis=0)) + 1e-12)
               * (np.sqrt(np.sum(Mb ** 2, axis=0)) + 1e-12))
        frame_sims.extend((num / den).tolist())
    return float(np.mean(frame_sims)) if frame_sims else 0.0
# =============================================================================
# CORPUS
# =============================================================================
def build_corpus(base_dir: Path, max_files: Optional[int] = None) -> List[Dict]:
    """
    For every drum sample:
      1. Load and normalise to 0 dBFS peak (common cross-file reference)
      2. Mix in pink noise at PINK_NOISE_LEVEL_DB rel. to the peak  ← NEW
         The mix happens in float (it may temporarily exceed 0 dBFS)
      3. Normalise the mix (drum + noise) to 0 dBFS peak
         Common reference before the whole downstream pipeline
      4. Apply the synthetic limiter to the normalised (drum + noise) → limited
      5. GT_res_raw = (drum + noise) − limited   (same scale, no gain)
      6. Discard files where the limiter never engages
      7. Normalise GT_res to RESIDUAL_DBFS (cross-file comparability only)

    The noise is reproducible: each file uses a deterministic seed derived
    from its index in the corpus, so trials stay comparable with each other.

    Parameters
    ----------
    base_dir  : root folder containing the DRUM_DIRS sub-folders
    max_files : optional cap on the number of corpus items

    Returns
    -------
    List of dicts with keys "file", "sr", "limited", "gt_res".
    """
    corpus = []
    extensions = {".wav", ".flac", ".aif", ".aiff"}
    file_index = 0  # used for the deterministic noise seed
    for folder in DRUM_DIRS:
        d = base_dir / folder
        if not d.exists():
            print(f" [WARN] Cartella non trovata: {d}")
            continue
        for f in sorted(d.glob("*")):
            if f.suffix.lower() not in extensions:
                continue
            try:
                audio, sr = sf.read(str(f), always_2d=True)
                audio = audio.astype(float)
            except Exception as exc:
                print(f" [WARN] {f.name}: {exc}")
                continue
            if audio.shape[0] < 64:
                # Too short to be a usable sample.
                continue
            # 1. 0 dBFS peak
            orig = normalize_to_0dBFS(audio)
            # 2. Pink-noise mix — deterministic seed for reproducibility
            rng = np.random.default_rng(seed=file_index)
            orig_with_noise = ensure_2d(mix_pink_noise(orig, sr,
                                                       PINK_NOISE_LEVEL_DB, rng))
            file_index += 1
            # 3. Normalise the mix to 0 dBFS peak — common reference before
            #    the whole pipeline.  The float mix may have exceeded 0 dBFS;
            #    this normalisation removes that issue before the limiter.
            orig_with_noise = ensure_2d(normalize_to_0dBFS(orig_with_noise))
            # 4. Synthetic limiter on (drum + noise) @0dBFS — no gain afterwards
            limited = ensure_2d(apply_brickwall_limiter(orig_with_noise, sr))
            # 5. Raw residual — same scale, zero adjustments
            gt_res_raw = orig_with_noise - limited
            # 6. Check the limiter actually engaged
            if np.max(np.abs(gt_res_raw)) < 1e-6:
                print(f" [SKIP] {f.name} — picco sotto la soglia, limiter inattivo")
                continue
            # 7. Normalise to RESIDUAL_DBFS only for cross-file comparability
            gt_res = normalize_peak(gt_res_raw, RESIDUAL_DBFS)
            corpus.append({
                "file" : f.name,
                "sr" : sr,
                "limited" : limited,  # SPADE input = drum + noise + limiter
                "gt_res" : gt_res,    # target residual
            })
            if max_files and len(corpus) >= max_files:
                return corpus
    return corpus
# =============================================================================
# SINGLE-FILE EVALUATION
# =============================================================================
def evaluate_one(item: Dict, params: dict) -> Optional[float]:
    """
    Run SPADE on the limited signal, compute its residual and compare it
    against the ground-truth residual.

    `params` holds pure SPADE parameters plus high-level flags:
        multiband    (bool)  -- split LF/HF, process the bands separately
        macro_expand (bool)  -- envelope pre-pass for LF body recovery
        macro_ratio  (float) -- expansion ratio (1.0 = bypass)
        lf_delta_db  (float) -- delta_db for the LF band (<= BAND_CROSSOVER_HZ);
                                the standard delta_db is used for the HF band
        lf_cutoff_hz (float) -- v12: Hz below which LF bins are reserved (0 = off)
        lf_k_min     (int)   -- v12: guaranteed LF slots per ADMM iteration

    Returns the cosine similarity in [0, 1], or None on failure.
    """
    try:
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]
        # Extract high-level flags (they are not direct DeclipParams fields)
        p2 = dict(params)  # copy so the caller's dict is not mutated
        multiband = p2.pop("multiband", False)
        macro_expand = p2.pop("macro_expand", False)
        macro_ratio = p2.pop("macro_ratio", 1.0)
        lf_delta_db = p2.pop("lf_delta_db", p2.get("delta_db", 1.5))
        # v12: stratified-thresholding params go straight to DeclipParams
        # (they are already inside p2, no separate pop needed)
        spade_kw = dict(
            multiband = multiband,
            macro_expand = macro_expand,
            macro_ratio = macro_ratio if macro_expand else 1.0,
            macro_release_ms = 200.0,
            macro_attack_ms = 10.0,
        )
        if multiband:
            spade_kw["band_crossovers"] = (BAND_CROSSOVER_HZ,)
            spade_kw["band_delta_db"] = (lf_delta_db, p2["delta_db"])
        p = DeclipParams(sample_rate=sr, **FIXED_SOLVER, **p2, **spade_kw)
        fixed, _ = declip(limited, p)
        fixed_2d = ensure_2d(fixed)
        # Generated residual — same scale as the input, no gain applied
        res_raw = fixed_2d - limited
        res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)
        # GPU cosine sim when available, CPU fallback otherwise
        try:
            import torch
            g = gt_res[:, 0] if gt_res.ndim == 2 else gt_res
            e = (res_iter[:, 0] if res_iter.ndim == 2 else res_iter).astype(np.float32)
            dev = "cuda" if torch.cuda.is_available() else "cpu"
            g_t = torch.from_numpy(g.astype(np.float32)).to(dev)
            e_t = torch.from_numpy(e).to(dev)
            Lmin = min(g_t.shape[0], e_t.shape[0])
            return _cosine_sim_gpu(g_t[:Lmin], e_t[:Lmin])
        except Exception:
            return cosine_sim_tf(gt_res, res_iter, sr)
    except Exception as exc:
        warnings.warn(f"evaluate_one ({item['file']}): {exc}")
        return None
# =============================================================================
# GPU MEGA-BATCH (v12 — AMD RX 6700 XT optimisation)
# =============================================================================
# =============================================================================
# GPU PIPELINE — tutti i pass su GPU (v13)
# =============================================================================
#
# Architettura
# ------------
# _brickwall_limiter_gpu : limiter con Hillis-Steele parallel prefix scan
# _compute_masks_gpu : boolean tensor ops
# _dilate_masks_gpu : F.max_pool1d invece di np.convolve
# _extract_frames_gpu : tensor.unfold → frame batch senza loop Python
# _wola_gpu : scatter_add_ → overlap-add senza loop Python
# _rms_match_gpu : F.max_pool1d per near-clip + ops tensore
# _cosine_sim_gpu : torch.stft → cosine sim senza scipy
# evaluate_corpus_gpu_mega : pipeline completa — zero numpy nel hot-path
# =============================================================================
def _brickwall_limiter_gpu(
    audio_t: "torch.Tensor",  # (L,) or (C, L) float32
    thr_lin: float,
    rc: float,
) -> "torch.Tensor":
    """
    Brickwall limiter on GPU via a Hillis-Steele parallel prefix scan.

    Causal recurrence:
        env[n] = min(target[n], rc * env[n-1] + (1-rc) * target[n])

    Each step is a clamped-linear function f(y) = min(t, r*y + c) with
        r_n = rc,  c_n = (1-rc)*target[n],  t_n = target[n]
    Composition operator h_a ⋆ h_b (apply h_a first, then h_b):
        r_ab = r_a * r_b
        c_ab = r_b * c_a + c_b
        t_ab = min(t_b, r_b * t_a + c_b)
    The operator is associative, so a Hillis-Steele inclusive prefix scan
    computes every prefix in O(log N) depth:
        scan[n] = f_0 ⋆ f_1 ⋆ ... ⋆ f_n
        env[n]  = scan[n](env_init=1.0) = min(t[n], r[n] + c[n])

    Fix: the previous revision imported `math as _m` without ever using it —
    the dead import is removed.
    """
    import torch
    squeeze = audio_t.dim() == 1
    if squeeze:
        audio_t = audio_t.unsqueeze(0)  # (1, L)
    C, L = audio_t.shape
    dev = audio_t.device
    dt = audio_t.dtype
    # ── Step 1: instantaneous gain target (fully parallel) ────────────────
    pk = audio_t.abs().clamp(min=1e-12)
    target = (thr_lin / pk).clamp(max=1.0)  # (C, L)
    # ── Step 2: Hillis-Steele prefix scan ─────────────────────────────────
    inv_rc = 1.0 - rc
    # Each position n represents f_n: (r=rc, c=(1-rc)*target[n], t=target[n])
    r = torch.full((C, L), rc, device=dev, dtype=dt)
    c = target * inv_rc  # (C, L)
    t = target.clone()   # (C, L)
    d = 1
    while d < L:
        # Clone previous step (Hillis-Steele needs read-before-write)
        r_p = r.clone()
        c_p = c.clone()
        t_p = t.clone()
        # For positions i >= d: scan[i] = scan_prev[i-d] ⋆ scan_prev[i]
        r_l = r_p[:, :-d]; c_l = c_p[:, :-d]; t_l = t_p[:, :-d]
        r_r = r_p[:, d:];  c_r = c_p[:, d:];  t_r = t_p[:, d:]
        r[:, d:] = r_l * r_r
        c[:, d:] = r_r * c_l + c_r
        t[:, d:] = torch.minimum(t_r, r_r * t_l + c_r)
        d *= 2
    # ── Step 3: evaluate every prefix at env_init = 1.0 ───────────────────
    env = torch.minimum(t, r + c)  # (C, L)
    out = audio_t * env
    return out.squeeze(0) if squeeze else out
def _compute_masks_gpu(
    yc_t: "torch.Tensor",  # (L,) float
    thresh: float,
) -> "tuple[torch.Tensor, torch.Tensor, torch.Tensor]":
    """GPU version of _compute_masks. Returns (Ir, Icp, Icm) bool tensors."""
    pos_clip = yc_t >= thresh    # samples at/above the positive rail
    neg_clip = yc_t <= -thresh   # samples at/below the negative rail
    reliable = ~(pos_clip | neg_clip)
    return reliable, pos_clip, neg_clip
def _dilate_masks_gpu(
    Icp_t: "torch.Tensor",  # (L,) bool
    Icm_t: "torch.Tensor",  # (L,) bool
    yc_t: "torch.Tensor",   # (L,) float
    rel_samp: int,
) -> "tuple[torch.Tensor, torch.Tensor, torch.Tensor]":
    """
    GPU forward morphological dilation of the soft-mode clip masks.

    Causal dilation: every True sample in Icp/Icm infects the following
    rel_samp positions (equivalent to a causal boxcar convolution of length
    rel_samp + 1).  Implemented as left-pad + F.max_pool1d instead of
    np.convolve.  The dilated union is then re-split by the sign of yc_t.
    """
    import torch.nn.functional as F
    if rel_samp <= 0:
        # Nothing to dilate — just recompute the reliable mask.
        return ~(Icp_t | Icm_t), Icp_t, Icm_t
    n = yc_t.shape[0]
    union = (Icp_t | Icm_t).float().unsqueeze(0).unsqueeze(0)  # (1, 1, L)
    padded = F.pad(union, (rel_samp, 0), value=0.0)            # causal left-pad
    pooled = F.max_pool1d(padded, kernel_size=rel_samp + 1, stride=1)
    dilated = pooled.squeeze().bool()[:n]
    pos = dilated & (yc_t >= 0)
    neg = dilated & (yc_t < 0)
    return ~(pos | neg), pos, neg
def _extract_frames_gpu(
    yc_t: "torch.Tensor",   # (L,) float — DC-removed, normalised
    Ir_t: "torch.Tensor",   # (L,) bool
    Icp_t: "torch.Tensor",  # (L,) bool
    Icm_t: "torch.Tensor",  # (L,) bool
    M: int,
    a: int,
    win_t: "torch.Tensor",  # (M,) float
    thresh: float,
) -> "tuple":
    """
    GPU frame extraction using tensor.unfold — zero Python loops.

    Fixes vs. previous revision:
    - `import torch.nn.functional as F` does NOT bind the name `torch`, so
      the final `torch.arange(...)` raised NameError — `import torch` added.
    - The pad length was `N*a + M - L`, which makes `unfold` yield N+1
      frames while `N` and `idx1s_t` describe N, so `is_active` had N+1
      entries and no longer matched the documented (N,) shape.  The correct
      pad for frames starting at 0, a, ..., (N-1)*a is `(N-1)*a + M - L`.

    Returns
    -------
    yc_active  : (n_active, M) float — windowed frames for SPADE
    Ir_active  : (n_active, M) bool
    Icp_active : (n_active, M) bool
    Icm_active : (n_active, M) bool
    is_active  : (N,) bool — bypass mask for ALL N frames
    N          : int — total number of frames
    idx1s_t    : (N,) long — start indices for WOLA
    """
    import math
    import torch
    import torch.nn.functional as F
    L = yc_t.shape[0]
    N = math.ceil(L / a)
    dev = yc_t.device
    # Pad so the last frame (start (N-1)*a) is exactly M samples long.
    pad_len = max(0, (N - 1) * a + M - L)
    yc_pad  = F.pad(yc_t, (0, pad_len), value=0.0)
    Ir_pad  = F.pad(Ir_t.float(),  (0, pad_len), value=1.0).bool()
    Icp_pad = F.pad(Icp_t.float(), (0, pad_len), value=0.0).bool()
    Icm_pad = F.pad(Icm_t.float(), (0, pad_len), value=0.0).bool()
    # unfold: (L_padded,) → (N, M) — zero-copy strided view
    yc_frames  = yc_pad.unfold(0, M, a)   # (N, M)
    Ir_frames  = Ir_pad.unfold(0, M, a)   # (N, M) bool
    Icp_frames = Icp_pad.unfold(0, M, a)
    Icm_frames = Icm_pad.unfold(0, M, a)
    # Per-frame peak → bypass decision (fully parallel)
    frame_peaks = yc_frames.abs().amax(dim=-1)  # (N,)
    is_active = frame_peaks >= thresh           # (N,) bool
    # Active frames only, windowed for SPADE
    yc_active  = yc_frames [is_active] * win_t  # (n_active, M)
    Ir_active  = Ir_frames [is_active]
    Icp_active = Icp_frames[is_active]
    Icm_active = Icm_frames[is_active]
    idx1s_t = torch.arange(N, device=dev, dtype=torch.long) * a  # (N,)
    return yc_active, Ir_active, Icp_active, Icm_active, is_active, N, idx1s_t
def _wola_gpu(
    x_active_t: "torch.Tensor",  # (n_active, M) float — SPADE output
    is_active: "torch.Tensor",   # (N,) bool
    idx1s_t: "torch.Tensor",     # (N,) long — frame start indices
    yc_t: "torch.Tensor",        # (L,) float — original signal (for bypass)
    win_t: "torch.Tensor",       # (M,) float
    L: int,
    M: int,
) -> "torch.Tensor":
    """
    GPU WOLA overlap-add via scatter_add_ — zero Python loops.

    Bypassed frames accumulate yc * win², active frames accumulate
    x_spade * win, and the normaliser accumulates win² for ALL frames;
    the output is the ratio of the two accumulators.

    Fix vs. previous revision: `import torch.nn.functional as F` does not
    bind the name `torch`, so `torch.arange`/`torch.zeros` raised NameError
    — `import torch` is now explicit.
    """
    import torch
    import torch.nn.functional as F
    dev = x_active_t.device
    dt = x_active_t.dtype
    win2 = win_t ** 2  # (M,)
    N = idx1s_t.shape[0]
    # Index matrix for ALL N frames: (N, M)
    col = torch.arange(M, device=dev, dtype=torch.long)
    idx_mat = idx1s_t.unsqueeze(1) + col.unsqueeze(0)  # (N, M)
    # Output buffers sized L+M to keep the tail frame in-bounds
    x_out = torch.zeros(L + M, device=dev, dtype=dt)
    norm_out = torch.zeros(L + M, device=dev, dtype=dt)
    # Normaliser: every frame contributes win²
    norm_vals = win2.unsqueeze(0).expand(N, -1)  # (N, M)
    norm_out.scatter_add_(0, idx_mat.reshape(-1), norm_vals.reshape(-1))
    # Bypassed frames: yc * win²
    byp_mask = ~is_active
    if byp_mask.any():
        byp_idx = idx_mat[byp_mask]       # (n_byp, M)
        yc_pad = F.pad(yc_t, (0, M))      # (L+M,)
        byp_val = yc_pad[byp_idx] * win2.unsqueeze(0)
        x_out.scatter_add_(0, byp_idx.reshape(-1), byp_val.reshape(-1))
    # Active frames: x_spade * win
    if is_active.any():
        act_idx = idx_mat[is_active]                      # (n_active, M)
        act_val = x_active_t.to(dt) * win_t.unsqueeze(0)  # (n_active, M)
        x_out.scatter_add_(0, act_idx.reshape(-1), act_val.reshape(-1))
    # Clamp the normaliser to avoid division by zero at unreached samples.
    norm_clamped = norm_out[:L].clamp(min=1e-12)
    return x_out[:L] / norm_clamped
def _rms_match_gpu(
    x_t: "torch.Tensor",   # (L,) float — reconstructed signal
    yc_t: "torch.Tensor",  # (L,) float — input (DC-removed)
    Ir_t: "torch.Tensor",  # (L,) bool — reliable samples
    M: int,
) -> "torch.Tensor":
    """
    GPU reliable-sample RMS match (v12 safe-Ir).

    Reliable samples within roughly M/2 of any clipped region are treated as
    contaminated and excluded — unless that would leave fewer than 100
    samples, in which case all reliable samples are used.  Near-clip
    detection uses F.max_pool1d instead of np.convolve; everything stays on
    the GPU and the rescaled x_t tensor is returned.
    """
    import torch.nn.functional as F
    if Ir_t.sum() == 0:
        # No reliable samples at all — nothing to match against.
        return x_t
    clipped = (~Ir_t).float().unsqueeze(0).unsqueeze(0)  # (1, 1, L)
    pooled = F.max_pool1d(clipped, M, stride=1, padding=M // 2)
    near_clip = pooled.squeeze()[:len(Ir_t)] > 0
    safe = Ir_t & ~near_clip
    sel = safe if safe.sum() >= 100 else Ir_t
    rms_ref = yc_t[sel].pow(2).mean().sqrt()
    rms_cur = x_t[sel].pow(2).mean().sqrt()
    if rms_cur > 1e-12 and rms_ref > 1e-12:
        x_t = x_t * (rms_ref / rms_cur)
    return x_t
def _cosine_sim_gpu(
    gt_t: "torch.Tensor",   # (L,) float — GT residual
    est_t: "torch.Tensor",  # (L,) float — estimated residual
    win_samples: int = 1024,
    hop_samples: int = 256,
) -> float:
    """
    GPU cosine similarity via torch.stft.

    Frame-wise cosine similarity of the two magnitude spectrograms, averaged
    over frames.  Inputs too short for an STFT fall back to a plain
    time-domain cosine similarity.  Returns a float in [0, 1].
    """
    import torch
    n = min(gt_t.shape[0], est_t.shape[0])
    a = gt_t[:n].float()
    b = est_t[:n].float()
    nper = min(win_samples, max(32, n // 4))
    hop = min(hop_samples, nper // 2)
    if n < nper or nper < 32:
        # Too short for framing: single dot-product similarity.
        denom = a.norm() * b.norm() + 1e-12
        return (a * b).sum().item() / denom.item()
    w = torch.hann_window(nper, device=a.device)
    Sa = torch.stft(a, nper, hop, window=w,
                    return_complex=True, normalized=False).abs()  # (F, T)
    Sb = torch.stft(b, nper, hop, window=w,
                    return_complex=True, normalized=False).abs()  # (F, T)
    num = (Sa * Sb).sum(dim=0)                        # (T,)
    den = (Sa.norm(dim=0).clamp(min=1e-12)
           * Sb.norm(dim=0).clamp(min=1e-12))         # (T,)
    return (num / den).mean().item()
# ── GPU corpus cache — upload limited arrays to GPU once per build_corpus ──
# Keyed by (id(item), device_str). Cleared at program exit.
# NOTE(review): keying on id(item) assumes corpus dicts outlive the cache —
# if an item were garbage-collected and its id reused, a stale tensor could
# be returned.  Acceptable here since the corpus lives for the whole sweep.
_GPU_CORPUS_CACHE: dict = {}
def evaluate_corpus_gpu_mega(
    items: List[Dict],
    params_dict: dict,
    device: str,
) -> List[Optional[float]]:
    """
    Fully-GPU evaluation pipeline — v13.

    Pass 0  Fetch GPU tensors from the cache (one-time upload per corpus build).
    Pass 1  Per item: normalise + DC removal + masks + dilation + unfold (GPU).
            Collects the active frames into one mega-tensor.
    Pass 2  _sspade_batch_gpu — unchanged, already GPU.
    Pass 3  Per item: WOLA + RMS match + cosine similarity (GPU).
    No GPU→CPU transfer happens until the final scores.

    Compared to v12:
      - All Python loops removed from the hot path
      - ThreadPoolExecutor removed (GPU serialisation is already the bottleneck)
      - numpy used ONLY for corpus-array initialisation (build_corpus) and to
        collect the final scores (one .item() per file)

    Parameters
    ----------
    items       : corpus entries from build_corpus; each dict provides the
                  keys "limited", "gt_res", "sr" and "file" (read below)
    params_dict : trial parameters; the high-level flags (multiband,
                  macro_expand, macro_ratio, lf_delta_db) are extracted here,
                  everything else is forwarded to DeclipParams
    device      : torch device string ("cuda" or "cpu")

    Returns
    -------
    One score or None per item, in input order. Falls back to evaluate_one
    per item when torch/scipy are unavailable or multiband is requested
    (the mega-batch path does not implement multiband).
    """
    try:
        import torch
        # BUGFIX: scipy.signal.hann was removed in SciPy 1.13 — on modern
        # SciPy the old import raised ImportError and silently pushed the
        # whole corpus onto the slow per-item CPU fallback. Try the
        # scipy.signal.windows location first, then the legacy one.
        try:
            from scipy.signal.windows import hann as _hann
        except ImportError:
            from scipy.signal import hann as _hann
    except ImportError:
        return [evaluate_one(item, dict(params_dict)) for item in items]
    # ── Extract high-level flags (must not reach DeclipParams) ────────────
    p2 = dict(params_dict)
    multiband    = p2.pop("multiband", False)
    macro_expand = p2.pop("macro_expand", False)
    macro_ratio  = p2.pop("macro_ratio", 1.0)
    p2.pop("lf_delta_db", None)   # only meaningful in the multiband CPU path
    if multiband:
        # Multiband is only supported by the per-item CPU path.
        return [evaluate_one(item, dict(params_dict)) for item in items]
    # ── Build DeclipParams ────────────────────────────────────────────────
    sr_ref = items[0]["sr"]
    spade_kw = dict(
        macro_expand=macro_expand,
        macro_ratio=macro_ratio if macro_expand else 1.0,
        macro_release_ms=200.0,
        macro_attack_ms=10.0,
    )
    try:
        p = DeclipParams(sample_rate=sr_ref, **FIXED_SOLVER, **p2, **spade_kw)
    except Exception as exc:
        warnings.warn(f"evaluate_corpus_gpu_mega: DeclipParams error: {exc}")
        return [None] * len(items)
    M = p.window_length
    a = p.hop_length
    NORM_TGT = 0.9
    # sqrt-Hann analysis/synthesis window (WOLA-compatible), uploaded once.
    win_np = np.sqrt(_hann(M, sym=False)).astype(np.float32)
    win_t = torch.from_numpy(win_np).to(device=device)       # (M,) on GPU
    # ── LF mask tensor (v12 frequency-stratified thresholding) ────────────
    lf_mask_t = None
    if p.lf_cutoff_hz > 0.0 and p.lf_k_min > 0:
        lf_mask_np = _build_lf_mask(M, p.frame, sr_ref, p.lf_cutoff_hz)
        lf_mask_t = torch.tensor(lf_mask_np, dtype=torch.bool, device=device)
    g_max = (10.0 ** (p.max_gain_db / 20.0) if p.max_gain_db > 0.0
             else float("inf"))
    # ── Pass 0: GPU corpus cache ──────────────────────────────────────────
    # Upload item["limited"] / item["gt_res"] to the GPU once; reuse across
    # trials (keyed on id(item) + device).
    limited_gpu: list = []
    gt_res_gpu: list = []
    for item in items:
        key = (id(item), device)
        if key not in _GPU_CORPUS_CACHE:
            ltd = np.asarray(item["limited"], dtype=np.float32)
            if ltd.ndim == 2:
                ltd = ltd[:, 0]  # take L channel; corpus is mono-per-item
            _GPU_CORPUS_CACHE[key] = torch.from_numpy(ltd).to(device=device)
        limited_gpu.append(_GPU_CORPUS_CACHE[key])
        gt_key = (id(item), device, "gt")
        if gt_key not in _GPU_CORPUS_CACHE:
            gt = np.asarray(item["gt_res"], dtype=np.float32)
            if gt.ndim == 2:
                gt = gt[:, 0]
            _GPU_CORPUS_CACHE[gt_key] = torch.from_numpy(gt).to(device=device)
        gt_res_gpu.append(_GPU_CORPUS_CACHE[gt_key])
    # ── Pass 1: GPU preprocessing + frame extraction ──────────────────────
    # Items are processed sequentially; each step is a GPU kernel
    # (no Python per-sample work).
    item_states: list = []
    all_yc_active: list = []      # (n_i, M) tensors — will be cat'd
    all_Ir_active: list = []
    all_Icp_active: list = []
    all_Icm_active: list = []
    frames_so_far = 0             # running offset into the mega-batch
    for i, item in enumerate(items):
        try:
            sr = item["sr"]
            yc_orig = limited_gpu[i]                         # (L,) on GPU
            # Normalise peak down to NORM_TGT (never boost).
            gp = float(yc_orig.abs().max().item())
            if gp > NORM_TGT:
                scale = NORM_TGT / gp
                yc = yc_orig * scale
            else:
                scale = 1.0
                yc = yc_orig
            # DC removal
            dc = float(yc.mean().item())
            yc = yc - dc
            # Ceiling + clipping threshold (GPU scalars)
            ceiling = float(torch.maximum(yc.max(), (-yc).max()).item())
            thresh = ceiling * (10.0 ** (-p.delta_db / 20.0))
            if thresh <= 0.0:
                item_states.append(None)
                continue
            # Masks — GPU boolean ops
            Ir_t, Icp_t, Icm_t = _compute_masks_gpu(yc, thresh)
            # Mask dilation — GPU max_pool
            rs = 0
            if p.release_ms > 0.0:
                rs = max(0, round(p.release_ms * sr / 1000.0))
                if rs > 0:
                    Ir_t, Icp_t, Icm_t = _dilate_masks_gpu(Icp_t, Icm_t, yc, rs)
            # Macro expand — still CPU via imported function; runs on numpy
            if macro_expand and macro_ratio > 1.0:
                yc_np = yc.cpu().numpy().astype(float)
                yc_np = _macro_expand_pass(yc_np, sr,
                                           attack_ms=p.macro_attack_ms,
                                           release_ms=p.macro_release_ms,
                                           ratio=macro_ratio)
                yc = torch.from_numpy(yc_np.astype(np.float32)).to(device=device)
                # Masks must be rebuilt on the expanded signal.
                Ir_t, Icp_t, Icm_t = _compute_masks_gpu(yc, thresh)
                if p.release_ms > 0.0 and rs > 0:
                    Ir_t, Icp_t, Icm_t = _dilate_masks_gpu(Icp_t, Icm_t, yc, rs)
            L = yc.shape[0]
            # Frame extraction — GPU unfold
            yc_act, Ir_act, Icp_act, Icm_act, is_active, N, idx1s_t = \
                _extract_frames_gpu(yc, Ir_t, Icp_t, Icm_t, M, a, win_t, thresh)
            n_active = int(is_active.sum().item())
            item_states.append({
                "file":      item["file"],   # kept for Pass-3 error reporting
                "yc":        yc,
                "scale":     scale,
                "Ir_t":      Ir_t,
                "L":         L,
                "is_active": is_active,
                "N":         N,
                "idx1s_t":   idx1s_t,
                "frame_offset": frames_so_far,
                "n_active":  n_active,
                "gt_t":      gt_res_gpu[i],
                "limited_t": limited_gpu[i],
                "sr":        sr,
            })
            frames_so_far += n_active
            if n_active > 0:
                all_yc_active .append(yc_act)
                all_Ir_active .append(Ir_act)
                all_Icp_active.append(Icp_act)
                all_Icm_active.append(Icm_act)
        except Exception as exc:
            warnings.warn(f"evaluate_corpus_gpu_mega preprocess ({item['file']}): {exc}")
            item_states.append(None)
    if not all_yc_active:
        return [None] * len(items)
    # Concatenate into mega-batch — single GPU allocation
    yc_mega = torch.cat(all_yc_active, dim=0)                # (total_active, M)
    Ir_mega = torch.cat(all_Ir_active, dim=0)
    Icp_mega = torch.cat(all_Icp_active, dim=0)
    Icm_mega = torch.cat(all_Icm_active, dim=0)
    total_frames = yc_mega.shape[0]
    total_meta = sum(s["N"] for s in item_states if s is not None)
    bypass_frames = total_meta - total_frames
    vram_mb = total_frames * M * 4 * 4 / 1024 ** 2
    print(f" [mega-batch] {total_frames} active / {total_meta} total frames "
          f"({100*bypass_frames/max(total_meta,1):.0f}% bypassed) "
          f"≈{vram_mb:.0f} MB GPU")
    # ── Pass 2 (GPU): _sspade_batch_gpu — unchanged ───────────────────────
    try:
        x_mega, _ = _sspade_batch_gpu(
            yc_mega, Ir_mega, Icp_mega, Icm_mega,
            p.frame, p.s, p.r, p.eps, p.max_iter,
            g_max=g_max, lf_mask_t=lf_mask_t, k_lf_min=p.lf_k_min,
            gpu_dtype=getattr(p, "gpu_dtype", "float32"),
        )
    except Exception as exc:
        warnings.warn(f"evaluate_corpus_gpu_mega GPU pass: {exc}")
        return [None] * len(items)
    finally:
        # Free the mega-batch inputs before the WOLA pass to cap peak VRAM.
        del yc_mega, Ir_mega, Icp_mega, Icm_mega
    # ── Pass 3 (GPU): WOLA + RMS match + cosine sim ───────────────────────
    # All operations stay on GPU. Only .item() at the very end to get the score.
    scores: List[Optional[float]] = []
    NORM_LIN = 10.0 ** (RESIDUAL_DBFS / 20.0)
    for state in item_states:
        if state is None:
            scores.append(None)
            continue
        try:
            yc       = state["yc"]           # (L,) float GPU
            scale    = state["scale"]
            L        = state["L"]
            Ir_t     = state["Ir_t"]
            is_active= state["is_active"]    # (N,) bool
            idx1s_t  = state["idx1s_t"]      # (N,) long
            f_off    = state["frame_offset"]
            n_act    = state["n_active"]
            gt_t     = state["gt_t"]         # (L_gt,) float GPU
            ltd_t    = state["limited_t"]    # (L,) float GPU
            sr       = state["sr"]
            # Slice this item's active frames out of the mega result
            x_item = x_mega[f_off:f_off + n_act] if n_act > 0 \
                     else torch.empty((0, M), device=device)
            # GPU WOLA
            x_t = _wola_gpu(x_item, is_active, idx1s_t, yc, win_t, L, M)
            # GPU RMS match
            x_t = _rms_match_gpu(x_t, yc, Ir_t, M)
            # Undo the NORM_TGT scaling
            x_t = x_t / scale
            # Residual — GPU subtraction
            ltd_ch = ltd_t[:L]               # align lengths
            res_raw = x_t - ltd_ch
            # Normalise to RESIDUAL_DBFS (GPU)
            pk = res_raw.abs().max().clamp(min=1e-12)
            res_norm = res_raw * (NORM_LIN / pk)
            # Align with gt_t
            gt_ch = gt_t[:, 0] if gt_t.dim() == 2 else gt_t
            Lmin = min(res_norm.shape[0], gt_ch.shape[0])
            # GPU cosine sim via torch.stft
            sc = _cosine_sim_gpu(gt_ch[:Lmin], res_norm[:Lmin],
                                 win_samples=1024, hop_samples=256)
            scores.append(sc)
        except Exception as exc:
            # BUGFIX: the original referenced the stale `item` loop variable
            # from Pass 1 here, reporting the wrong file name.
            warnings.warn(f"evaluate_corpus_gpu_mega WOLA/score ({state['file']}): {exc}")
            scores.append(None)
    return scores
# =============================================================================
# OPTUNA OBJECTIVE
# =============================================================================
def make_objective(corpus: List[Dict]):
    """
    Build the Optuna objective closure over the prepared corpus.

    The objective samples the S-SPADE parameter space (STATIC space — the
    conditional parameters are always sampled and only applied at runtime,
    which keeps the multivariate TPE effective), evaluates the corpus in two
    GPU mega-batches with a pruning checkpoint between them, and returns the
    mean per-file score (maximised).
    """
    def objective(trial: "optuna.Trial") -> float:
        # ── Core parameters ───────────────────────────────────────────────
        delta_db = trial.suggest_float("delta_db", 1.5, 3.5, step=0.05)
        win_exp  = trial.suggest_int ("win_exp", 9, 11)
        win      = 2 ** win_exp
        hop_div  = trial.suggest_categorical("hop_div", [4, 8])
        hop      = win // hop_div
        rel_ms   = trial.suggest_float("release_ms", 10.0, 200.0, step=5.0)
        gain_db  = trial.suggest_float("max_gain_db", 2.0, 12.0, step=0.5)
        eps      = trial.suggest_categorical("eps", [0.03, 0.05, 0.1])
        max_iter = trial.suggest_categorical("max_iter", [250, 500, 1000])
        # ── Multiband + Macro expand ──────────────────────────────────────
        # STATIC SPACE: lf_delta_db and macro_ratio are ALWAYS sampled by
        # the TPE (fixed space) and then used conditionally at runtime.
        # This avoids the RandomSampler fallback that degraded multivariate
        # TPE performance with dynamic spaces.
        multiband    = trial.suggest_categorical("multiband", [False, True])
        macro_expand = trial.suggest_categorical("macro_expand", [False, True])
        # Always sampled (fixed range), used only when the flag is True:
        lf_delta_db  = trial.suggest_float("lf_delta_db", 0.5, 2.0, step=0.05)
        macro_ratio  = trial.suggest_float("macro_ratio", 1.1, 2.0, step=0.05)
        # ── v12: frequency-stratified thresholding ────────────────────────
        # lf_cutoff_hz: Hz threshold separating the "guaranteed LF" bins
        #   from the HF ones. With M=512, sr=44100: bin_k = k * sr / (2M)
        #   → lf_cutoff=1000Hz → 23 LF bins.
        # lf_k_min: how many of those bins are guaranteed per ADMM iteration.
        #   0 = disabled (behaviour identical to v11).
        lf_cutoff_hz = trial.suggest_categorical("lf_cutoff_hz", [0.0, 500.0, 1000.0, 2000.0])
        lf_k_min     = trial.suggest_int("lf_k_min", 0, 16)
        # When lf_cutoff_hz=0 or lf_k_min=0 the feature is off; the TPE
        # learns on its own when enabling it pays off.
        # If multiband=False, lf_delta_db is ignored in evaluate_one.
        # If macro_expand=False, macro_ratio is ignored in evaluate_one.
        params = dict(
            delta_db      = delta_db,
            window_length = win,
            hop_length    = hop,
            release_ms    = rel_ms,
            max_gain_db   = gain_db,
            eps           = eps,
            max_iter      = max_iter,
            # high-level flags (extracted in evaluate_one, not passed raw)
            multiband     = multiband,
            lf_delta_db   = lf_delta_db,
            macro_expand  = macro_expand,
            macro_ratio   = macro_ratio,
            # v12: forwarded directly to DeclipParams (not extracted in evaluate_one)
            lf_cutoff_hz  = lf_cutoff_hz,
            lf_k_min      = lf_k_min,
        )
        # ── Per-trial shuffle with a reproducible seed ────────────────────
        rng_shuffle = np.random.default_rng(trial.number)
        shuffled_corpus = rng_shuffle.permutation(len(corpus)).tolist()
        midpoint = len(corpus) // 2
        ordered_items = [corpus[idx] for idx in shuffled_corpus]
        # ── GPU mega-batch: all corpus frames in one kernel per half ──────
        # Detect the available GPU device for this call; when unavailable,
        # evaluate_corpus_gpu_mega falls back to evaluate_one.
        _gpu_dev = "cpu"
        try:
            import torch
            if torch.cuda.is_available():
                _gpu_dev = "cuda"
        except ImportError:
            pass
        # First half of the corpus → prune check → second half
        # (preserves the MedianPruner benefit without N separate kernels)
        first_half  = ordered_items[:midpoint + 1]
        second_half = ordered_items[midpoint + 1:]
        scores_first = evaluate_corpus_gpu_mega(first_half, dict(params), _gpu_dev)
        scores = [sc for sc in scores_first if sc is not None]
        if scores:
            trial.report(float(np.mean(scores)), step=midpoint)
            if trial.should_prune():
                raise optuna.TrialPruned()
        if second_half:
            scores_second = evaluate_corpus_gpu_mega(second_half, dict(params), _gpu_dev)
            scores.extend(sc for sc in scores_second if sc is not None)
        if not scores:
            return 0.0
        mean_score = float(np.mean(scores))
        trial.report(mean_score, step=len(corpus))
        return mean_score
    return objective
# =============================================================================
# REPORT + CSV
# =============================================================================
def print_report(study: "optuna.Study", top_n: int = 20):
    """
    Print the ranking of COMPLETE trials (best first) and a ready-to-paste
    DeclipParams snippet for the best one.

    Uses a rich table when available (_HAS_RICH), plain text otherwise.
    Only the first *top_n* trials are listed.
    """
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        # `t.value or 0` maps a None value to 0 for sorting.
        key=lambda t: t.value or 0, reverse=True,
    )
    if not trials:
        print("Nessun trial completato.")
        return
    if _HAS_RICH:
        _console.rule("[bold cyan]RISULTATI SWEEP BAYESIANO[/]")
        tbl = Table(show_header=True, header_style="bold cyan", show_lines=False)
        for col, w in [("#",4),("score",9),("ddb",6),("LFd",5),("win",6),
                       ("hop",4),("rel",6),("gain",6),("eps",5),("iter",5),
                       ("MB",3),("ME",3),("MR",5),("LFcut",6),("LFk",4)]:
            tbl.add_column(col, justify="right", width=w)
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            # window/hop are stored as exponent/divisor; reconstruct them.
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            mb = "Y" if p.get("multiband") else "n"
            me = "Y" if p.get("macro_expand") else "n"
            lfc = p.get("lf_cutoff_hz", 0.0)
            lfk = p.get("lf_k_min", 0)
            # Highlight rank 1 in green, ranks 2-3 in yellow.
            sty = "bold green" if rank == 1 else ("yellow" if rank <= 3 else "")
            tbl.add_row(
                str(rank), f"{t.value:.5f}",
                f"{p['delta_db']:.2f}",
                f"{p.get('lf_delta_db', p['delta_db']):.2f}",
                str(win), str(hop),
                f"{p['release_ms']:.0f}", f"{p['max_gain_db']:.1f}",
                str(p['eps']), str(p['max_iter']),
                mb, me, f"{p.get('macro_ratio', 1.0):.2f}",
                f"{lfc:.0f}", str(lfk),
                style=sty,
            )
        _console.print(tbl)
    else:
        # Plain-text fallback with the same columns as the rich table.
        hdr = (f"{'#':>3} {'score':>8} {'ddb':>5} {'LFd':>5} {'win':>5}"
               f" {'hop':>4} {'rel':>6} {'gain':>5} {'eps':>5} {'iter':>5}"
               f" {'MB':>3} {'ME':>3} {'MR':>5} {'LFcut':>6} {'LFk':>4}")
        print(hdr); print("-" * len(hdr))
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            mb = "Y" if p.get("multiband") else "n"
            me = "Y" if p.get("macro_expand") else "n"
            lfc = p.get("lf_cutoff_hz", 0.0)
            lfk = p.get("lf_k_min", 0)
            print(f"{rank:>3} {t.value:>8.5f} {p['delta_db']:>5.2f}"
                  f" {p.get('lf_delta_db', p['delta_db']):>5.2f} {win:>5}"
                  f" {hop:>4} {p['release_ms']:>6.0f} {p['max_gain_db']:>5.1f}"
                  f" {str(p['eps']):>5} {p['max_iter']:>5}"
                  f" {mb:>3} {me:>3} {p.get('macro_ratio', 1.0):>5.2f}"
                  f" {lfc:>6.0f} {lfk:>4}")
    # Best-trial summary: emit a copy-pastable DeclipParams call.
    best = trials[0]
    p = best.params
    win = 2 ** p["win_exp"]
    hop = win // p["hop_div"]
    n_pruned = sum(1 for t in study.trials
                   if t.state == optuna.trial.TrialState.PRUNED)
    print("\n" + "═" * 60)
    print("CONFIG OTTIMALE")
    print("═" * 60)
    print(f"""
params = DeclipParams(
    algo            = "sspade",
    frame           = "rdft",
    mode            = "soft",
    delta_db        = {p['delta_db']:.2f},
    window_length   = {win},
    hop_length      = {hop},
    release_ms      = {p['release_ms']:.1f},
    max_gain_db     = {p['max_gain_db']:.1f},
    eps             = {p['eps']},
    max_iter        = {p['max_iter']},
    sample_rate     = sr,
    multiband       = {p.get('multiband', False)},
    band_crossovers = ({BAND_CROSSOVER_HZ},),
    band_delta_db   = ({p.get('lf_delta_db', p['delta_db']):.2f}, {p['delta_db']:.2f}),
    macro_expand    = {p.get('macro_expand', False)},
    macro_ratio     = {p.get('macro_ratio', 1.0):.2f},
    lf_cutoff_hz    = {p.get('lf_cutoff_hz', 0.0):.1f},  # v12
    lf_k_min        = {p.get('lf_k_min', 0)},            # v12
    n_jobs          = -1,
    show_progress   = True,
)""")
    print(f"\n→ Best score : {best.value:.5f}")
    print(f" Trials done : {len(trials)}")
    print(f" Pruned : {n_pruned}")
# =============================================================================
# DEBUG EXPORT
# =============================================================================
# SPADE parameters used for the debug export (best known values from the
# earlier grid sweep). When an Optuna DB exists and has completed trials,
# these are replaced by the best trial's parameters.
DEBUG_PARAMS = dict(
    delta_db      = 1.5,
    window_length = 1024,
    hop_length    = 256,
    release_ms    = 100.0,
    max_gain_db   = 6.0,
    eps           = 0.05,
    max_iter      = 500,
)
def _pk_dbfs(a: np.ndarray) -> float:
pk = float(np.max(np.abs(a)))
return 20.0 * np.log10(pk) if pk > 1e-12 else -999.0
def _rms_dbfs(a: np.ndarray) -> float:
rms = float(np.sqrt(np.mean(a.astype(float) ** 2)))
return 20.0 * np.log10(rms) if rms > 1e-12 else -999.0
def _write_wav(path: Path, audio: np.ndarray, sr: int) -> None:
    """Write *audio* to *path* as float32 WAV without clipping; warn when peak > 1.0."""
    data = ensure_2d(audio).astype(np.float32)
    peak = float(np.abs(data).max())
    if peak > 1.0:
        print(f" [WARN] {path.name}: peak={peak:.4f} > 1.0 "
              f"(+{20*np.log10(peak):.2f} dBFS) — float32, non clippato")
    sf.write(str(path), data, sr, subtype="FLOAT")
def debug_export(
    corpus: list,
    base_dir: Path,
    out_dir: Path,
    n_files: int,
    spade_params: dict,
) -> None:
    """
    Export debug WAVs for the first n_files items of the corpus.

    For every file, six float32 WAVs are written:
      01_orig_with_noise   drum + pink noise, normalised to 0 dBFS peak
                           (signal before the limiter)
      02_limited           synthetic limiter output (input to SPADE)
      03_gt_residual       orig_with_noise - limited, @RESIDUAL_DBFS peak
      04_spade_output      SPADE output (float32, may exceed 0 dBFS)
      05_res_iter          spade_output - limited, @RESIDUAL_DBFS peak
      06_diff_residuals    gt_residual - res_iter
                           ideal = silence = -inf dB

    A table with peak dBFS and RMS dBFS is printed for every track.

    EXPECTED levels:
      01 peak = 0.00 dBFS (normalised)
      02 peak ~ -LIMITER_THRESHOLD_DB dBFS (e.g. -1.5 dBFS)
      03 peak = RESIDUAL_DBFS (e.g. -3.0 dBFS)
      04 peak may be > 0 dBFS (recovered transient)
      05 peak = RESIDUAL_DBFS (e.g. -3.0 dBFS)
      06 peak << 0 dBFS (lower = SPADE closer to the GT)
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    items = corpus[:n_files]
    # Table column sized to the longest file name in the batch.
    col_w = max(len(it["file"]) for it in items) + 2
    HDR = (f" {'file':<{col_w}} {'traccia':<22}"
           f" {'peak dBFS':>10} {'RMS dBFS':>9} note")
    SEP = " " + "-" * (len(HDR) - 2)
    print()
    if _HAS_RICH:
        _console.rule("[bold cyan]DEBUG EXPORT[/]")
    else:
        print("=" * 65)
        print("DEBUG EXPORT")
        print("=" * 65)
    print(f" Output dir : {out_dir}")
    print(f" SPADE params : delta_db={spade_params['delta_db']}"
          f" win={spade_params['window_length']}"
          f" hop={spade_params['hop_length']}"
          f" rel={spade_params['release_ms']}ms"
          f" gain={spade_params['max_gain_db']}dB")
    print(f" File esportati: {len(items)}")
    print()
    print(f" Livelli attesi:")
    print(f" 01_orig_with_noise : ~ 0.00 dBFS (normalizzato prima del limiter)")
    print(f" 02_limited : ~ {-LIMITER_THRESHOLD_DB:+.2f} dBFS (uscita limiter)")
    print(f" 03_gt_residual : = {RESIDUAL_DBFS:+.2f} dBFS (normalizzato)")
    print(f" 04_spade_output : > 0 dBFS possibile (transiente recuperato)")
    print(f" 05_res_iter : = {RESIDUAL_DBFS:+.2f} dBFS (normalizzato)")
    print(f" 06_diff_residuals : << 0 dBFS (piu' basso = pipeline piu' corretta)")
    print()
    print(HDR)
    diff_peaks = []
    for file_index, item in enumerate(items):
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]
        stem = Path(item["file"]).stem
        # ── Rebuild orig_with_noise ───────────────────────────────────────
        # Re-runs the same pipeline as build_corpus with the identical seed.
        orig_with_noise = None
        for folder in DRUM_DIRS:
            candidate = base_dir / folder / item["file"]
            if candidate.exists():
                try:
                    raw, _ = sf.read(str(candidate), always_2d=True)
                    raw = raw.astype(float)
                    rng = np.random.default_rng(seed=file_index)
                    orig_0 = normalize_to_0dBFS(raw)
                    mixed = ensure_2d(mix_pink_noise(orig_0, sr,
                                                     PINK_NOISE_LEVEL_DB, rng))
                    orig_with_noise = ensure_2d(normalize_to_0dBFS(mixed))
                except Exception:
                    pass
                break
        if orig_with_noise is None:
            # Fallback: rebuild from limited + gt_res (approximation)
            gt_scale = 10 ** (RESIDUAL_DBFS / 20.0)           # gt_res peak
            lim_peak = 10 ** (-LIMITER_THRESHOLD_DB / 20.0)   # expected limited peak
            gt_raw = gt_res * (lim_peak / (gt_scale + 1e-12))
            orig_with_noise = ensure_2d(normalize_to_0dBFS(limited + gt_raw))
        # ── Run SPADE ─────────────────────────────────────────────────────
        try:
            p = DeclipParams(sample_rate=sr, **FIXED_SOLVER, **spade_params)
            fixed, _ = declip(limited.copy(), p)
            fixed_2d = ensure_2d(fixed)
        except Exception as exc:
            print(f" [ERRORE SPADE] {item['file']}: {exc}")
            continue
        # ── Iteration residual (RAW scale, no normalisation) ──────────────
        # IMPORTANT: the diff must be taken on the common scale BEFORE
        # normalising the two residuals, otherwise the independent
        # normalisation destroys the relative-amplitude information.
        #
        # gt_res and res_raw are both derived from the same limited signal →
        # they share the same reference scale.
        # gt_res was already normalised to RESIDUAL_DBFS in build_corpus;
        # it must be brought back to the raw scale for the comparison.
        #
        # Common scale: the peak of the limited signal is the reference.
        # limited peak ≈ 10^(-LIMITER_THRESHOLD_DB/20) → known absolute scale.
        res_raw = fixed_2d - limited          # SPADE residual on the absolute scale
        # gt_res_raw: reconstructed from the normalised scale
        #   gt_res = gt_res_raw / peak(gt_res_raw) * 10^(RESIDUAL_DBFS/20)
        #   → gt_res_raw = gt_res * peak(gt_res_raw) / 10^(RESIDUAL_DBFS/20)
        # Since peak(gt_res_raw) is not stored, it is estimated as:
        #   gt_res_raw ≈ orig_with_noise - limited (reconstructed)
        gt_res_raw_approx = ensure_2d(orig_with_noise) - limited
        L = min(gt_res_raw_approx.shape[0], res_raw.shape[0])
        # ── Diff on the common (raw, non-normalised) scale ────────────────
        diff_raw = gt_res_raw_approx[:L] - res_raw[:L]
        # ── Time-domain cosine similarity (scalar, on the L channel) ──────
        g_flat = gt_res_raw_approx[:L, 0] if gt_res_raw_approx.ndim == 2 else gt_res_raw_approx[:L]
        e_flat = res_raw[:L, 0] if res_raw.ndim == 2 else res_raw[:L]
        cos_sim_td = float(
            np.dot(g_flat, e_flat) /
            (np.linalg.norm(g_flat) * np.linalg.norm(e_flat) + 1e-12)
        )
        # ── Theoretical diff floor due to the pink noise ──────────────────
        # The limiter also attenuates pink-noise peaks → that part sits in
        # GT_res but NOT in res_iter (SPADE does not recover it).
        # Estimate how much noise is in GT_res as a proxy for the floor.
        noise_gain_lin = 10 ** (PINK_NOISE_LEVEL_DB / 20.0)
        # Noise amplitude relative to limited: noise_gain ≈ fraction of the
        # GT_res that SPADE cannot recover.
        noise_floor_db = 20 * np.log10(noise_gain_lin + 1e-12) + RESIDUAL_DBFS
        # In practice: diff cannot go below noise_floor by construction.
        # ── diff dBFS relative to GT_res (SNR-like) ───────────────────────
        diff_rms_db = _rms_dbfs(diff_raw[:L])
        gt_rms_db = _rms_dbfs(gt_res_raw_approx[:L])
        # diff_vs_gt: diff size relative to the GT (0 dB = diff as large as GT)
        diff_vs_gt_db = diff_rms_db - gt_rms_db   # more negative = better
        # Normalise for the WAV export
        res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)
        diff_norm = normalize_peak(diff_raw, RESIDUAL_DBFS) if np.max(np.abs(diff_raw)) > 1e-12 else diff_raw
        diff_peaks.append((diff_vs_gt_db, cos_sim_td, diff_rms_db, gt_rms_db))
        # ── Track definitions ─────────────────────────────────────────────
        tracks = [
            ("01_orig_with_noise",
             orig_with_noise,
             f"drum+noise @0dBFS (input pipeline)"),
            ("02_limited",
             limited,
             f"uscita limiter (input SPADE) atteso: ~{-LIMITER_THRESHOLD_DB:+.2f}dBFS"),
            ("03_gt_residual",
             gt_res,
             f"GT residual @{RESIDUAL_DBFS:.0f}dBFS (include noise attenuation)"),
            ("04_spade_output",
             fixed_2d,
             f"SPADE output (float32, puo' >0dBFS)"),
            ("05_res_iter",
             res_iter,
             f"residual SPADE @{RESIDUAL_DBFS:.0f}dBFS (solo componente sparsa)"),
            ("06_diff_residuals",
             diff_norm,
             f"GT - iter @{RESIDUAL_DBFS:.0f}dBFS "
             f"cos_sim={cos_sim_td:.3f} diff/GT={diff_vs_gt_db:+.1f}dB "
             f"noise_floor≈{noise_floor_db:+.1f}dB"),
        ]
        # ── Realistic threshold for the diff ──────────────────────────────
        # The diff cannot be < noise_floor by corpus construction.
        # Calibrate the [OK] threshold at noise_floor + 6 dB (margin).
        # NOTE(review): ok_threshold / warn_threshold are computed but never
        # used below (the flags use hard-coded -12/-6) — verify intent.
        ok_threshold = noise_floor_db + 6.0     # typically around -17 dBFS
        warn_threshold = ok_threshold + 10.0    # anything above is truly anomalous
        # ── Print table + write WAVs ──────────────────────────────────────
        print(SEP)
        for track_name, audio, note in tracks:
            pk = _pk_dbfs(audio)
            rms = _rms_dbfs(audio)
            flag = ""
            if track_name == "06_diff_residuals":
                if diff_vs_gt_db < -12: flag = "[OK] buona convergenza"
                elif diff_vs_gt_db < -6: flag = "[~] convergenza parziale"
                else: flag = "[WARN] diff elevato rispetto al GT"
            row = (f" {item['file']:<{col_w}} {track_name:<22}"
                   f" {pk:>+10.2f} {rms:>+9.2f} {note} {flag}")
            if _HAS_RICH:
                color = ("green" if "[OK]" in flag else
                         "yellow" if "[~]" in flag else
                         "red" if "[WARN]" in flag else "")
                colored_row = row.replace(flag, f"[{color or 'dim'}]{flag}[/]") if flag else row
                _console.print(colored_row)
            else:
                print(row)
            wav_path = out_dir / f"{stem}__{track_name}.wav"
            _write_wav(wav_path, audio, sr)
        # ── Per-band spectral analysis: LF vs HF ──────────────────────────
        # Answers: how much residual sits in the low frequencies, and how
        # much of it does SPADE recover?
        #
        # Bands:
        #   Sub-bass : 20 – 80 Hz    (kick fundamental, body)
        #   Bass     : 80 – 250 Hz   (kick body, tail)
        #   Low-mid  : 250 – 800 Hz  (presence)
        #   High-mid : 800 – 4000 Hz (attack, click)
        #   High     : 4k – 20k Hz   (air, snap)
        #
        # Per band it measures:
        #   GT_energy   = energy of the GT residual (what the limiter removed)
        #   iter_energy = energy recovered by SPADE
        #   recovery %  = iter_energy / GT_energy × 100
        def band_energy(audio_2d, sr, f_lo, f_hi):
            """RMS energy in dB of a band-pass [f_lo, f_hi] Hz."""
            mono = audio_2d[:, 0] if audio_2d.ndim == 2 else audio_2d
            N = len(mono)
            if N < 8:
                return -999.0
            # Butterworth bandpass (or lowpass/highpass at the edges)
            nyq = sr / 2.0
            lo = max(f_lo / nyq, 1e-4)
            hi = min(f_hi / nyq, 0.9999)
            if lo >= hi:
                return -999.0
            if lo < 1e-3:
                b, a = sig.butter(4, hi, btype="low")
            else:
                b, a = sig.butter(4, [lo, hi], btype="band")
            filtered = sig.filtfilt(b, a, mono)
            return _rms_dbfs(filtered)
        BANDS = [
            ("Sub-bass ", 20, 80),
            ("Bass ", 80, 250),
            ("Low-mid ", 250, 800),
            ("High-mid ", 800, 4000),
            ("High ", 4000, 20000),
        ]
        # NOTE(review): gt_mono / ri_mono are computed but never used below —
        # candidates for removal.
        gt_mono = gt_res[:, 0] if gt_res.ndim == 2 else gt_res
        ri_mono = res_iter[:, 0] if res_iter.ndim == 2 else res_iter
        # Compare GT and iter on the same scale (drop the RESIDUAL_DBFS
        # normalisation so absolute energies are comparable).
        gt_raw_for_bands = gt_res_raw_approx
        iter_raw_for_bands = res_raw
        print()
        band_hdr = f" {'banda':<12} {'GT_res RMS':>10} {'SPADE rec RMS':>13} {'recovery':>9} {'limitato?'}"
        print(f" Analisi spettrale per banda — {item['file']}")
        print(f" {'─'*75}")
        print(band_hdr)
        print(f" {'─'*75}")
        for bname, f_lo, f_hi in BANDS:
            gt_db = band_energy(gt_raw_for_bands, sr, f_lo, f_hi)
            iter_db = band_energy(iter_raw_for_bands, sr, f_lo, f_hi)
            if gt_db < -60:
                recovery_str = " — (silenzio)"
                flag_b = ""
            else:
                diff_b = iter_db - gt_db   # positive = SPADE exceeds GT (over-recovery)
                # recovery: 0 dB diff = perfect recovery, very negative = under-recovery
                if diff_b > -3:
                    flag_b = "OK"
                elif diff_b > -9:
                    flag_b = "~ parziale"
                else:
                    flag_b = "!! sotto-recupero"
                recovery_str = f"{diff_b:>+7.1f} dB {flag_b}"
            line = f" {bname:<12} {gt_db:>+10.1f} {iter_db:>+13.1f} {recovery_str}"
            if _HAS_RICH:
                color = "green" if "OK" in recovery_str else (
                        "yellow" if "~" in recovery_str else (
                        "red" if "!!" in recovery_str else "dim"))
                _console.print(f"[{color}]{line}[/]")
            else:
                print(line)
        print()
    print(SEP)
    print()
    # ── Summary over all exported files ───────────────────────────────────
    if diff_peaks:
        vs_gt_vals = [d[0] for d in diff_peaks]
        cos_vals = [d[1] for d in diff_peaks]
        avg_vs_gt = float(np.mean(vs_gt_vals))
        best_vs_gt = float(np.min(vs_gt_vals))
        worst_vs_gt = float(np.max(vs_gt_vals))
        avg_cos = float(np.mean(cos_vals))
        noise_floor_db = 20 * np.log10(10 ** (PINK_NOISE_LEVEL_DB / 20.0) + 1e-12) + RESIDUAL_DBFS
        print(f" RIEPILOGO 06_diff_residuals:")
        print(f" diff/GT_rms media : {avg_vs_gt:>+7.2f} dB (0 dB = diff grande quanto GT)")
        print(f" diff/GT_rms migliore: {best_vs_gt:>+7.2f} dB")
        print(f" diff/GT_rms peggiore: {worst_vs_gt:>+7.2f} dB")
        print(f" cos_sim TD media : {avg_cos:>8.4f} (1.0 = identici)")
        print()
        print(f" NOTA IMPORTANTE:")
        print(f" Il rumore rosa ({PINK_NOISE_LEVEL_DB} dB) fa parte del GT_res ma")
        print(f" NON puo' essere recuperato da SPADE (non e' sparso).")
        print(f" Floor teorico del diff: ≈ {noise_floor_db:+.1f} dBFS — questo e' il")
        print(f" limite fisico massimo raggiungibile con questo corpus.")
        print(f" Un diff/GT < -6 dB indica buona convergenza di SPADE.")
        print()
        # Verdict driven by the worst file in the batch.
        if worst_vs_gt < -12:
            verdict = "OK Convergenza eccellente — SPADE recupera bene i transienti"
        elif worst_vs_gt < -6:
            verdict = "~ Convergenza buona — residuo compatibile con il noise floor"
        else:
            verdict = "INFO diff dominato dal rumore rosa — comportamento atteso e corretto"
        print(f" Verdetto: {verdict}")
    print(f"\n WAV scritti in : {out_dir}/")
    print(f" Formato : float32, nessun clipping (usa un editor che supporta >0dBFS)")
    print(f" Nomenclatura : <stem>__<N>_<traccia>.wav")
def save_csv(study: "optuna.Study"):
    """
    Write all COMPLETE trials to OUT_CSV, ranked by score (descending).

    The columns mirror the ranking table produced by print_report, including
    the v12 lf_cutoff_hz / lf_k_min parameters (previously missing from the
    CSV; appended at the end so existing column positions are unchanged).
    """
    import csv
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        key=lambda t: t.value or 0, reverse=True,
    )
    with open(OUT_CSV, "w", newline="") as f:
        w = csv.writer(f)
        w.writerow(["rank", "score", "delta_db", "lf_delta_db",
                    "window_length", "hop_length", "release_ms", "max_gain_db",
                    "eps", "max_iter", "multiband", "macro_expand", "macro_ratio",
                    "lf_cutoff_hz", "lf_k_min"])
        for rank, t in enumerate(trials, 1):
            p = t.params
            # window/hop are stored as exponent/divisor; reconstruct them.
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            w.writerow([
                rank, round(t.value, 6),
                p["delta_db"],
                round(p.get("lf_delta_db", p["delta_db"]), 2),
                win, hop,
                p["release_ms"], p["max_gain_db"], p["eps"], p["max_iter"],
                int(p.get("multiband", False)),
                int(p.get("macro_expand", False)),
                round(p.get("macro_ratio", 1.0), 2),
                p.get("lf_cutoff_hz", 0.0),
                p.get("lf_k_min", 0),
            ])
    print(f"\n 📄 CSV: {OUT_CSV}")
# =============================================================================
# MAIN
# =============================================================================
def parse_args():
    """Parse the command-line options for the sweep / report / debug-export modes."""
    parser = argparse.ArgumentParser(description="Smart Bayesian sweep per S-SPADE v2")
    parser.add_argument("--trials", type=int, default=200,
                        help="Numero di trial Optuna (default: 200)")
    parser.add_argument("--resume", action="store_true",
                        help="Carica lo study esistente e aggiunge trial")
    parser.add_argument("--report", action="store_true",
                        help="Solo report (nessun nuovo trial)")
    parser.add_argument("--base-dir", type=str, default=".",
                        help="Cartella radice con Kicks/Snares/Perc/Tops")
    parser.add_argument("--corpus-size", type=int, default=None,
                        help="Limita il corpus a N file (None = tutti)")
    parser.add_argument("--top", type=int, default=20,
                        help="Quanti trial mostrare nel ranking (default: 20)")
    parser.add_argument("--no-prune", action="store_true",
                        help="Disabilita MedianPruner (più lento ma completo)")
    parser.add_argument("--debug-export", action="store_true",
                        help="Esporta WAV di debug per i primi N file del corpus (no sweep)")
    parser.add_argument("--debug-dir", type=str, default="debug_export",
                        help="Cartella output WAV di debug (default: debug_export)")
    parser.add_argument("--debug-n", type=int, default=10,
                        help="Quanti file esportare in debug (default: 10)")
    return parser.parse_args()
def main():
args = parse_args()
missing = []
if not _HAS_OPTUNA: missing.append("optuna")
if not _HAS_SPADE: missing.append("spade_declip_v11.py (nella stessa dir)")
if missing:
pip = [m for m in missing if not m.endswith(")")]
sys.exit("Mancante:\n pip install " + " ".join(pip)
+ ("\n " + "\n ".join(m for m in missing if m.endswith(")")) if any(m.endswith(")") for m in missing) else ""))
base_dir = Path(args.base_dir).resolve()
storage = f"sqlite:///{STUDY_NAME}.db"
sampler = TPESampler(seed=42, multivariate=True, warn_independent_sampling=False)
pruner = (MedianPruner(n_startup_trials=10, n_warmup_steps=3)
if not args.no_prune else optuna.pruners.NopPruner())
if args.report:
try:
study = optuna.load_study(study_name=STUDY_NAME, storage=storage,
sampler=sampler, pruner=pruner)
except Exception:
sys.exit(f"Nessuno study trovato in {STUDY_NAME}.db")
print_report(study, top_n=args.top)
save_csv(study)
return
# ── Debug export ──────────────────────────────────────────────────────────
if args.debug_export:
# Usa i parametri del best trial se esiste un DB, altrimenti DEBUG_PARAMS
spade_params = dict(DEBUG_PARAMS)
try:
study = optuna.load_study(study_name=STUDY_NAME, storage=storage,
sampler=sampler, pruner=pruner)
completed = [t for t in study.trials
if t.state == optuna.trial.TrialState.COMPLETE]
if completed:
best_t = max(completed, key=lambda t: t.value or 0)
p = best_t.params
win = 2 ** p["win_exp"]
hop = win // p["hop_div"]
spade_params = dict(
delta_db = p["delta_db"],
window_length = win,
hop_length = hop,
release_ms = p["release_ms"],
max_gain_db = p["max_gain_db"],
eps = p["eps"],
max_iter = p["max_iter"],
)
print(f" [DEBUG] Usando best trial #{best_t.number}"
f" (score={best_t.value:.5f}) dal DB.")
except Exception:
print(f" [DEBUG] DB non trovato — uso DEBUG_PARAMS di default.")
# Costruisci corpus (limitato a debug_n file per velocita')
corpus = build_corpus(base_dir, max_files=args.debug_n)
if not corpus:
sys.exit("Corpus vuoto. Controlla --base-dir.")
debug_export(
corpus = corpus,
base_dir = base_dir,
out_dir = Path(args.debug_dir),
n_files = args.debug_n,
spade_params = spade_params,
)
return
# ── Corpus ───────────────────────────────────────────────────────────────
# Banner + corpus construction for the full sweep (non-debug path).
print("\n" + "=" * 65)
print("CORPUS + LIMITER SINTETICO (Case 1 — threshold-based)")
print("=" * 65)
print(f" Base dir : {base_dir}")
print(f" Threshold : −{LIMITER_THRESHOLD_DB} dBFS")
print(f" Release : {LIMITER_RELEASE_MS} ms")
print(f" Level align: NESSUNO — loudness invariata per costruzione")
print(f" Rumore rosa: {PINK_NOISE_LEVEL_DB} dB rel. peak "
      f"(simula sottofondo musicale sotto il transiente)")
corpus = build_corpus(base_dir, max_files=args.corpus_size)
if not corpus:
    sys.exit("Corpus vuoto. Controlla --base-dir e le cartelle.")
# ── GPU warm-up: force MCLK to maximum before the first trial ────────────
# On RDNA2 (RX 6700 XT) the memory clock idles at 96 MHz and takes ~200 ms
# to ramp up to 1750 MHz. A small first batch would leave MCLK low for the
# whole trial; this dummy dispatch forces the ramp-up ahead of time.
try:
    import torch
    if torch.cuda.is_available():
        _wd = "cuda"
        _sz = 8192 * 1024  # 8 MB → enough to trigger the MCLK ramp
        _dummy = torch.randn(_sz, device=_wd, dtype=torch.float32)
        _dummy2 = _dummy * 2.0 + _dummy.roll(1)
        torch.cuda.synchronize()
        del _dummy, _dummy2
        print(" ✓ GPU warm-up completato (MCLK ramp forzato)")
except Exception:
    # Best-effort: missing torch / no GPU is fine, the sweep still runs.
    pass
print(f"\n ✓ {len(corpus)} file nel corpus\n")
# Column width for the per-file summary table below.
col_w = max(len(item["file"]) for item in corpus) + 2
for item in corpus:
    # RMS / peak of the ground-truth residual — quick sanity check per file.
    rms = float(np.sqrt(np.mean(item["gt_res"] ** 2)))
    peak = float(np.max(np.abs(item["gt_res"])))
    print(f" {item['file']:<{col_w}} sr={item['sr']} "
          f"GT rms={rms:.4f} peak={peak:.4f}")
# ── Study ─────────────────────────────────────────────────────────────────
print(f"\n{'='*65}")
print(f"OTTIMIZZAZIONE BAYESIANA — {args.trials} trial")
print(f"TPE (multivariate) + MedianPruner | storage: {STUDY_NAME}.db")
print(f"{'='*65}\n")
# load_if_exists=True is what makes --resume work against the same SQLite DB.
study = optuna.create_study(
    study_name     = STUDY_NAME,
    storage        = storage,
    sampler        = sampler,
    pruner         = pruner,
    direction      = "maximize",
    load_if_exists = True,
)
# ── Progress bar (rich → tqdm → plain fallback) ───────────────────────────
# Detect which progress-bar backend is available, preferring rich.
try:
    from rich.progress import (
        Progress, BarColumn, TextColumn,
        TimeElapsedColumn, TimeRemainingColumn, MofNCompleteColumn,
    )
    _has_rich_progress = True
except ImportError:
    _has_rich_progress = False
try:
    import tqdm as _tqdm_mod
    _has_tqdm = True
except ImportError:
    _has_tqdm = False
# Shared state updated by the Optuna callback.
# Pre-populated with the trials already in the DB in the --resume case,
# so the progress bar shows the correct counts from the start.
_existing_complete = [t for t in study.trials
                      if t.state == optuna.trial.TrialState.COMPLETE]
_existing_pruned = [t for t in study.trials
                    if t.state == optuna.trial.TrialState.PRUNED]
if _existing_complete:
    _best_existing = max(_existing_complete, key=lambda t: t.value or 0)
    _init_best = _best_existing.value or 0.0
    _init_best_p = dict(_best_existing.params)
    _init_last = _init_best
else:
    # No prior trials: -inf sentinel so any real score becomes the new best.
    _init_best, _init_best_p, _init_last = float("-inf"), {}, float("-inf")
_state = {
    "done": len(_existing_complete),    # completed trials so far
    "pruned": len(_existing_pruned),    # pruned trials so far
    "best": _init_best,                 # best score seen
    "best_p": _init_best_p,             # params of the best trial
    "last": _init_last,                 # score of the most recent trial
    "t0": time.time(),                  # wall-clock start (for ETA)
    "n_total": len(_existing_complete) + len(_existing_pruned) + args.trials,
}
def _fmt_best(state: dict) -> str:
"""Stringa compatta con i parametri del best trial corrente."""
bp = state["best_p"]
if not bp:
return "—"
win = 2 ** bp.get("win_exp", 10)
hop = win // bp.get("hop_div", 4)
return (f"δ={bp.get('delta_db',0):.2f} "
f"win={win} hop={hop} "
f"rel={bp.get('release_ms',0):.0f}ms "
f"gain={bp.get('max_gain_db',0):.1f}dB")
# ── Rich progress bar ─────────────────────────────────────────────────────
if _has_rich_progress:
    # Custom column layout: trial counter, bar, last/best score fields,
    # pruned count, elapsed time and ETA.
    progress = Progress(
        TextColumn("[bold cyan]Trial[/] [cyan]{task.completed}/{task.total}[/]"),
        BarColumn(bar_width=32),
        MofNCompleteColumn(),
        TextColumn(" score [green]{task.fields[last]:.5f}[/]"),
        TextColumn(" best [bold green]{task.fields[best]:.5f}[/]"),
        TextColumn(" [dim]pruned {task.fields[pruned]}[/]"),
        TimeElapsedColumn(),
        TextColumn("ETA"),
        TimeRemainingColumn(),
        refresh_per_second=4,
        transient=False,
    )
    task_id = None  # created inside the context manager below
def on_trial_end(study, trial):
    # Optuna callback (rich backend): fold the finished trial into the
    # shared _state dict, then refresh the progress-bar task fields.
    if trial.state == optuna.trial.TrialState.COMPLETE:
        score = trial.value or 0.0
        _state["done"] += 1
        _state["last"] = score
        if score > _state["best"]:
            _state["best"] = score
            _state["best_p"] = dict(study.best_params)
    elif trial.state == optuna.trial.TrialState.PRUNED:
        _state["pruned"] += 1
    # Advance the bar for every finished trial, whatever its state.
    progress.update(
        task_id,
        advance=1,
        last=_state["last"],
        best=max(_state["best"], 0.0),
        pruned=_state["pruned"],
    )
t0 = time.time()
try:
    with progress:
        # Seed the task with the counts already in the DB (resume case).
        task_id = progress.add_task(
            "sweep",
            total     = _state["n_total"],
            completed = _state["done"] + _state["pruned"],
            last      = max(_state["last"], 0.0),
            best      = max(_state["best"], 0.0),
            pruned    = _state["pruned"],
        )
        study.optimize(
            make_objective(corpus),
            n_trials          = args.trials,
            callbacks         = [on_trial_end],
            show_progress_bar = False,
        )
except KeyboardInterrupt:
    # Ctrl-C: trials already finished are persisted in the SQLite storage.
    print("\n[!] Interrotto — risultati parziali salvati.")
# ── tqdm fallback ─────────────────────────────────────────────────────────
elif _has_tqdm:
    import tqdm
    # Trials already in the DB (resume case) count towards the bar's start.
    _already = _state["done"] + _state["pruned"]
    pbar = tqdm.tqdm(
        total      = _state["n_total"],
        initial    = _already,
        unit       = "trial",
        bar_format = "{l_bar}{bar}| {n}/{total} [{elapsed}<{remaining}]",
    )
    if _already > 0:
        # Show the resumed best/last scores right away.
        pbar.set_postfix(
            score  = f"{max(_state['last'], 0.0):.5f}",
            best   = f"{max(_state['best'], 0.0):.5f}",
            pruned = _state["pruned"],
        )
def on_trial_end(study, trial):
    # Optuna callback (tqdm backend): update the shared counters, then
    # advance the bar and refresh the postfix fields.
    if trial.state == optuna.trial.TrialState.COMPLETE:
        score = trial.value or 0.0
        _state["done"] += 1
        _state["last"] = score
        if score > _state["best"]:
            _state["best"] = score
            _state["best_p"] = dict(study.best_params)
    elif trial.state == optuna.trial.TrialState.PRUNED:
        _state["pruned"] += 1
    pbar.update(1)
    pbar.set_postfix(
        score=f"{_state['last']:.5f}",
        best=f"{_state['best']:.5f}",
        pruned=_state["pruned"],
    )
t0 = time.time()
try:
    study.optimize(
        make_objective(corpus),
        n_trials          = args.trials,
        callbacks         = [on_trial_end],
        show_progress_bar = False,
    )
except KeyboardInterrupt:
    # Ctrl-C: trials already finished are persisted in the SQLite storage.
    print("\n[!] Interrotto — risultati parziali salvati.")
finally:
    # Always close the bar so the terminal is left in a clean state.
    pbar.close()
# ── Plain fallback ────────────────────────────────────────────────────────
else:
    def on_trial_end(study, trial):
        # Optuna callback for the no-dependency fallback: draws a manual,
        # \r-refreshed progress line after each completed trial.
        fin = trial.state == optuna.trial.TrialState.COMPLETE
        prn = trial.state == optuna.trial.TrialState.PRUNED
        if fin:
            _state["done"] += 1
            _state["last"] = trial.value or 0.0
            if _state["last"] > _state["best"]:
                _state["best"] = _state["last"]
                _state["best_p"] = dict(study.best_params)
            elapsed = time.time() - _state["t0"]
            done_tot = _state["done"] + _state["pruned"]
            # Naive ETA: average time per finished trial × trials remaining.
            eta_s = (elapsed / done_tot) * (_state["n_total"] - done_tot) if done_tot else 0
            # ★ marks a fresh best (float-tolerant equality check).
            is_best = abs(_state["last"] - _state["best"]) < 1e-9
            bar_n = int(32 * done_tot / max(_state["n_total"], 1))
            bar = "█" * bar_n + "░" * (32 - bar_n)
            print(f"\r[{bar}] {done_tot}/{_state['n_total']}"
                  f" {'★' if is_best else ' '}score={_state['last']:.5f}"
                  f" best={_state['best']:.5f}"
                  f" pruned={_state['pruned']}"
                  f" ETA {eta_s/60:.1f}min ", end="", flush=True)
        elif prn:
            # NOTE(review): pruned trials only bump the counter; the line is
            # not redrawn until the next completed trial — confirm intended.
            _state["pruned"] += 1
t0 = time.time()
try:
    study.optimize(
        make_objective(corpus),
        n_trials          = args.trials,
        callbacks         = [on_trial_end],
        show_progress_bar = False,
    )
except KeyboardInterrupt:
    # Ctrl-C: trials already finished are persisted in the SQLite storage.
    print("\n[!] Interrotto — risultati parziali salvati.")
print()  # newline after the \r-refreshed progress line
# Final summary: trial counts, timing, top-N report and CSV export.
elapsed = time.time() - t0
n_done = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE)
n_prune = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.PRUNED)
print(f"\n Completati: {n_done} | Pruned: {n_prune}"
      f" | Tempo totale: {elapsed/60:.1f} min"
      f" | Media: {elapsed/max(n_done+n_prune,1):.1f} s/trial")
print_report(study, top_n=args.top)
save_csv(study)
print("\nDone.")
# Script entry point.
if __name__ == "__main__":
    main()