| """ |
| run_smart_sweep.py — S-SPADE · Bayesian parameter search (v2) |
| =================================================================== |
| |
| PIPELINE GROUND-TRUTH (Case 1 — threshold-based limiter) |
| --------------------------------------------------------- |
| Il limiter sintetico è threshold-based: |
| - Originale normalizzato a 0 dBFS peak |
| - Limiter: attua solo sui picchi sopra la soglia → output max peak ≈ −threshold_db |
| - Il CORPO del segnale (loudness percepita) rimane invariato per definizione |
| - NON si applica nessun gain al segnale limitato dopo il processing |
| |
| Allineamento per il calcolo residual: |
| Originale e limited sono già sulla stessa scala (loudness uguale, picchi diversi). |
| Nessuna normalizzazione LUFS / RMS necessaria o corretta. |
| |
| GT_res = original_0dBFS − limited (scale identiche) |
| res_iter = spade_output − limited (idem) |
| |
| Entrambi vengono poi normalizzati a RESIDUAL_DBFS peak SOLO per rendere |
| comparabili file con diversi livelli assoluti — non altera la logica. |
| |
| Metrica ideale: |
| GT_res ≡ res_iter → cosine_sim = 1.0 → differenza = −∞ dB |
| |
| Ottimizzatore: Optuna TPE (Bayesian) + MedianPruner |
| Storage: SQLite (riprendibile con --resume) |
| Corpus: tutti i drum sample in Kicks / Snares / Perc / Tops |
| |
| DIPENDENZE |
| ---------- |
| pip install numpy scipy soundfile optuna rich |
| (pyloudnorm NON necessario) |
| |
| USO |
| --- |
| python run_smart_sweep.py # 200 trial |
| python run_smart_sweep.py --trials 50 # test rapido |
| python run_smart_sweep.py --resume # riprende da DB |
| python run_smart_sweep.py --report # solo risultati |
| python run_smart_sweep.py --base-dir /path/SPADE # cartella custom |
| """ |
|
|
| import argparse |
| import logging |
| import sys |
| import time |
| import warnings |
| from pathlib import Path |
| from typing import Dict, List, Optional |
|
|
| import numpy as np |
| import scipy.signal as sig |
| import soundfile as sf |
|
|
| logging.getLogger("optuna").setLevel(logging.WARNING) |
|
|
| |
# Optional dependency: Optuna drives the Bayesian search (TPE sampler +
# MedianPruner). The flag lets main() fail with a clear message instead of
# an ImportError at startup.
try:
    import optuna
    from optuna.samplers import TPESampler
    from optuna.pruners import MedianPruner
    _HAS_OPTUNA = True
except ImportError:
    _HAS_OPTUNA = False
    warnings.warn("optuna non trovato — pip install optuna")


# Optional dependency: rich renders the ranking tables; without it the
# report code falls back to plain print().
try:
    from rich.console import Console
    from rich.table import Table
    _console = Console()
    _HAS_RICH = True
except ImportError:
    _HAS_RICH = False
    _console = None


# Project dependency: the S-SPADE declipper under test (expected to live in
# the same directory as this script).
try:
    from spade_declip_v12 import declip, DeclipParams
    _HAS_SPADE = True
except ImportError:
    _HAS_SPADE = False
    warnings.warn("spade_declip_v12.py non trovato")
|
|
| |
| |
| |
|
|
# Sub-folders of --base-dir that are scanned for drum samples.
DRUM_DIRS = ["Kicks", "Snares", "Perc", "Tops"]


# Synthetic limiter used to build the ground truth (Case 1): threshold in dB
# below 0 dBFS peak, and the exponential release time constant.
LIMITER_THRESHOLD_DB = 3.0
LIMITER_RELEASE_MS = 80.0


# Peak level (dBFS) both residuals (GT and SPADE) are normalized to before
# comparison — cross-file comparability only; it does not change the logic
# because both residuals get the identical treatment.
RESIDUAL_DBFS = -3.0


# Level of the pink-noise bed mixed under each sample, relative to the
# sample's own peak (negative = below the drum peak).
PINK_NOISE_LEVEL_DB = -20.0


# Optuna study name (also names the SQLite storage file) and CSV output path.
STUDY_NAME = "spade_smart_v2_thr3db"
OUT_CSV = "smart_sweep_results.csv"


# Solver settings held constant across every trial; only the parameters
# suggested in make_objective() are searched.
FIXED_SOLVER = dict(
    algo = "sspade",
    frame = "rdft",
    mode = "soft",
    s = 1,
    r = 1,
    n_jobs = 1,
    verbose = False,
    show_progress = False,
    use_gpu = True,

)


# LF/HF split frequency (Hz) used when the multiband flag is enabled.
BAND_CROSSOVER_HZ = 250.0
|
|
def ensure_2d(a: np.ndarray) -> np.ndarray:
    """Return *a* with a channel axis: mono (N,) becomes (N, 1); 2-D passes through."""
    if a.ndim == 1:
        return a[:, None]
    return a
|
|
|
|
def normalize_to_0dBFS(a: np.ndarray) -> np.ndarray:
    """Scale to 0 dBFS peak — used only on the original as the common reference."""
    peak = np.max(np.abs(a))
    if peak > 1e-12:
        return a / peak
    return a
|
|
|
|
def normalize_peak(a: np.ndarray, target_dbfs: float) -> np.ndarray:
    """
    Scale so the peak sits at ``target_dbfs`` dBFS.

    Applied ONLY to residuals for cross-file comparability; it does not
    change the logic because GT and iter residuals are scaled identically.
    """
    peak = np.max(np.abs(a))
    if peak <= 1e-12:
        return a
    target_lin = 10.0 ** (target_dbfs / 20.0)
    return a * (target_lin / peak)
|
|
|
|
def generate_pink_noise(n_samples: int, n_channels: int, rng: np.random.Generator) -> np.ndarray:
    """
    Generate pink (1/f) noise by IIR-filtering white noise with a fixed
    pole/zero approximation of a 1/f magnitude response (Kellett-style
    economy filter, roughly within ±1 dB over 20 Hz – 20 kHz).

    Returns an array of shape (n_samples, n_channels) with each channel
    RMS-normalized to 1.0 (the final level is set later through
    PINK_NOISE_LEVEL_DB when mixing). Channels are filtered from
    independent white-noise draws, so they are mutually decorrelated.
    """
    # H(z) = B(z)/A(z): fixed coefficients approximating a 1/f spectrum.
    num = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    den = np.array([1.0, -2.494956002, 2.017265875, -0.522189400])

    pink_out = np.empty((n_samples, n_channels))
    for ch in range(n_channels):
        filtered = sig.lfilter(num, den, rng.standard_normal(n_samples))
        ch_rms = np.sqrt(np.mean(filtered ** 2))
        pink_out[:, ch] = filtered / (ch_rms + 1e-12)
    return pink_out
|
|
|
|
def mix_pink_noise(
    audio_0dBFS: np.ndarray,
    sr: int,
    level_db: float,
    rng: np.random.Generator,
) -> np.ndarray:
    """
    Mix pink noise into the signal at a level relative to its peak.

    level_db < 0 → the noise sits below the drum's peak (e.g. −20 dB).
    The noise lasts as long as the sample; a stereo sample gets stereo
    noise with independent channels (decorrelated, like a real musical
    background).

    The output may exceed 0 dBFS by a fraction of a dB: that is expected —
    the limiter that follows brings it back under the threshold.
    """
    was_mono = audio_0dBFS.ndim == 1
    audio = ensure_2d(audio_0dBFS)
    n_samples, n_channels = audio.shape

    noise = generate_pink_noise(n_samples, n_channels, rng)
    noise_gain = np.max(np.abs(audio)) * (10.0 ** (level_db / 20.0))
    mixed = audio + noise * noise_gain

    return mixed[:, 0] if was_mono else mixed
|
|
|
|
| |
| |
| |
|
|
def apply_brickwall_limiter(
    audio_0dBFS: np.ndarray,
    sr: int,
    threshold_db: float = LIMITER_THRESHOLD_DB,
    release_ms: float = LIMITER_RELEASE_MS,
) -> np.ndarray:
    """
    Threshold-based brickwall limiter.

    Input : audio_0dBFS — already at 0 dBFS peak, shape (N,) or (N, C)
    Output: limited signal, same shape — NOT boosted, NOT clipped

    Gain envelope:
        if |x[n]| > threshold_lin → target_gain = threshold_lin / |x[n]|
        otherwise                 → target_gain = 1.0
    Attack : instantaneous (1 sample, true brickwall)
    Release: exponential with time constant release_ms

    Post-processing: NONE.
    The output's max peak is ≈ −threshold_db dBFS and the perceived
    loudness is unchanged with respect to the input.
    """
    threshold_lin = 10 ** (-abs(threshold_db) / 20.0)
    release_coef = np.exp(-1.0 / max(release_ms * sr / 1000.0, 1e-9))

    buf = ensure_2d(audio_0dBFS).copy()
    n_samples, n_channels = buf.shape
    limited = np.empty_like(buf)

    for ch_idx in range(n_channels):
        samples = buf[:, ch_idx]
        gain_env = np.empty(n_samples)
        env = 1.0
        for i, x in enumerate(samples):
            level = abs(x)
            target = threshold_lin / level if level > threshold_lin else 1.0
            # Instant attack (env snaps straight down to target);
            # one-pole smoothing on the way back up (release).
            if target < env:
                env = target
            else:
                env = release_coef * env + (1.0 - release_coef) * target
            gain_env[i] = env
        limited[:, ch_idx] = samples * gain_env

    return limited[:, 0] if audio_0dBFS.ndim == 1 else limited
|
|
|
|
| |
| |
| |
|
|
| def cosine_sim_tf( |
| gt: np.ndarray, |
| est: np.ndarray, |
| sr: int, |
| win_samples: int = 1024, |
| hop_samples: int = 256, |
| n_bands: int = 12, |
| ) -> float: |
| """ |
| Similarità coseno media su micro-finestre tempo-frequenziali. |
| Input: entrambi già a RESIDUAL_DBFS peak. |
| Output: scalare in [0, 1]. Target ideale = 1.0. |
| """ |
| L = min(gt.shape[0], est.shape[0]) |
| g = (gt[:L, 0] if gt.ndim == 2 else gt[:L]).copy() |
| e = (est[:L, 0] if est.ndim == 2 else est[:L]).copy() |
|
|
| win = min(win_samples, max(32, L // 4)) |
| hop = min(hop_samples, win // 2) |
|
|
| if L < win or win < 32: |
| denom = np.linalg.norm(g) * np.linalg.norm(e) + 1e-12 |
| return float(np.dot(g, e) / denom) |
|
|
| _, _, Zg = sig.stft(g, fs=sr, window="hann", |
| nperseg=win, noverlap=win - hop, |
| boundary=None, padded=False) |
| _, _, Ze = sig.stft(e, fs=sr, window="hann", |
| nperseg=win, noverlap=win - hop, |
| boundary=None, padded=False) |
|
|
| n_freqs, n_frames = Zg.shape |
| if n_frames == 0: |
| return float(np.dot(g, e) / (np.linalg.norm(g) * np.linalg.norm(e) + 1e-12)) |
|
|
| edges = np.unique(np.round( |
| np.logspace(0, np.log10(max(n_freqs, 2)), min(n_bands, n_freqs) + 1) |
| ).astype(int)) |
| edges = np.clip(edges, 0, n_freqs) |
|
|
| sims = [] |
| for i in range(len(edges) - 1): |
| f0, f1 = int(edges[i]), int(edges[i + 1]) |
| if f1 <= f0: |
| continue |
| Mg = np.abs(Zg[f0:f1, :]) |
| Me = np.abs(Ze[f0:f1, :]) |
| dot = np.sum(Mg * Me, axis=0) |
| norm_g = np.sqrt(np.sum(Mg ** 2, axis=0)) + 1e-12 |
| norm_e = np.sqrt(np.sum(Me ** 2, axis=0)) + 1e-12 |
| sims.extend((dot / (norm_g * norm_e)).tolist()) |
|
|
| return float(np.mean(sims)) if sims else 0.0 |
|
|
|
|
| |
| |
| |
|
|
def build_corpus(base_dir: Path, max_files: Optional[int] = None) -> List[Dict]:
    """
    For each drum sample:
      1. Load and normalize to 0 dBFS peak (common cross-file reference)
      2. Mix in pink noise at PINK_NOISE_LEVEL_DB relative to the peak
         (the mix happens in float and may temporarily exceed 0 dBFS)
      3. Normalize the mix (drum + noise) back to 0 dBFS peak — the common
         reference for the rest of the pipeline
      4. Apply the synthetic limiter to the normalized mix → limited
      5. GT_res_raw = (drum + noise) − limited (same scale, no gain applied)
      6. Discard files where the limiter never engages
      7. Normalize GT_res to RESIDUAL_DBFS (cross-file comparability only)

    The noise is reproducible: every file uses a deterministic seed derived
    from its index in the corpus, so trials stay comparable to each other.
    """
    corpus = []
    extensions = {".wav", ".flac", ".aif", ".aiff"}
    file_index = 0  # drives the per-file noise seed

    for folder in DRUM_DIRS:
        d = base_dir / folder
        if not d.exists():
            print(f" [WARN] Cartella non trovata: {d}")
            continue
        for f in sorted(d.glob("*")):
            if f.suffix.lower() not in extensions:
                continue
            try:
                audio, sr = sf.read(str(f), always_2d=True)
                audio = audio.astype(float)
            except Exception as exc:
                print(f" [WARN] {f.name}: {exc}")
                continue

            # Too short to window/limit meaningfully.
            if audio.shape[0] < 64:
                continue

            # Step 1 — common 0 dBFS reference.
            orig = normalize_to_0dBFS(audio)

            # Step 2 — deterministic pink-noise mix (seed = corpus index).
            rng = np.random.default_rng(seed=file_index)
            orig_with_noise = ensure_2d(mix_pink_noise(orig, sr,
                                                       PINK_NOISE_LEVEL_DB, rng))
            file_index += 1

            # Step 3 — renormalize the mix to 0 dBFS before the limiter.
            orig_with_noise = ensure_2d(normalize_to_0dBFS(orig_with_noise))

            # Step 4 — synthetic limiter (the signal SPADE must undo).
            limited = ensure_2d(apply_brickwall_limiter(orig_with_noise, sr))

            # Step 5 — ground-truth residual on the same scale, no extra gain.
            gt_res_raw = orig_with_noise - limited

            # Step 6 — skip files where the limiter never engaged.
            if np.max(np.abs(gt_res_raw)) < 1e-6:
                print(f" [SKIP] {f.name} — picco sotto la soglia, limiter inattivo")
                continue

            # Step 7 — peak-normalize the GT residual (comparability only).
            gt_res = normalize_peak(gt_res_raw, RESIDUAL_DBFS)

            corpus.append({
                "file" : f.name,
                "sr" : sr,
                "limited" : limited,
                "gt_res" : gt_res,
            })

            if max_files and len(corpus) >= max_files:
                return corpus

    return corpus
|
|
|
|
| |
| |
| |
|
|
def evaluate_one(item: Dict, params: dict) -> Optional[float]:
    """
    Run SPADE on the limited signal, compute its residual and compare it
    against the ground truth.

    params holds pure SPADE parameters plus high-level flags:
        multiband    (bool)  -- split LF/HF, process the bands separately
        macro_expand (bool)  -- envelope pre-pass to recover the LF body
        macro_ratio  (float) -- expansion ratio (1.0 = bypass)
        lf_delta_db  (float) -- delta_db for the LF band (<= BAND_CROSSOVER_HZ);
                                the standard delta_db is used for the HF band
        lf_cutoff_hz (float) -- v12: Hz below which LF bins are reserved (0 = off)
        lf_k_min     (int)   -- v12: guaranteed LF slots per ADMM iteration

    Returns the time-frequency cosine similarity in [0, 1], or None when
    SPADE raises on this item.
    """
    try:
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]

        # Pop the high-level flags so only pure solver kwargs remain in p2.
        p2 = dict(params)
        multiband = p2.pop("multiband", False)
        macro_expand = p2.pop("macro_expand", False)
        macro_ratio = p2.pop("macro_ratio", 1.0)
        # NOTE: lf_delta_db defaults to the delta_db still present in p2.
        lf_delta_db = p2.pop("lf_delta_db", p2.get("delta_db", 1.5))

        spade_kw = dict(
            multiband = multiband,
            macro_expand = macro_expand,
            # macro_ratio is forced to bypass when the expand pass is off.
            macro_ratio = macro_ratio if macro_expand else 1.0,
            macro_release_ms = 200.0,
            macro_attack_ms = 10.0,
        )
        if multiband:
            spade_kw["band_crossovers"] = (BAND_CROSSOVER_HZ,)
            spade_kw["band_delta_db"] = (lf_delta_db, p2["delta_db"])

        p = DeclipParams(sample_rate=sr, **FIXED_SOLVER, **p2, **spade_kw)
        fixed, _ = declip(limited, p)
        fixed_2d = ensure_2d(fixed)

        # Same residual definition as the ground truth: output − limited,
        # then the identical peak normalization applied to the GT residual.
        res_raw = fixed_2d - limited
        res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)

        return cosine_sim_tf(gt_res, res_iter, sr)

    except Exception as exc:
        # Best-effort: one failing file must not abort the whole trial.
        warnings.warn(f"evaluate_one ({item['file']}): {exc}")
        return None
|
|
|
|
| |
| |
| |
|
|
def make_objective(corpus: List[Dict]):
    """
    Build the Optuna objective over the whole corpus.

    NOTE(review): the names, ranges and ORDER of the trial.suggest_* calls
    define the search space. Do not reorder or rename them — resumed studies
    and the TPE sampler depend on the space staying identical.
    """
    def objective(trial: "optuna.Trial") -> float:
        # --- pure SPADE solver parameters --------------------------------
        delta_db = trial.suggest_float("delta_db", 1.5, 3.5, step=0.05)
        win_exp = trial.suggest_int ("win_exp", 9, 11)
        win = 2 ** win_exp
        hop_div = trial.suggest_categorical("hop_div", [4, 8])
        hop = win // hop_div
        rel_ms = trial.suggest_float("release_ms", 10.0, 200.0, step=5.0)
        gain_db = trial.suggest_float("max_gain_db", 2.0, 12.0, step=0.5)
        eps = trial.suggest_categorical("eps", [0.03, 0.05, 0.1])
        max_iter = trial.suggest_categorical("max_iter", [250, 500, 1000])

        # --- high-level processing flags ---------------------------------
        multiband = trial.suggest_categorical("multiband", [False, True])
        macro_expand = trial.suggest_categorical("macro_expand", [False, True])

        # Always suggested (even when the flags above are off) so the search
        # space keeps a fixed shape across trials.
        lf_delta_db = trial.suggest_float("lf_delta_db", 0.5, 2.0, step=0.05)
        macro_ratio = trial.suggest_float("macro_ratio", 1.1, 2.0, step=0.05)

        # --- v12 LF-bin reservation parameters ---------------------------
        lf_cutoff_hz = trial.suggest_categorical("lf_cutoff_hz", [0.0, 500.0, 1000.0, 2000.0])
        lf_k_min = trial.suggest_int("lf_k_min", 0, 16)

        params = dict(
            delta_db = delta_db,
            window_length = win,
            hop_length = hop,
            release_ms = rel_ms,
            max_gain_db = gain_db,
            eps = eps,
            max_iter = max_iter,

            multiband = multiband,
            lf_delta_db = lf_delta_db,
            macro_expand = macro_expand,
            macro_ratio = macro_ratio,

            lf_cutoff_hz = lf_cutoff_hz,
            lf_k_min = lf_k_min,
        )

        scores = []

        # Deterministic per-trial shuffle so the mid-trial pruning report is
        # not biased by the fixed corpus ordering.
        rng_shuffle = np.random.default_rng(trial.number)
        shuffled_corpus = rng_shuffle.permutation(len(corpus)).tolist()
        midpoint = len(corpus) // 2

        for step, idx in enumerate(shuffled_corpus):
            item = corpus[idx]
            sc = evaluate_one(item, dict(params))
            if sc is not None:
                scores.append(sc)
            # Single intermediate report at the halfway point; MedianPruner
            # may cut the trial here.
            if step == midpoint and scores:
                trial.report(float(np.mean(scores)), step=step)
                if trial.should_prune():
                    raise optuna.TrialPruned()

        # All files failed → neutral (worst) score rather than an exception.
        if not scores:
            return 0.0
        mean_score = float(np.mean(scores))
        trial.report(mean_score, step=len(corpus))
        return mean_score

    return objective
|
|
|
|
| |
| |
| |
|
|
def print_report(study: "optuna.Study", top_n: int = 20):
    """
    Print the ranked table of completed trials (rich when available, plain
    text otherwise) plus a ready-to-paste DeclipParams snippet for the best.
    """
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        key=lambda t: t.value or 0, reverse=True,
    )
    if not trials:
        print("Nessun trial completato.")
        return

    if _HAS_RICH:
        _console.rule("[bold cyan]RISULTATI SWEEP BAYESIANO[/]")
        tbl = Table(show_header=True, header_style="bold cyan", show_lines=False)
        for col, w in [("#",4),("score",9),("ddb",6),("LFd",5),("win",6),
                       ("hop",4),("rel",6),("gain",6),("eps",5),("iter",5),
                       ("MB",3),("ME",3),("MR",5),("LFcut",6),("LFk",4)]:
            tbl.add_column(col, justify="right", width=w)
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            # The study stores win_exp / hop_div, not win / hop — derive them.
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            mb = "Y" if p.get("multiband") else "n"
            me = "Y" if p.get("macro_expand") else "n"
            # .get() defaults keep old studies (without v12 params) readable.
            lfc = p.get("lf_cutoff_hz", 0.0)
            lfk = p.get("lf_k_min", 0)
            sty = "bold green" if rank == 1 else ("yellow" if rank <= 3 else "")
            tbl.add_row(
                str(rank), f"{t.value:.5f}",
                f"{p['delta_db']:.2f}",
                f"{p.get('lf_delta_db', p['delta_db']):.2f}",
                str(win), str(hop),
                f"{p['release_ms']:.0f}", f"{p['max_gain_db']:.1f}",
                str(p['eps']), str(p['max_iter']),
                mb, me, f"{p.get('macro_ratio', 1.0):.2f}",
                f"{lfc:.0f}", str(lfk),
                style=sty,
            )
        _console.print(tbl)
    else:
        # Plain-text fallback mirroring the rich table columns.
        hdr = (f"{'#':>3} {'score':>8} {'ddb':>5} {'LFd':>5} {'win':>5}"
               f" {'hop':>4} {'rel':>6} {'gain':>5} {'eps':>5} {'iter':>5}"
               f" {'MB':>3} {'ME':>3} {'MR':>5} {'LFcut':>6} {'LFk':>4}")
        print(hdr); print("-" * len(hdr))
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            mb = "Y" if p.get("multiband") else "n"
            me = "Y" if p.get("macro_expand") else "n"
            lfc = p.get("lf_cutoff_hz", 0.0)
            lfk = p.get("lf_k_min", 0)
            print(f"{rank:>3} {t.value:>8.5f} {p['delta_db']:>5.2f}"
                  f" {p.get('lf_delta_db', p['delta_db']):>5.2f} {win:>5}"
                  f" {hop:>4} {p['release_ms']:>6.0f} {p['max_gain_db']:>5.1f}"
                  f" {str(p['eps']):>5} {p['max_iter']:>5}"
                  f" {mb:>3} {me:>3} {p.get('macro_ratio', 1.0):>5.2f}"
                  f" {lfc:>6.0f} {lfk:>4}")

    # Copy-paste snippet for the winning configuration.
    best = trials[0]
    p = best.params
    win = 2 ** p["win_exp"]
    hop = win // p["hop_div"]
    n_pruned = sum(1 for t in study.trials
                   if t.state == optuna.trial.TrialState.PRUNED)

    print("\n" + "═" * 60)
    print("CONFIG OTTIMALE")
    print("═" * 60)
    print(f"""
params = DeclipParams(
    algo = "sspade",
    frame = "rdft",
    mode = "soft",
    delta_db = {p['delta_db']:.2f},
    window_length = {win},
    hop_length = {hop},
    release_ms = {p['release_ms']:.1f},
    max_gain_db = {p['max_gain_db']:.1f},
    eps = {p['eps']},
    max_iter = {p['max_iter']},
    sample_rate = sr,
    multiband = {p.get('multiband', False)},
    band_crossovers = ({BAND_CROSSOVER_HZ},),
    band_delta_db = ({p.get('lf_delta_db', p['delta_db']):.2f}, {p['delta_db']:.2f}),
    macro_expand = {p.get('macro_expand', False)},
    macro_ratio = {p.get('macro_ratio', 1.0):.2f},
    lf_cutoff_hz = {p.get('lf_cutoff_hz', 0.0):.1f}, # v12
    lf_k_min = {p.get('lf_k_min', 0)}, # v12
    n_jobs = -1,
    show_progress = True,
)""")
    print(f"\n→ Best score : {best.value:.5f}")
    print(f" Trials done : {len(trials)}")
    print(f" Pruned : {n_pruned}")
|
|
|
|
|
|
| |
| |
| |
|
|
| |
| |
# Fallback SPADE parameters for --debug-export when no study DB with a
# completed best trial is available.
DEBUG_PARAMS = dict(
    delta_db = 1.5,
    window_length = 1024,
    hop_length = 256,
    release_ms = 100.0,
    max_gain_db = 6.0,
    eps = 0.05,
    max_iter = 500,
)
|
|
|
|
| def _pk_dbfs(a: np.ndarray) -> float: |
| pk = float(np.max(np.abs(a))) |
| return 20.0 * np.log10(pk) if pk > 1e-12 else -999.0 |
|
|
|
|
| def _rms_dbfs(a: np.ndarray) -> float: |
| rms = float(np.sqrt(np.mean(a.astype(float) ** 2))) |
| return 20.0 * np.log10(rms) if rms > 1e-12 else -999.0 |
|
|
|
|
def _write_wav(path: Path, audio: np.ndarray, sr: int) -> None:
    """Write a float32 WAV without clipping. Warns when the peak exceeds 1.0."""
    a2d = ensure_2d(audio).astype(np.float32)
    pk = float(np.max(np.abs(a2d)))
    if pk > 1.0:
        # float32 keeps the overs; the warning is informational only.
        print(f" [WARN] {path.name}: peak={pk:.4f} > 1.0 "
              f"(+{20*np.log10(pk):.2f} dBFS) — float32, non clippato")
    sf.write(str(path), a2d, sr, subtype="FLOAT")
|
|
|
|
def debug_export(
    corpus: list,
    base_dir: Path,
    out_dir: Path,
    n_files: int,
    spade_params: dict,
) -> None:
    """
    Export debug WAVs for the first n_files items of the corpus.

    Six float32 WAVs are written per file:
        01_orig_with_noise  drum + pink noise, normalized to 0 dBFS peak
                            (the signal before the limiter)
        02_limited          synthetic limiter output (SPADE's input)
        03_gt_residual      orig_with_noise - limited, @RESIDUAL_DBFS peak
        04_spade_output     SPADE output (float32, may exceed 0 dBFS)
        05_res_iter         spade_output - limited, @RESIDUAL_DBFS peak
        06_diff_residuals   gt_residual - res_iter
                            ideal = silence = -inf dB

    Prints a table with peak dBFS and RMS dBFS per track.

    EXPECTED levels:
        01 peak =  0.00 dBFS (normalized)
        02 peak ~ -LIMITER_THRESHOLD_DB dBFS (e.g. -1.5 dBFS)
        03 peak =  RESIDUAL_DBFS (e.g. -3.0 dBFS)
        04 peak may be > 0 dBFS (recovered transient)
        05 peak =  RESIDUAL_DBFS (e.g. -3.0 dBFS)
        06 peak << 0 dBFS (lower = SPADE closer to the GT)
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    items = corpus[:n_files]
    col_w = max(len(it["file"]) for it in items) + 2

    HDR = (f" {'file':<{col_w}} {'traccia':<22}"
           f" {'peak dBFS':>10} {'RMS dBFS':>9} note")
    SEP = " " + "-" * (len(HDR) - 2)

    print()
    if _HAS_RICH:
        _console.rule("[bold cyan]DEBUG EXPORT[/]")
    else:
        print("=" * 65)
        print("DEBUG EXPORT")
        print("=" * 65)

    print(f" Output dir : {out_dir}")
    print(f" SPADE params : delta_db={spade_params['delta_db']}"
          f" win={spade_params['window_length']}"
          f" hop={spade_params['hop_length']}"
          f" rel={spade_params['release_ms']}ms"
          f" gain={spade_params['max_gain_db']}dB")
    print(f" File esportati: {len(items)}")
    print()
    print(f" Livelli attesi:")
    print(f" 01_orig_with_noise : ~ 0.00 dBFS (normalizzato prima del limiter)")
    print(f" 02_limited : ~ {-LIMITER_THRESHOLD_DB:+.2f} dBFS (uscita limiter)")
    print(f" 03_gt_residual : = {RESIDUAL_DBFS:+.2f} dBFS (normalizzato)")
    print(f" 04_spade_output : > 0 dBFS possibile (transiente recuperato)")
    print(f" 05_res_iter : = {RESIDUAL_DBFS:+.2f} dBFS (normalizzato)")
    print(f" 06_diff_residuals : << 0 dBFS (piu' basso = pipeline piu' corretta)")
    print()
    print(HDR)

    # One (diff_vs_gt_db, cos_sim_td, diff_rms_db, gt_rms_db) tuple per file.
    diff_peaks = []

    for file_index, item in enumerate(items):
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]
        stem = Path(item["file"]).stem

        # Rebuild 01_orig_with_noise from the raw file, replaying the exact
        # per-file seed used by build_corpus (seed = corpus index).
        orig_with_noise = None
        for folder in DRUM_DIRS:
            candidate = base_dir / folder / item["file"]
            if candidate.exists():
                try:
                    raw, _ = sf.read(str(candidate), always_2d=True)
                    raw = raw.astype(float)
                    rng = np.random.default_rng(seed=file_index)
                    orig_0 = normalize_to_0dBFS(raw)
                    mixed = ensure_2d(mix_pink_noise(orig_0, sr,
                                                     PINK_NOISE_LEVEL_DB, rng))
                    orig_with_noise = ensure_2d(normalize_to_0dBFS(mixed))
                except Exception:
                    # Best-effort: fall through to the approximation below.
                    pass
                break

        if orig_with_noise is None:
            # Fallback: reconstruct an approximation from limited + gt_res by
            # undoing the RESIDUAL_DBFS normalization of the GT residual.
            gt_scale = 10 ** (RESIDUAL_DBFS / 20.0)
            lim_peak = 10 ** (-LIMITER_THRESHOLD_DB / 20.0)
            gt_raw = gt_res * (lim_peak / (gt_scale + 1e-12))
            orig_with_noise = ensure_2d(normalize_to_0dBFS(limited + gt_raw))

        # Run SPADE with the chosen parameters on the limited signal.
        try:
            p = DeclipParams(sample_rate=sr, **FIXED_SOLVER, **spade_params)
            fixed, _ = declip(limited.copy(), p)
            fixed_2d = ensure_2d(fixed)
        except Exception as exc:
            print(f" [ERRORE SPADE] {item['file']}: {exc}")
            continue

        # Residual actually recovered by SPADE (same definition as the GT).
        res_raw = fixed_2d - limited

        # GT residual rebuilt on the raw scale (not the stored, normalized one).
        gt_res_raw_approx = ensure_2d(orig_with_noise) - limited
        L = min(gt_res_raw_approx.shape[0], res_raw.shape[0])

        diff_raw = gt_res_raw_approx[:L] - res_raw[:L]

        # Time-domain cosine similarity between the two raw residuals.
        g_flat = gt_res_raw_approx[:L, 0] if gt_res_raw_approx.ndim == 2 else gt_res_raw_approx[:L]
        e_flat = res_raw[:L, 0] if res_raw.ndim == 2 else res_raw[:L]
        cos_sim_td = float(
            np.dot(g_flat, e_flat) /
            (np.linalg.norm(g_flat) * np.linalg.norm(e_flat) + 1e-12)
        )

        # Theoretical floor of the diff: the pink noise is part of the GT
        # residual but is not sparse, so SPADE cannot recover it.
        noise_gain_lin = 10 ** (PINK_NOISE_LEVEL_DB / 20.0)
        noise_floor_db = 20 * np.log10(noise_gain_lin + 1e-12) + RESIDUAL_DBFS

        # Diff magnitude relative to the GT residual (0 dB = diff as big as GT).
        diff_rms_db = _rms_dbfs(diff_raw[:L])
        gt_rms_db = _rms_dbfs(gt_res_raw_approx[:L])
        diff_vs_gt_db = diff_rms_db - gt_rms_db

        # Peak-normalized versions for listening / export.
        res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)
        diff_norm = normalize_peak(diff_raw, RESIDUAL_DBFS) if np.max(np.abs(diff_raw)) > 1e-12 else diff_raw

        diff_peaks.append((diff_vs_gt_db, cos_sim_td, diff_rms_db, gt_rms_db))

        # (name, audio, note) triples in export order.
        tracks = [
            ("01_orig_with_noise",
             orig_with_noise,
             f"drum+noise @0dBFS (input pipeline)"),
            ("02_limited",
             limited,
             f"uscita limiter (input SPADE) atteso: ~{-LIMITER_THRESHOLD_DB:+.2f}dBFS"),
            ("03_gt_residual",
             gt_res,
             f"GT residual @{RESIDUAL_DBFS:.0f}dBFS (include noise attenuation)"),
            ("04_spade_output",
             fixed_2d,
             f"SPADE output (float32, puo' >0dBFS)"),
            ("05_res_iter",
             res_iter,
             f"residual SPADE @{RESIDUAL_DBFS:.0f}dBFS (solo componente sparsa)"),
            ("06_diff_residuals",
             diff_norm,
             f"GT - iter @{RESIDUAL_DBFS:.0f}dBFS "
             f"cos_sim={cos_sim_td:.3f} diff/GT={diff_vs_gt_db:+.1f}dB "
             f"noise_floor≈{noise_floor_db:+.1f}dB"),
        ]

        # NOTE(review): computed but currently unused — the flags below use
        # fixed -12/-6 dB thresholds instead. Kept as-is.
        ok_threshold = noise_floor_db + 6.0
        warn_threshold = ok_threshold + 10.0

        print(SEP)
        for track_name, audio, note in tracks:
            pk = _pk_dbfs(audio)
            rms = _rms_dbfs(audio)

            # Convergence flag only on the diff track.
            flag = ""
            if track_name == "06_diff_residuals":
                if diff_vs_gt_db < -12: flag = "[OK] buona convergenza"
                elif diff_vs_gt_db < -6: flag = "[~] convergenza parziale"
                else: flag = "[WARN] diff elevato rispetto al GT"

            row = (f" {item['file']:<{col_w}} {track_name:<22}"
                   f" {pk:>+10.2f} {rms:>+9.2f} {note} {flag}")

            if _HAS_RICH:
                color = ("green" if "[OK]" in flag else
                         "yellow" if "[~]" in flag else
                         "red" if "[WARN]" in flag else "")
                colored_row = row.replace(flag, f"[{color or 'dim'}]{flag}[/]") if flag else row
                _console.print(colored_row)
            else:
                print(row)

            wav_path = out_dir / f"{stem}__{track_name}.wav"
            _write_wav(wav_path, audio, sr)

        def band_energy(audio_2d, sr, f_lo, f_hi):
            """RMS energy in dB of a band-pass [f_lo, f_hi] Hz."""
            mono = audio_2d[:, 0] if audio_2d.ndim == 2 else audio_2d
            N = len(mono)
            if N < 8:
                return -999.0

            nyq = sr / 2.0
            lo = max(f_lo / nyq, 1e-4)
            hi = min(f_hi / nyq, 0.9999)
            if lo >= hi:
                return -999.0
            # A lowpass stands in for "band from ~DC" to keep butter stable.
            if lo < 1e-3:
                b, a = sig.butter(4, hi, btype="low")
            else:
                b, a = sig.butter(4, [lo, hi], btype="band")
            filtered = sig.filtfilt(b, a, mono)
            return _rms_dbfs(filtered)

        BANDS = [
            ("Sub-bass ", 20, 80),
            ("Bass ", 80, 250),
            ("Low-mid ", 250, 800),
            ("High-mid ", 800, 4000),
            ("High ", 4000, 20000),
        ]

        # NOTE(review): currently unused — the band analysis below works on
        # the raw residuals, not the normalized ones. Kept as-is.
        gt_mono = gt_res[:, 0] if gt_res.ndim == 2 else gt_res
        ri_mono = res_iter[:, 0] if res_iter.ndim == 2 else res_iter

        gt_raw_for_bands = gt_res_raw_approx
        iter_raw_for_bands = res_raw

        print()
        band_hdr = f" {'banda':<12} {'GT_res RMS':>10} {'SPADE rec RMS':>13} {'recovery':>9} {'limitato?'}"
        print(f" Analisi spettrale per banda — {item['file']}")
        print(f" {'─'*75}")
        print(band_hdr)
        print(f" {'─'*75}")
        for bname, f_lo, f_hi in BANDS:
            gt_db = band_energy(gt_raw_for_bands, sr, f_lo, f_hi)
            iter_db = band_energy(iter_raw_for_bands, sr, f_lo, f_hi)
            if gt_db < -60:
                recovery_str = " — (silenzio)"
                flag_b = ""
            else:
                diff_b = iter_db - gt_db
                if diff_b > -3:
                    flag_b = "OK"
                elif diff_b > -9:
                    flag_b = "~ parziale"
                else:
                    flag_b = "!! sotto-recupero"
                recovery_str = f"{diff_b:>+7.1f} dB {flag_b}"
            line = f" {bname:<12} {gt_db:>+10.1f} {iter_db:>+13.1f} {recovery_str}"
            if _HAS_RICH:
                color = "green" if "OK" in recovery_str else (
                    "yellow" if "~" in recovery_str else (
                        "red" if "!!" in recovery_str else "dim"))
                _console.print(f"[{color}]{line}[/]")
            else:
                print(line)
        print()

    print(SEP)
    print()
    if diff_peaks:
        vs_gt_vals = [d[0] for d in diff_peaks]
        cos_vals = [d[1] for d in diff_peaks]
        avg_vs_gt = float(np.mean(vs_gt_vals))
        best_vs_gt = float(np.min(vs_gt_vals))
        worst_vs_gt = float(np.max(vs_gt_vals))
        avg_cos = float(np.mean(cos_vals))

        # Same theoretical floor as computed per-file above.
        noise_floor_db = 20 * np.log10(10 ** (PINK_NOISE_LEVEL_DB / 20.0) + 1e-12) + RESIDUAL_DBFS

        print(f" RIEPILOGO 06_diff_residuals:")
        print(f" diff/GT_rms media : {avg_vs_gt:>+7.2f} dB (0 dB = diff grande quanto GT)")
        print(f" diff/GT_rms migliore: {best_vs_gt:>+7.2f} dB")
        print(f" diff/GT_rms peggiore: {worst_vs_gt:>+7.2f} dB")
        print(f" cos_sim TD media : {avg_cos:>8.4f} (1.0 = identici)")
        print()
        print(f" NOTA IMPORTANTE:")
        print(f" Il rumore rosa ({PINK_NOISE_LEVEL_DB} dB) fa parte del GT_res ma")
        print(f" NON puo' essere recuperato da SPADE (non e' sparso).")
        print(f" Floor teorico del diff: ≈ {noise_floor_db:+.1f} dBFS — questo e' il")
        print(f" limite fisico massimo raggiungibile con questo corpus.")
        print(f" Un diff/GT < -6 dB indica buona convergenza di SPADE.")
        print()
        # Verdict keyed on the WORST file so a single bad outlier is visible.
        if worst_vs_gt < -12:
            verdict = "OK Convergenza eccellente — SPADE recupera bene i transienti"
        elif worst_vs_gt < -6:
            verdict = "~ Convergenza buona — residuo compatibile con il noise floor"
        else:
            verdict = "INFO diff dominato dal rumore rosa — comportamento atteso e corretto"
        print(f" Verdetto: {verdict}")
        print(f"\n WAV scritti in : {out_dir}/")
        print(f" Formato : float32, nessun clipping (usa un editor che supporta >0dBFS)")
        print(f" Nomenclatura : <stem>__<N>_<traccia>.wav")
|
|
|
|
def save_csv(study: "optuna.Study"):
    """
    Dump all COMPLETE trials to OUT_CSV, best score first.

    The columns mirror the full search space. Fix: the v12 parameters
    lf_cutoff_hz and lf_k_min are searched by make_objective() and shown by
    print_report(), but were missing from the CSV — they are now appended as
    the last two columns (existing columns keep their order).
    """
    import csv
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        key=lambda t: t.value or 0, reverse=True,
    )
    with open(OUT_CSV, "w", newline="") as f:
        w = csv.writer(f)
        w.writerow(["rank", "score", "delta_db", "lf_delta_db",
                    "window_length", "hop_length", "release_ms", "max_gain_db",
                    "eps", "max_iter", "multiband", "macro_expand", "macro_ratio",
                    "lf_cutoff_hz", "lf_k_min"])
        for rank, t in enumerate(trials, 1):
            p = t.params
            # Derive the actual window/hop from the stored exponent/divisor.
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            w.writerow([
                rank, round(t.value, 6),
                p["delta_db"],
                round(p.get("lf_delta_db", p["delta_db"]), 2),
                win, hop,
                p["release_ms"], p["max_gain_db"], p["eps"], p["max_iter"],
                int(p.get("multiband", False)),
                int(p.get("macro_expand", False)),
                round(p.get("macro_ratio", 1.0), 2),
                # .get() defaults keep pre-v12 studies exportable.
                p.get("lf_cutoff_hz", 0.0),
                p.get("lf_k_min", 0),
            ])
    print(f"\n 📄 CSV: {OUT_CSV}")
|
|
|
|
| |
| |
| |
|
|
def parse_args(argv: Optional[List[str]] = None):
    """
    Parse the sweep's command-line options.

    Parameters
    ----------
    argv : list[str] | None
        Argument vector (without the program name). None — the default,
        preserving the original behavior — parses sys.argv[1:]. Passing an
        explicit list makes the parser testable without patching sys.argv.

    Returns
    -------
    argparse.Namespace with all sweep options.
    """
    ap = argparse.ArgumentParser(description="Smart Bayesian sweep per S-SPADE v2")
    ap.add_argument("--trials", type=int, default=200,
                    help="Numero di trial Optuna (default: 200)")
    ap.add_argument("--resume", action="store_true",
                    help="Carica lo study esistente e aggiunge trial")
    ap.add_argument("--report", action="store_true",
                    help="Solo report (nessun nuovo trial)")
    ap.add_argument("--base-dir", type=str, default=".",
                    help="Cartella radice con Kicks/Snares/Perc/Tops")
    ap.add_argument("--corpus-size", type=int, default=None,
                    help="Limita il corpus a N file (None = tutti)")
    ap.add_argument("--top", type=int, default=20,
                    help="Quanti trial mostrare nel ranking (default: 20)")
    ap.add_argument("--no-prune", action="store_true",
                    help="Disabilita MedianPruner (più lento ma completo)")
    ap.add_argument("--debug-export", action="store_true",
                    help="Esporta WAV di debug per i primi N file del corpus (no sweep)")
    ap.add_argument("--debug-dir", type=str, default="debug_export",
                    help="Cartella output WAV di debug (default: debug_export)")
    ap.add_argument("--debug-n", type=int, default=10,
                    help="Quanti file esportare in debug (default: 10)")
    return ap.parse_args(argv)
|
|
|
|
def main():
    """CLI entry point.

    Dispatches, in order of precedence, to one of three modes:
      --report        print ranking + CSV from the existing study DB, then exit
      --debug-export  render debug WAVs using the best (or default) params, then exit
      (default)       run the Bayesian sweep for --trials Optuna trials
    """
    args = parse_args()

    # Fail fast if a hard dependency is missing.  _HAS_OPTUNA / _HAS_SPADE are
    # module-level flags set at import time (outside this view).  Entries
    # ending in ")" are local files, not pip-installable packages, and get
    # their own line in the error message.
    missing = []
    if not _HAS_OPTUNA: missing.append("optuna")
    if not _HAS_SPADE: missing.append("spade_declip_v11.py (nella stessa dir)")
    if missing:
        pip = [m for m in missing if not m.endswith(")")]
        sys.exit("Mancante:\n pip install " + " ".join(pip)
                 + ("\n " + "\n ".join(m for m in missing if m.endswith(")")) if any(m.endswith(")") for m in missing) else ""))

    base_dir = Path(args.base_dir).resolve()
    storage = f"sqlite:///{STUDY_NAME}.db"    # SQLite backing store → enables --resume
    sampler = TPESampler(seed=42, multivariate=True, warn_independent_sampling=False)
    pruner = (MedianPruner(n_startup_trials=10, n_warmup_steps=3)
              if not args.no_prune else optuna.pruners.NopPruner())

    # ---- Mode 1: report only -------------------------------------------------
    if args.report:
        try:
            study = optuna.load_study(study_name=STUDY_NAME, storage=storage,
                                      sampler=sampler, pruner=pruner)
        except Exception:
            sys.exit(f"Nessuno study trovato in {STUDY_NAME}.db")
        print_report(study, top_n=args.top)
        save_csv(study)
        return

    # ---- Mode 2: debug WAV export --------------------------------------------
    if args.debug_export:
        # Start from hard-coded defaults; upgrade to the best completed
        # trial's parameters when a study DB is available.
        spade_params = dict(DEBUG_PARAMS)
        try:
            study = optuna.load_study(study_name=STUDY_NAME, storage=storage,
                                      sampler=sampler, pruner=pruner)
            completed = [t for t in study.trials
                         if t.state == optuna.trial.TrialState.COMPLETE]
            if completed:
                best_t = max(completed, key=lambda t: t.value or 0)
                p = best_t.params
                win = 2 ** p["win_exp"]       # window length stored as a power-of-2 exponent
                hop = win // p["hop_div"]     # hop derived as a divisor of the window
                spade_params = dict(
                    delta_db = p["delta_db"],
                    window_length = win,
                    hop_length = hop,
                    release_ms = p["release_ms"],
                    max_gain_db = p["max_gain_db"],
                    eps = p["eps"],
                    max_iter = p["max_iter"],
                )
                print(f" [DEBUG] Usando best trial #{best_t.number}"
                      f" (score={best_t.value:.5f}) dal DB.")
        except Exception:
            # NOTE(review): the f-prefix below is unnecessary (no placeholders) — harmless.
            print(f" [DEBUG] DB non trovato — uso DEBUG_PARAMS di default.")

        corpus = build_corpus(base_dir, max_files=args.debug_n)
        if not corpus:
            sys.exit("Corpus vuoto. Controlla --base-dir.")
        debug_export(
            corpus = corpus,
            base_dir = base_dir,
            out_dir = Path(args.debug_dir),
            n_files = args.debug_n,
            spade_params = spade_params,
        )
        return

    # ---- Mode 3: full sweep — banner -----------------------------------------
    print("\n" + "=" * 65)
    print("CORPUS + LIMITER SINTETICO (Case 1 — threshold-based)")
    print("=" * 65)
    print(f" Base dir : {base_dir}")
    print(f" Threshold : −{LIMITER_THRESHOLD_DB} dBFS")
    print(f" Release : {LIMITER_RELEASE_MS} ms")
    print(f" Level align: NESSUNO — loudness invariata per costruzione")
    print(f" Rumore rosa: {PINK_NOISE_LEVEL_DB} dB rel. peak "
          f"(simula sottofondo musicale sotto il transiente)")

    corpus = build_corpus(base_dir, max_files=args.corpus_size)
    if not corpus:
        sys.exit("Corpus vuoto. Controlla --base-dir e le cartelle.")

    # Per-file summary of the synthetic ground-truth residual (rms / peak).
    print(f"\n ✓ {len(corpus)} file nel corpus\n")
    col_w = max(len(item["file"]) for item in corpus) + 2
    for item in corpus:
        rms = float(np.sqrt(np.mean(item["gt_res"] ** 2)))
        peak = float(np.max(np.abs(item["gt_res"])))
        print(f" {item['file']:<{col_w}} sr={item['sr']} "
              f"GT rms={rms:.4f} peak={peak:.4f}")

    print(f"\n{'='*65}")
    print(f"OTTIMIZZAZIONE BAYESIANA — {args.trials} trial")
    print(f"TPE (multivariate) + MedianPruner | storage: {STUDY_NAME}.db")
    print(f"{'='*65}\n")

    # load_if_exists=True is what makes --resume work: a re-run attaches to
    # the same SQLite study and simply adds more trials.
    study = optuna.create_study(
        study_name = STUDY_NAME,
        storage = storage,
        sampler = sampler,
        pruner = pruner,
        direction = "maximize",
        load_if_exists = True,
    )

    # Pick the richest available progress UI: rich → tqdm → plain stdout.
    try:
        from rich.progress import (
            Progress, BarColumn, TextColumn,
            TimeElapsedColumn, TimeRemainingColumn, MofNCompleteColumn,
        )
        _has_rich_progress = True
    except ImportError:
        _has_rich_progress = False

    try:
        import tqdm as _tqdm_mod
        _has_tqdm = True
    except ImportError:
        _has_tqdm = False

    # Seed the progress counters from trials already in the DB (resume case),
    # so the bar and ETA reflect the whole study, not just this session.
    _existing_complete = [t for t in study.trials
                          if t.state == optuna.trial.TrialState.COMPLETE]
    _existing_pruned = [t for t in study.trials
                        if t.state == optuna.trial.TrialState.PRUNED]

    if _existing_complete:
        _best_existing = max(_existing_complete, key=lambda t: t.value or 0)
        _init_best = _best_existing.value or 0.0
        _init_best_p = dict(_best_existing.params)
        _init_last = _init_best
    else:
        _init_best, _init_best_p, _init_last = float("-inf"), {}, float("-inf")

    # Mutable state shared with the per-trial callbacks below.
    _state = {
        "done": len(_existing_complete),
        "pruned": len(_existing_pruned),
        "best": _init_best,
        "best_p": _init_best_p,
        "last": _init_last,
        "t0": time.time(),
        "n_total": len(_existing_complete) + len(_existing_pruned) + args.trials,
    }

    def _fmt_best(state: dict) -> str:
        """Compact one-line summary of the current best trial's parameters."""
        # NOTE(review): defined but never referenced in this function —
        # possibly leftover from an earlier progress-bar layout.
        bp = state["best_p"]
        if not bp:
            return "—"
        win = 2 ** bp.get("win_exp", 10)
        hop = win // bp.get("hop_div", 4)
        return (f"δ={bp.get('delta_db',0):.2f} "
                f"win={win} hop={hop} "
                f"rel={bp.get('release_ms',0):.0f}ms "
                f"gain={bp.get('max_gain_db',0):.1f}dB")

    # -- rich progress bar -----------------------------------------------------
    if _has_rich_progress:
        progress = Progress(
            TextColumn("[bold cyan]Trial[/] [cyan]{task.completed}/{task.total}[/]"),
            BarColumn(bar_width=32),
            MofNCompleteColumn(),
            TextColumn(" score [green]{task.fields[last]:.5f}[/]"),
            TextColumn(" best [bold green]{task.fields[best]:.5f}[/]"),
            TextColumn(" [dim]pruned {task.fields[pruned]}[/]"),
            TimeElapsedColumn(),
            TextColumn("ETA"),
            TimeRemainingColumn(),
            refresh_per_second=4,
            transient=False,
        )
        task_id = None   # assigned when the task is created inside `with progress`

        def on_trial_end(study, trial):
            # Optuna callback: update counters and refresh the bar per trial.
            fin = (trial.state == optuna.trial.TrialState.COMPLETE)
            prn = (trial.state == optuna.trial.TrialState.PRUNED)
            if fin:
                _state["done"] += 1
                _state["last"] = trial.value or 0.0
                if _state["last"] > _state["best"]:
                    _state["best"] = _state["last"]
                    _state["best_p"] = dict(study.best_params)
            elif prn:
                _state["pruned"] += 1
            progress.update(
                task_id,
                advance = 1,
                last = _state["last"],
                best = max(_state["best"], 0.0),   # clamp the -inf sentinel for display
                pruned = _state["pruned"],
            )

        t0 = time.time()
        try:
            with progress:
                task_id = progress.add_task(
                    "sweep",
                    total = _state["n_total"],
                    completed = _state["done"] + _state["pruned"],
                    last = max(_state["last"], 0.0),
                    best = max(_state["best"], 0.0),
                    pruned = _state["pruned"],
                )
                study.optimize(
                    make_objective(corpus),
                    n_trials = args.trials,
                    callbacks = [on_trial_end],
                    show_progress_bar = False,
                )
        except KeyboardInterrupt:
            # Trials already written to SQLite survive the interrupt.
            print("\n[!] Interrotto — risultati parziali salvati.")

    # -- tqdm fallback ---------------------------------------------------------
    elif _has_tqdm:
        import tqdm   # NOTE(review): re-import; _tqdm_mod above already bound this module
        _already = _state["done"] + _state["pruned"]
        pbar = tqdm.tqdm(
            total = _state["n_total"],
            initial = _already,
            unit = "trial",
            bar_format = "{l_bar}{bar}| {n}/{total} [{elapsed}<{remaining}]",
        )
        if _already > 0:
            pbar.set_postfix(
                score = f"{max(_state['last'], 0.0):.5f}",
                best = f"{max(_state['best'], 0.0):.5f}",
                pruned = _state["pruned"],
            )

        def on_trial_end(study, trial):
            # Same bookkeeping as the rich branch, rendered through tqdm.
            fin = trial.state == optuna.trial.TrialState.COMPLETE
            prn = trial.state == optuna.trial.TrialState.PRUNED
            if fin:
                _state["done"] += 1
                _state["last"] = trial.value or 0.0
                if _state["last"] > _state["best"]:
                    _state["best"] = _state["last"]
                    _state["best_p"] = dict(study.best_params)
            elif prn:
                _state["pruned"] += 1
            pbar.update(1)
            pbar.set_postfix(
                score = f"{_state['last']:.5f}",
                best = f"{_state['best']:.5f}",
                pruned = _state["pruned"],
            )

        t0 = time.time()
        try:
            study.optimize(
                make_objective(corpus),
                n_trials = args.trials,
                callbacks = [on_trial_end],
                show_progress_bar = False,
            )
        except KeyboardInterrupt:
            print("\n[!] Interrotto — risultati parziali salvati.")
        finally:
            pbar.close()

    # -- plain-stdout fallback -------------------------------------------------
    else:
        def on_trial_end(study, trial):
            # Hand-drawn single-line progress bar using \r rewrites.
            fin = trial.state == optuna.trial.TrialState.COMPLETE
            prn = trial.state == optuna.trial.TrialState.PRUNED
            if fin:
                _state["done"] += 1
                _state["last"] = trial.value or 0.0
                if _state["last"] > _state["best"]:
                    _state["best"] = _state["last"]
                    _state["best_p"] = dict(study.best_params)
                elapsed = time.time() - _state["t0"]
                done_tot = _state["done"] + _state["pruned"]
                eta_s = (elapsed / done_tot) * (_state["n_total"] - done_tot) if done_tot else 0
                is_best = abs(_state["last"] - _state["best"]) < 1e-9
                bar_n = int(32 * done_tot / max(_state["n_total"], 1))
                bar = "█" * bar_n + "░" * (32 - bar_n)
                print(f"\r[{bar}] {done_tot}/{_state['n_total']}"
                      f" {'★' if is_best else ' '}score={_state['last']:.5f}"
                      f" best={_state['best']:.5f}"
                      f" pruned={_state['pruned']}"
                      f" ETA {eta_s/60:.1f}min ", end="", flush=True)
            elif prn:
                _state["pruned"] += 1

        t0 = time.time()
        try:
            study.optimize(
                make_objective(corpus),
                n_trials = args.trials,
                callbacks = [on_trial_end],
                show_progress_bar = False,
            )
        except KeyboardInterrupt:
            print("\n[!] Interrotto — risultati parziali salvati.")
        print()

    # ---- Final summary + report -----------------------------------------------
    # Counts are re-derived from the study (not _state) so they include every
    # trial persisted in the DB, including prior sessions.
    elapsed = time.time() - t0
    n_done = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE)
    n_prune = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.PRUNED)
    print(f"\n Completati: {n_done} | Pruned: {n_prune}"
          f" | Tempo totale: {elapsed/60:.1f} min"
          f" | Media: {elapsed/max(n_done+n_prune,1):.1f} s/trial")

    print_report(study, top_n=args.top)
    save_csv(study)
    print("\nDone.")
|
|
|
|
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
|