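"""Extract MERT (m-a-p/MERT-v1-330M) embeddings for every audio file found under
ROOT_DATA/<dataset>/batch_001 and batch_002, mean-pool them over time, and write
one CSV of embeddings per dataset. Failures are appended to LOG_PATH."""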
import os
from datetime import datetime

import librosa
import numpy as np
import pandas as pd
import soundfile as sf
import torch
from tqdm import tqdm
from transformers import AutoModel, AutoProcessor

# --- Configuration ---
ROOT_DATA = "/lium/raid-b/mshamsi/FreeSound_Popularity/"
OUTPUT_PATH = "embeddings_mert_all_datasets.csv"
LOG_PATH = "errors_mert.log"

TARGET_SR = 24000  # MERT-v1 models expect 24 kHz input
MAX_DURATION = 60  # seconds kept per file
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Matched case-insensitively below, so lowercase entries are enough.
AUDIO_EXTENSIONS = (".wav", ".mp3", ".flac", ".ogg", ".m4a")

# --- Logging ---
with open(LOG_PATH, "w") as f:
    f.write("=== MERT EXTRACTION LOG ===\n")
    f.write(str(datetime.now()) + "\n\n")


def log_error(msg):
    """Append an error message to the shared log file."""
    with open(LOG_PATH, "a") as f:
        f.write(msg + "\n")


# --- Load MERT model ---
try:
    processor = AutoProcessor.from_pretrained(
        "m-a-p/MERT-v1-330M",
        trust_remote_code=True
    )

    model = AutoModel.from_pretrained(
        "m-a-p/MERT-v1-330M",
        trust_remote_code=True
    ).to(DEVICE)

    model.eval()

except Exception as e:
    log_error(f"[FATAL] Model loading failed: {e}")
    raise RuntimeError("Failed to load the MERT model") from e

# --- Collect dataset folders ---
datasets = [
    d for d in os.listdir(ROOT_DATA)
    if os.path.isdir(os.path.join(ROOT_DATA, d))
]

# --- Extract embeddings, one CSV per dataset ---
for dataset_name in datasets:
    dataset_path = os.path.join(ROOT_DATA, dataset_name)

    # Per-dataset accumulators
    rows = []
    processed = 0
    skipped = 0
    for batch in ["batch_001", "batch_002"]:
        batch_path = os.path.join(dataset_path, batch)

        if not os.path.exists(batch_path):
            log_error(f"[INFO] Missing folder: {batch_path}")
            continue

        # Keep only files with a recognised audio extension
        audio_files = [
            f for f in os.listdir(batch_path)
            if f.lower().endswith(AUDIO_EXTENSIONS)
        ]
        for audio_file in tqdm(audio_files, desc=f"{dataset_name}/{batch}"):
            audio_path = os.path.join(batch_path, audio_file)

            try:
                # Load audio and collapse to mono float32
                audio, sr = sf.read(audio_path, always_2d=False)

                if audio is None or len(audio) == 0:
                    raise ValueError("Empty audio file")

                if audio.ndim > 1:
                    audio = np.mean(audio, axis=1)

                audio = audio.astype(np.float32)

                # Resample to the model's expected rate if needed
                if sr != TARGET_SR:
                    audio = librosa.resample(
                        audio,
                        orig_sr=sr,
                        target_sr=TARGET_SR
                    )

                # Truncate to at most MAX_DURATION seconds
                max_len = TARGET_SR * MAX_DURATION
                audio = audio[:max_len]
                # Run the model on the preprocessed waveform
                inputs = processor(
                    audio,
                    sampling_rate=TARGET_SR,
                    return_tensors="pt"
                )
                inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

                with torch.no_grad():
                    outputs = model(**inputs)

                if not hasattr(outputs, "last_hidden_state"):
                    raise RuntimeError("Invalid model output")

                # Mean-pool over time to get one vector per file
                embedding = (
                    outputs.last_hidden_state
                    .mean(dim=1)
                    .squeeze()
                    .cpu()
                    .numpy()
                )
                # One row per file: metadata plus one column per embedding dimension
                row = {
                    "dataset": dataset_name,
                    "batch": batch,
                    "filename": audio_file
                }
                for i, val in enumerate(embedding):
                    row[f"mert_{i}"] = float(val)

                rows.append(row)
                processed += 1

            except Exception as e:
                skipped += 1
                log_error(f"[ERROR] {audio_path} -> {e}")
    # Save one CSV per dataset
    csv_path = os.path.join(dataset_path, OUTPUT_PATH)
    df = pd.DataFrame(rows)
    df.to_csv(csv_path, index=False)

    print("\n=== EXTRACTION COMPLETE ===")
    print(f"Dataset         : {dataset_name}")
    print(f"Files processed : {processed}")
    print(f"Files skipped   : {skipped}")
    print(f"CSV saved       : {csv_path}")
    print(f"Error log       : {LOG_PATH}")