import numpy as np
import torch
import librosa
import gradio as gr
from transformers import AutoModelForAudioClassification
import logging

logging.basicConfig(level=logging.INFO)

# Load the classifier from the Space's root directory (expects config.json and the
# model weights to sit alongside this script).
model_path = "./"
model = AutoModelForAudioClassification.from_pretrained(model_path)
def preprocess_audio(audio_path, sr=22050):
    # Load the file at a fixed sample rate and trim leading/trailing silence.
    audio, sr = librosa.load(audio_path, sr=sr)
    audio, _ = librosa.effects.trim(audio)
    return audio, sr
def extract_patches(S_DB, patch_size=16, patch_overlap=6):
    # Slide a patch_size x patch_size window over the dB spectrogram with the given
    # overlap and flatten each patch into a 1-D vector.
    stride = patch_size - patch_overlap
    num_patches_time = (S_DB.shape[1] - patch_overlap) // stride
    num_patches_freq = (S_DB.shape[0] - patch_overlap) // stride
    patches = []
    for i in range(0, num_patches_freq * stride, stride):
        for j in range(0, num_patches_time * stride, stride):
            patch = S_DB[i:i+patch_size, j:j+patch_size]
            if patch.shape == (patch_size, patch_size):
                patches.append(patch.reshape(-1))
    return np.stack(patches) if patches else np.empty((0, patch_size * patch_size))
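# Illustration of the patching arithmetic above (hypothetical input shape, not part
# of the app): with patch_size=16 and patch_overlap=6 the stride is 10, so a
# 128 x 431 spectrogram yields (128 - 6) // 10 = 12 frequency positions and
# (431 - 6) // 10 = 42 time positions, i.e. 12 * 42 = 504 patches of 16 * 16 = 256
# values each:
#   extract_patches(np.zeros((128, 431))).shape  # -> (504, 256)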
def extract_features(audio, sr):
    # Compute a mel spectrogram and convert it to decibels.
    S = librosa.feature.melspectrogram(y=audio, sr=sr, n_mels=128, hop_length=512, n_fft=2048)
    S_DB = librosa.power_to_db(S, ref=np.max)
    patches = extract_patches(S_DB)
    # Each patch is a flattened 16*16 = 256-value vector. In practice these should be
    # projected to the model's 768-dimensional embedding space by a learned linear
    # layer; the placeholder below skips that projection, so the tensor handed to the
    # model keeps the raw 256-dimensional patches.
    patches_tensor = torch.tensor(patches).float()
    if patches_tensor.nelement() == 0:  # No patches (e.g. very short audio)
        patch_embeddings_tensor = torch.empty(0, 768)
    else:
        patch_embeddings_tensor = patches_tensor  # Placeholder; replace with the learned projection
    return patch_embeddings_tensor.unsqueeze(0)  # Add a batch dimension for the model
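# Hedged sketch of the learned projection mentioned above (an assumption for
# illustration, not wired into extract_features): a single linear layer mapping each
# 16*16 = 256-value patch to the 768-dimensional embedding size. In a real setup its
# weights would be trained with, or loaded from, the classifier rather than left at
# their random initialisation as here.
patch_projection = torch.nn.Linear(16 * 16, 768)

def project_patches(patches_tensor):
    # (num_patches, 256) -> (num_patches, 768)
    return patch_projection(patches_tensor)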
def predict_voice(audio_file_path):
    try:
        audio, sr = preprocess_audio(audio_file_path)
        features = extract_features(audio, sr)
        # Note: the features may need to be reshaped, padded, or projected to match
        # the model's expected input size before this call will succeed.
        with torch.no_grad():
            outputs = model(features)
        logits = outputs.logits
        predicted_index = logits.argmax()
        label = model.config.id2label[predicted_index.item()]
        confidence = torch.softmax(logits, dim=1).max().item() * 100
        result = f"The voice is classified as '{label}' with a confidence of {confidence:.2f}%."
        logging.info("Prediction successful.")
    except Exception as e:
        result = f"Error during processing: {e}"
        logging.error(result)
    return result
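# Optional local smoke test before launching the UI (hypothetical file name):
# print(predict_voice("sample.wav"))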
iface = gr.Interface(
    fn=predict_voice,
    inputs=gr.Audio(label="Upload Audio File", type="filepath"),
    outputs=gr.Text(label="Prediction"),
    title="Voice Authenticity Detection",
    description="This system uses advanced audio processing to detect whether a voice is real or AI-generated. Upload an audio file to see the results."
)

iface.launch()