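"""Gradio Space: audio analysis with sarvamai/shuka_v1 (8.73B).

Accepts an uploaded or recorded clip, preprocesses it to 16 kHz mono,
and runs it through Shuka's custom transformers pipeline.
"""
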
import os
import gradio as gr
import transformers
import numpy as np
import librosa
import spaces
# ---------------------------
# Quiet OpenMP noise on Spaces
# ---------------------------
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
# ---------------------------
# Model config
# ---------------------------
MODEL_ID = "sarvamai/shuka_v1"
TARGET_SR = 16000 # Shuka uses 16k audio
# ---------------------------
# Global pipeline (lazy-loaded)
# ---------------------------
pipe = None
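# Keep `pipe` as a module-level singleton so the 8.73B model is loaded at most
# once per process; load_model() populates it lazily on the first request.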
def load_model():
    """Load the Shuka v1 pipeline (8.73B)."""
    global pipe
    if pipe is not None:
        return "✅ Model already loaded!"
    try:
        print(f"Loading Shuka model: {MODEL_ID}")
        pipe = transformers.pipeline(
            model=MODEL_ID,
            trust_remote_code=True,  # required for Shuka custom pipeline
            device_map="auto",  # use auto device mapping for HF Spaces
            torch_dtype="bfloat16",
        )
        print("✅ Pipeline loaded successfully!")
        return "✅ Model pipeline loaded successfully!"
    except Exception as e:
        import traceback
        err = f"❌ Error loading model: {e}\n\n{traceback.format_exc()}"
        print(err)
        return err

# ---------------------------
# Audio utilities
# ---------------------------
def load_audio_from_gradio(audio_input):
    """
    Supports both gr.Audio types:
      - type="numpy"    -> (sample_rate, np.ndarray)
      - type="filepath" -> "/tmp/....wav"
    Returns (audio: float32 mono @ 16k, sr: int)
    """
    if isinstance(audio_input, tuple):
        sr, audio = audio_input
    elif isinstance(audio_input, str):
        # Read from tmp filepath
        audio, sr = librosa.load(audio_input, sr=None)
    else:
        raise ValueError(f"Unsupported audio input type: {type(audio_input)}")
    # Ensure float32 ndarray
    audio = np.asarray(audio, dtype=np.float32)
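    # Note: Gradio's numpy input may arrive as int16; the peak normalization
    # below rescales it into the [-1, 1] float range either way.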
    # Stereo -> mono
    if audio.ndim > 1:
        audio = np.mean(audio, axis=1)
    # Trim leading/trailing silence (conservative)
    audio, _ = librosa.effects.trim(audio, top_db=30)
    # Remove DC offset
    if audio.size:
        audio = audio - float(np.mean(audio))
    # Normalize peak to ~0.98 to improve quiet recordings
    peak = float(np.max(np.abs(audio))) if audio.size else 0.0
    if peak > 0:
        audio = (0.98 / peak) * audio
    # Resample to 16k
    if sr != TARGET_SR:
        audio = librosa.resample(audio, orig_sr=sr, target_sr=TARGET_SR)
        sr = TARGET_SR
    # CRITICAL: the Whisper encoder has a hard limit of 3000 mel features.
    # At 16 kHz this equals exactly 30 seconds (100 mel features/second).
    max_sec = 30
    if len(audio) / float(sr) > max_sec:
        audio = audio[: int(max_sec * sr)]
    return audio, sr

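# Illustrative usage (hypothetical path): load_audio_from_gradio("/tmp/clip.wav")
# would return a float32 mono waveform resampled to 16 kHz, with sr == 16000.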
# ---------------------------
# Inference handler
# ---------------------------
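# @spaces.GPU asks Hugging Face ZeroGPU to attach a GPU for the duration of
# each call; it is a no-op on Spaces with a dedicated GPU.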
@spaces.GPU
def analyze_audio(audio_file, system_prompt):
    """
    The system prompt carries the analysis instructions; the audio itself is
    injected into the conversation via the <|audio|> placeholder token.
    """
    global pipe
    if pipe is None:
        status = load_model()
        if status.startswith("❌"):
            return status
    if audio_file is None:
        return "❌ Please upload or record an audio file."
    # Load & preprocess audio
    try:
        audio, sr = load_audio_from_gradio(audio_file)
    except Exception as e:
        return f"❌ Failed to read/process audio: {e}"
    # Quick quality checks
    dur = len(audio) / float(sr) if sr else 0.0
    rms = float(np.sqrt(np.mean(audio**2))) if audio.size else 0.0
    if dur < 1.0:
        return "❌ Audio too short (<1 s). Please upload a longer sample."
    if rms < 1e-3:
        return "❌ Audio extremely quiet. Increase mic gain or speak closer to the microphone."
    sys_text = (system_prompt or "Respond naturally and informatively.").strip()
    # Build turns: system message with user instructions + user message with the audio token
    turns = [
        {"role": "system", "content": sys_text},
        {"role": "user", "content": "<|audio|>"},
    ]
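    # The dict passed below follows Shuka's custom-pipeline input contract:
    # raw waveform, chat turns, and sampling rate; <|audio|> marks where the
    # audio embedding is spliced into the conversation.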
    try:
        out = pipe(
            {"audio": audio, "turns": turns, "sampling_rate": sr},
            max_new_tokens=512,
        )
        # Debug: print raw output
        print(f"Raw output type: {type(out)}")
        print(f"Raw output: {out}")
        # Extract text from response
        if isinstance(out, list) and len(out) > 0:
            text = out[0].get("generated_text", str(out[0]))
        elif isinstance(out, dict):
            text = out.get("generated_text", str(out))
        else:
            text = str(out)
        return f"✅ Processed.\n\n{text}"
    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        print(f"Full error: {error_details}")
        return f"❌ Inference error: {e}\n\nDetails:\n{error_details}"

# ---------------------------
# UI
# ---------------------------
startup_status = "⏳ Model loads on first request (8.73B parameters)."
with gr.Blocks(title="Shuka v1 (8.73B) – Audio Analyzer", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎤 Shuka v1 (8.73B) – Audio Analyzer

    Upload an audio file (or record one) and provide **analysis instructions**.
    The instructions are passed as the system prompt; the audio itself enters
    the conversation through the `<|audio|>` token.

    **Shuka** is a multilingual audio-language model with strong capabilities in
    **11 Indic languages**: Hindi, Bengali, Tamil, Telugu, Marathi, Gujarati,
    Kannada, Malayalam, Punjabi, Odia, and Assamese.

    ⚠️ **Note:** Audio is automatically capped at **30 seconds** due to the Whisper
    encoder's 3000-mel-feature limit. For best results, use clear, concise recordings.
    """)
    with gr.Row():
        with gr.Column():
            # For uploads, `filepath` is robust; the microphone also works.
            audio_input = gr.Audio(
                label="🎵 Upload or Record Audio",
                sources=["upload", "microphone"],
                type="filepath",  # handler also supports numpy tuples
            )
            system_prompt = gr.Textbox(
                label="🧠 Analysis Instructions (what should the AI analyze in the audio?)",
                value="Respond naturally and informatively.",
                lines=8,
                max_lines=20,
            )
            submit_btn = gr.Button("🔍 Analyze", variant="primary")
        with gr.Column():
            output = gr.Markdown(
                label="🤖 Model Response",
                value=f"**Model Status:** {startup_status}",
            )
    submit_btn.click(
        fn=analyze_audio,
        inputs=[audio_input, system_prompt],
        outputs=output,
    )

if __name__ == "__main__":
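    # On Spaces, enabling Gradio's request queue can help with long-running
    # inference, e.g. demo.queue().launch()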
    demo.launch()