Commit bbf6234 (parent: 07fa958): Upload folder using huggingface_hub
README.md CHANGED

```diff
@@ -5,7 +5,7 @@ emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 4.44.
+sdk_version: 4.44.1
 app_file: run.py
 pinned: false
 hf_oauth: true
```
run.ipynb CHANGED

```diff
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_asr"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchaudio transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "import numpy as np\n", "\n", "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n", "\n", "def transcribe(stream, new_chunk):\n", "    sr, y = new_chunk\n", "    y = y.astype(np.float32)\n", "    y /= np.max(np.abs(y))\n", "\n", "    if stream is not None:\n", "        stream = np.concatenate([stream, y])\n", "    else:\n", "        stream = y\n", "    return stream, transcriber({\"sampling_rate\": sr, \"raw\": stream})[\"text\"]  # type: ignore\n", "\n", "demo = gr.Interface(\n", "    transcribe,\n", "    [\"state\", gr.Audio(sources=[\"microphone\"], streaming=True)],\n", "    [\"state\", \"text\"],\n", "    live=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", "    demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_asr"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchaudio transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "import numpy as np\n", "\n", "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n", "\n", "def transcribe(stream, new_chunk):\n", "    sr, y = new_chunk\n", "\n", "    # Convert to mono if stereo\n", "    if y.ndim > 1:\n", "        y = y.mean(axis=1)\n", "\n", "    y = y.astype(np.float32)\n", "    y /= np.max(np.abs(y))\n", "\n", "    if stream is not None:\n", "        stream = np.concatenate([stream, y])\n", "    else:\n", "        stream = y\n", "    return stream, transcriber({\"sampling_rate\": sr, \"raw\": stream})[\"text\"]  # type: ignore\n", "\n", "demo = gr.Interface(\n", "    transcribe,\n", "    [\"state\", gr.Audio(sources=[\"microphone\"], streaming=True)],\n", "    [\"state\", \"text\"],\n", "    live=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", "    demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
```
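run.ipynb carries the same change as run.py below. The accumulate-then-retranscribe pattern in `transcribe` can be exercised without downloading the Whisper model; here is a minimal sketch, assuming a stubbed transcriber and synthetic random chunks (both are assumptions; the body of `transcribe` mirrors the demo code):

```python
import numpy as np

# Stub standing in for the transformers ASR pipeline (an assumption for
# testing only; the real demo builds pipeline("automatic-speech-recognition", ...)).
def fake_transcriber(inputs):
    return {"text": f"<{len(inputs['raw'])} samples @ {inputs['sampling_rate']} Hz>"}

def transcribe(stream, new_chunk, transcriber=fake_transcriber):
    sr, y = new_chunk
    # Convert to mono if stereo (the change this commit introduces)
    if y.ndim > 1:
        y = y.mean(axis=1)
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))
    # Accumulate audio across chunks; Gradio threads `stream` through as state
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y
    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]

# Two synthetic 1-second stereo chunks, with the state threaded between calls
state = None
for _ in range(2):
    state, text = transcribe(state, (16000, np.random.randn(16000, 2)))
    print(text)  # <16000 samples ...>, then <32000 samples ...>
```

Each new chunk re-runs transcription over the whole accumulated buffer, which is why the returned text grows monotonically as the user speaks.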
run.py CHANGED

```diff
@@ -6,6 +6,11 @@ transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-bas
 
 def transcribe(stream, new_chunk):
     sr, y = new_chunk
+
+    # Convert to mono if stereo
+    if y.ndim > 1:
+        y = y.mean(axis=1)
+
     y = y.astype(np.float32)
     y /= np.max(np.abs(y))
 
```
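This run.py hunk is the substance of the commit: `gr.Audio` can deliver a stereo chunk as an `(n, 2)` array, while the Whisper pipeline expects a 1-D mono signal, so the new guard averages the two channels before normalization. A quick sketch of the shapes involved (the int16 dtype and the 16000-sample length are assumptions about what the microphone component sends):

```python
import numpy as np

# A fake stereo microphone chunk: (samples, channels), int16 (assumed shape/dtype)
stereo = np.random.randint(-32768, 32767, size=(16000, 2), dtype=np.int16)
print(stereo.shape)  # (16000, 2): what the old code would pass straight through

# The commit's fix: average the channels, then normalize to [-1, 1]
mono = stereo.mean(axis=1).astype(np.float32)
mono /= np.max(np.abs(mono))
print(mono.shape, mono.dtype)  # (16000,) float32: the 1-D signal Whisper expects
```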