Rhuan288 committed on
Commit
a3669b1
1 Parent(s): 4dfb901

Upload 6 files

Files changed (6)
  1. .gitattributes +32 -0
  2. README.md +76 -0
  3. create_handler.ipynb +289 -0
  4. handler.py +33 -0
  5. requirements.txt +1 -0
  6. sample1.flac +0 -0
.gitattributes ADDED
@@ -0,0 +1,32 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ license: mit
+ tags:
+ - audio
+ - automatic-speech-recognition
+ - endpoints-template
+ library_name: generic
+ inference: false
+ ---
+
+ # OpenAI [Whisper](https://github.com/openai/whisper) Inference Endpoint example
+
+ > Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification.
+
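+ Beyond plain transcription, those extra tasks are exposed through the same Python package. The snippet below is a minimal sketch of language identification and speech translation on the bundled `sample1.flac`, following the upstream `openai/whisper` README (it is not part of the deployed handler):
+
+ ```python
+ import whisper
+
+ model = whisper.load_model("base")
+
+ # identify the spoken language from a 30-second log-Mel spectrogram
+ audio = whisper.pad_or_trim(whisper.load_audio("sample1.flac"))
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
+ _, probs = model.detect_language(mel)
+ print(f"Detected language: {max(probs, key=probs.get)}")
+
+ # task="translate" transcribes speech in any supported language into English
+ result = model.transcribe("sample1.flac", task="translate")
+ print(result["text"])
+ ```
+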
+ For more information about the model, its license, and its limitations, check the original repository at [openai/whisper](https://github.com/openai/whisper).
+
+ ---
+
+ This repository implements a custom `handler` task for `automatic-speech-recognition` for 🤗 Inference Endpoints using OpenAI's new Whisper model. The code for the customized pipeline is in [handler.py](https://huggingface.co/philschmid/openai-whisper-endpoint/blob/main/handler.py).
+
+ There is also a [notebook](https://huggingface.co/philschmid/openai-whisper-endpoint/blob/main/create_handler.ipynb) included that shows how to create the `handler.py`.
+
+ ### Request
+
+ The endpoint expects a binary audio file. Below are a cURL example and a Python example using the `requests` library.
+
+ **curl**
+
+ ```bash
+ # download an audio sample
+ wget https://cdn-media.huggingface.co/speech_samples/sample1.flac
+
+ # send the request
+ curl --request POST \
+   --url https://{ENDPOINT}/ \
+   --header 'Content-Type: audio/x-flac' \
+   --header 'Authorization: Bearer {HF_TOKEN}' \
+   --data-binary '@sample1.flac'
+ ```
+
+ **Python**
+
+ ```python
+ import mimetypes
+
+ import requests
+
+ ENDPOINT_URL = ""
+ HF_TOKEN = ""
+
+
+ def predict(path_to_audio: str):
+     # read the audio file as bytes
+     with open(path_to_audio, "rb") as f:
+         audio_bytes = f.read()
+
+     # guess the content type from the file extension
+     content_type = mimetypes.guess_type(path_to_audio)[0]
+
+     headers = {
+         "Authorization": f"Bearer {HF_TOKEN}",
+         "Content-Type": content_type,
+     }
+     response = requests.post(ENDPOINT_URL, headers=headers, data=audio_bytes)
+     return response.json()
+
+
+ prediction = predict(path_to_audio="sample1.flac")
+ print(prediction)
+ ```
+
+ expected output
+
+ ```json
+ {"text": " going along slushy country roads and speaking to damp audiences in draughty school rooms day after day for a fortnight. He'll have to put in an appearance at some place of worship on Sunday morning, and he can come to us immediately afterwards."}
+ ```
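+
+ If the endpoint returns an error (for example while it is still initializing), the body will not be valid JSON and `response.json()` will raise a decoding error. A small defensive addition to the example above (a suggested tweak, not part of the original snippet):
+
+ ```python
+ response = requests.post(ENDPOINT_URL, headers=headers, data=audio_bytes)
+ response.raise_for_status()  # surface 4xx/5xx errors before JSON parsing
+ prediction = response.json()
+ ```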
create_handler.ipynb ADDED
@@ -0,0 +1,289 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## 1. Setup & Installation"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Overwriting requirements.txt\n"
+      ]
+     }
+    ],
+    "source": [
+     "%%writefile requirements.txt\n",
+     "git+https://github.com/openai/whisper.git@8cf36f3508c9acd341a45eb2364239a3d81458b9"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "!pip install -r requirements.txt --upgrade"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## 2. Test model"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "--2022-09-23 20:32:18--  https://cdn-media.huggingface.co/speech_samples/sample1.flac\n",
+       "Resolving cdn-media.huggingface.co (cdn-media.huggingface.co)... 13.32.151.62, 13.32.151.23, 13.32.151.60, ...\n",
+       "Connecting to cdn-media.huggingface.co (cdn-media.huggingface.co)|13.32.151.62|:443... connected.\n",
+       "HTTP request sent, awaiting response... 200 OK\n",
+       "Length: 282378 (276K) [audio/flac]\n",
+       "Saving to: ‘sample1.flac’\n",
+       "\n",
+       "sample1.flac        100%[===================>] 275.76K  --.-KB/s    in 0.003s\n",
+       "\n",
+       "2022-09-23 20:32:18 (78.7 MB/s) - ‘sample1.flac’ saved [282378/282378]\n",
+       "\n"
+      ]
+     }
+    ],
+    "source": [
+     "!wget https://cdn-media.huggingface.co/speech_samples/sample1.flac"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 9,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "100%|█████████████████████████████████████| 2.87G/2.87G [01:11<00:00, 42.9MiB/s]\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Detected language: english\n",
+       " going along slushy country roads and speaking to damp audiences in drafty school rooms day after day for a fortnight. he'll have to put in an appearance at some place of worship on sunday morning and he can come to us immediately afterwards.\n"
+      ]
+     }
+    ],
+    "source": [
+     "import whisper\n",
+     "\n",
+     "model = whisper.load_model(\"large\")\n",
+     "result = model.transcribe(\"sample1.flac\")\n",
+     "print(result[\"text\"])"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## 3. Create Custom Handler for Inference Endpoints\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Overwriting handler.py\n"
+      ]
+     }
+    ],
+    "source": [
+     "%%writefile handler.py\n",
+     "from typing import Dict\n",
+     "from transformers.pipelines.audio_utils import ffmpeg_read\n",
+     "import whisper\n",
+     "import torch\n",
+     "\n",
+     "SAMPLE_RATE = 16000\n",
+     "\n",
+     "\n",
+     "class EndpointHandler():\n",
+     "    def __init__(self, path=\"\"):\n",
+     "        # load the model\n",
+     "        self.model = whisper.load_model(\"medium\")\n",
+     "\n",
+     "    def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:\n",
+     "        \"\"\"\n",
+     "        Args:\n",
+     "            data (:obj:`dict`):\n",
+     "                includes the deserialized audio file as bytes\n",
+     "        Return:\n",
+     "            A :obj:`dict` containing the transcribed text\n",
+     "        \"\"\"\n",
+     "        # decode the raw audio bytes into a 16 kHz waveform\n",
+     "        inputs = data.pop(\"inputs\", data)\n",
+     "        audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)\n",
+     "        audio_tensor = torch.from_numpy(audio_nparray)  # unused; transcribe consumes the NumPy array\n",
+     "\n",
+     "        # run inference pipeline\n",
+     "        result = self.model.transcribe(audio_nparray)\n",
+     "\n",
+     "        # postprocess the prediction\n",
+     "        return {\"text\": result[\"text\"]}"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Test the custom pipeline"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from handler import EndpointHandler\n",
+     "\n",
+     "# init handler\n",
+     "my_handler = EndpointHandler(path=\".\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/home/ubuntu/endpoints/openai-whisper-endpoint/handler.py:27: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:178.)\n",
+       "  audio_tensor= torch.from_numpy(audio_nparray)\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Detected language: english\n"
+      ]
+     }
+    ],
+    "source": [
+     "# read the sample audio as raw bytes, mirroring the request payload\n",
+     "with open(\"sample1.flac\", \"rb\") as f:\n",
+     "    request = {\"inputs\": f.read()}\n",
+     "\n",
+     "# test the handler\n",
+     "pred = my_handler(request)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "{'transcription': \" going along slushy country roads and speaking to damp audiences in draughty school rooms day after day for a fortnight. He'll have to put in an appearance at some place of worship on Sunday morning, and he can come to us immediately afterwards.\"}"
+       ]
+      },
+      "execution_count": 3,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "pred"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "'{\"transcription\": \" going along slushy country roads and speaking to damp audiences in draughty school rooms day after day for a fortnight. He\\'ll have to put in an appearance at some place of worship on Sunday morning, and he can come to us immediately afterwards.\"}'"
+       ]
+      },
+      "execution_count": 4,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "import json\n",
+     "\n",
+     "json.dumps({'transcription': \" going along slushy country roads and speaking to damp audiences in draughty school rooms day after day for a fortnight. He'll have to put in an appearance at some place of worship on Sunday morning, and he can come to us immediately afterwards.\"})"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3.9.13 ('dev': conda)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.13"
+   },
+   "orig_nbformat": 4,
+   "vscode": {
+    "interpreter": {
+     "hash": "f6dd96c16031089903d5a31ec148b80aeb0d39c32affb1a1080393235fbfa2fc"
+    }
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
handler.py ADDED
@@ -0,0 +1,33 @@
+ from typing import Dict
+ from transformers.pipelines.audio_utils import ffmpeg_read
+ import whisper
+ import torch
+
+ SAMPLE_RATE = 16000
+
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # load the model
+         self.model = whisper.load_model("tiny")
+
+     def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 includes the deserialized audio file as bytes
+         Return:
+             A :obj:`dict` containing the transcribed text
+         """
+         # decode the raw audio bytes into a 16 kHz waveform
+         inputs = data.pop("inputs", data)
+         audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)
+         audio_tensor = torch.from_numpy(audio_nparray)  # unused; transcribe consumes the NumPy array
+
+         # run inference pipeline
+         result = self.model.transcribe(audio_nparray)
+
+         # postprocess the prediction
+         return {"text": result["text"]}
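The notebook above smoke-tests this handler locally before deployment; condensed, that check looks like this (using the bundled sample1.flac):

```python
from handler import EndpointHandler

# instantiate the handler the way Inference Endpoints would
my_handler = EndpointHandler(path=".")

# pass raw audio bytes, mirroring the deserialized request payload
with open("sample1.flac", "rb") as f:
    pred = my_handler({"inputs": f.read()})

print(pred["text"])
```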
requirements.txt ADDED
@@ -0,0 +1 @@
+ git+https://github.com/openai/whisper.git@8cf36f3508c9acd341a45eb2364239a3d81458b9
sample1.flac ADDED
Binary file (282 kB).