AigizK sanchit-gandhi HF staff committed on
Commit
2762366
0 Parent(s):

Duplicate from whisper-event/whisper-demo

Browse files

Co-authored-by: Sanchit Gandhi <sanchit-gandhi@users.noreply.huggingface.co>

Files changed (5) hide show
  1. .gitattributes +34 -0
  2. README.md +15 -0
  3. app.py +95 -0
  4. packages.txt +1 -0
  5. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Whisper Demo
3
+ emoji: 🤫
4
+ colorFrom: indigo
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 3.9.1
8
+ app_file: app.py
9
+ pinned: false
10
+ tags:
11
+ - whisper-event
12
+ duplicated_from: whisper-event/whisper-demo
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ import gradio as gr
4
+ import pytube as pt
5
+ from transformers import pipeline
6
+ from huggingface_hub import model_info
7
+
8
# Checkpoint to load. NB: per the original author, downstream tooling reads
# this value from line 8 of app.py, so keep this assignment where it is.
MODEL_NAME = "openai/whisper-small"

# Use GPU 0 when CUDA is available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = "cpu"

# Long-form ASR pipeline: audio is transcribed in 30-second chunks.
pipe = pipeline(
    model=MODEL_NAME,
    task="automatic-speech-recognition",
    device=device,
    chunk_length_s=30,
)
18
+
19
def transcribe(microphone, file_upload):
    """Transcribe a microphone recording or an uploaded audio file.

    If both inputs are given, the microphone recording wins and a warning
    is prepended to the transcription; if neither is given, an error
    message is returned instead of a transcription.
    """
    # Guard clause: nothing to transcribe at all.
    if microphone is None and file_upload is None:
        return "ERROR: You have to either use the microphone or upload an audio file"

    warn_output = ""
    if microphone is not None and file_upload is not None:
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    # Prefer the microphone recording when both sources are present.
    audio_path = file_upload if microphone is None else microphone
    return warn_output + pipe(audio_path)["text"]
35
+
36
+
37
+ def _return_yt_html_embed(yt_url):
38
+ video_id = yt_url.split("?v=")[-1]
39
+ HTML_str = (
40
+ f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
41
+ " </center>"
42
+ )
43
+ return HTML_str
44
+
45
+
46
def yt_transcribe(yt_url):
    """Download a YouTube video's audio track and transcribe it.

    Returns a (html_embed, transcription) pair matching the Gradio
    ["html", "text"] outputs.
    """
    html_embed_str = _return_yt_html_embed(yt_url)

    # Grab the first audio-only stream and save it locally for the pipeline.
    video = pt.YouTube(yt_url)
    audio_stream = video.streams.filter(only_audio=True)[0]
    audio_stream.download(filename="audio.mp3")

    transcription = pipe("audio.mp3")["text"]
    return html_embed_str, transcription
55
+
56
+
57
demo = gr.Blocks()

# NOTE(review): the `gr.inputs.*` API is deprecated in later gradio releases,
# but the Space pins sdk_version 3.9.1 (see README.md), so it is kept as-is.
mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="Whisper Demo: Transcribe Audio",
    description=(
        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the fine-tuned"
        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
        " of arbitrary length."
    ),
    allow_flagging="never",
)

# Renamed from `yt_transcribe` (the original assignment shadowed the
# yt_transcribe function defined above).
yt_transcribe_interface = gr.Interface(
    fn=yt_transcribe,
    inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")],
    outputs=["html", "text"],
    layout="horizontal",
    theme="huggingface",
    title="Whisper Demo: Transcribe YouTube",
    description=(
        "Transcribe long-form YouTube videos with the click of a button! Demo uses the fine-tuned checkpoint:"
        f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files of"
        " arbitrary length."
    ),
    allow_flagging="never",
)

with demo:
    gr.TabbedInterface([mf_transcribe, yt_transcribe_interface], ["Transcribe Audio", "Transcribe YouTube"])

demo.launch(enable_queue=True)
packages.txt ADDED
@@ -0,0 +1 @@
 
1
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ transformers
2
+ torch
3
+ pytube