Raivis Dejus committed
Commit 306d4b2
1 Parent(s): d57698b

Adding app files

Files changed (4)
  1. README.md +8 -1
  2. app.py +156 -0
  3. packages.txt +1 -0
  4. requirements.txt +3 -0
README.md CHANGED
@@ -1,5 +1,5 @@
---
- title: LatvianSpeechRecognition
+ title: Latvian Speech Recognition
emoji: 🦀
colorFrom: green
colorTo: indigo
@@ -7,6 +7,13 @@ sdk: gradio
sdk_version: 4.28.3
app_file: app.py
pinned: false
+ preload_from_hub:
+ - RaivisDejus/whisper-tiny-lv
+ - RaivisDejus/whisper-small-lv
+ - AiLab-IMCS-UL/whisper-large-v3-lv-late-cv17
+ tags:
+ - latvian
+ - whisper
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,156 @@
import os
import tempfile
import time

import torch
import gradio as gr
import yt_dlp as youtube_dl
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read

BATCH_SIZE = 8
FILE_LIMIT_MB = 1000  # maximum size of an uploaded audio file, in MB
YT_LENGTH_LIMIT_S = 3600  # limit YouTube videos to 1 hour

device = 0 if torch.cuda.is_available() else "cpu"


def transcribe(model, audio, task):
    """Transcribe (or translate) an uploaded or recorded audio file with the selected model."""
    if audio is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")

    pipe = pipeline(
        task="automatic-speech-recognition",
        model=model,
        chunk_length_s=30,
        device=device,
    )
    text = pipe(audio, batch_size=BATCH_SIZE, generate_kwargs={"language": "latvian", "task": task}, return_timestamps=True)["text"]
    return text


def _return_yt_html_embed(yt_url):
    """Build an HTML embed for the submitted YouTube video."""
    video_id = yt_url.split("?v=")[-1]
    html_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return html_str


def download_yt_audio(yt_url, filename):
    """Download a YouTube video to `filename`, enforcing the length limit."""
    info_loader = youtube_dl.YoutubeDL()

    try:
        info = info_loader.extract_info(yt_url, download=False)
    except youtube_dl.utils.DownloadError as err:
        raise gr.Error(str(err))

    file_length = info["duration_string"]
    file_h_m_s = [int(sub_length) for sub_length in file_length.split(":")]

    # Pad the duration to [hours, minutes, seconds]
    if len(file_h_m_s) == 1:
        file_h_m_s.insert(0, 0)
    if len(file_h_m_s) == 2:
        file_h_m_s.insert(0, 0)
    file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]

    if file_length_s > YT_LENGTH_LIMIT_S:
        yt_length_limit_hms = time.strftime("%H:%M:%S", time.gmtime(YT_LENGTH_LIMIT_S))
        file_length_hms = time.strftime("%H:%M:%S", time.gmtime(file_length_s))
        raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got a {file_length_hms} video.")

    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.download([yt_url])
        except youtube_dl.utils.ExtractorError as err:
            raise gr.Error(str(err))


def yt_transcribe(model, yt_url, task):
    """Download a YouTube video, then transcribe (or translate) its audio with the selected model."""
    html_embed_str = _return_yt_html_embed(yt_url)

    with tempfile.TemporaryDirectory() as tmpdirname:
        filepath = os.path.join(tmpdirname, "video.mp4")
        download_yt_audio(yt_url, filepath)
        with open(filepath, "rb") as f:
            inputs = f.read()

    pipe = pipeline(
        task="automatic-speech-recognition",
        model=model,
        chunk_length_s=30,
        device=device,
    )
    inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
    inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}

    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"language": "latvian", "task": task}, return_timestamps=True)["text"]

    return html_embed_str, text


demo = gr.Blocks()

mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Dropdown(
            [
                ("tiny", "RaivisDejus/whisper-tiny-lv"),
                ("small", "RaivisDejus/whisper-small-lv"),
                ("large", "AiLab-IMCS-UL/whisper-large-v3-lv-late-cv17"),
            ],
            label="Model",
            value="RaivisDejus/whisper-small-lv",
        ),
        gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio"),
        gr.Radio([("Transcribe", "transcribe"), ("Translate to English", "translate")], label="Task", value="transcribe"),
    ],
    outputs=gr.Textbox(label="Transcription", lines=10),
    title="Latvian speech recognition: Transcribe Audio",
    description=(
        """
Test Latvian speech recognition (STT) models. Three models are available:

* [tiny](https://huggingface.co/RaivisDejus/whisper-tiny-lv) - Fastest, requiring the least RAM, but also the least accurate
* [small](https://huggingface.co/RaivisDejus/whisper-small-lv) - Reasonably fast, reasonably accurate, requiring reasonable amounts of RAM
* [large](https://huggingface.co/AiLab-IMCS-UL/whisper-large-v3-lv-late-cv17) - Most accurate, developed by scientists from [ailab.lv](https://ailab.lv/). Requires the most RAM and should be run on a GPU for best performance.
"""
    ),
    allow_flagging="never",
)

yt_transcribe_interface = gr.Interface(
    fn=yt_transcribe,
    inputs=[
        gr.Dropdown(
            [
                ("tiny", "RaivisDejus/whisper-tiny-lv"),
                ("small", "RaivisDejus/whisper-small-lv"),
                ("large", "AiLab-IMCS-UL/whisper-large-v3-lv-late-cv17"),
            ],
            label="Model",
            value="RaivisDejus/whisper-small-lv",
        ),
        gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.Radio([("Transcribe", "transcribe"), ("Translate to English", "translate")], label="Task", value="transcribe"),
    ],
    outputs=["html", "text"],
    title="Latvian speech recognition: Transcribe YouTube",
    description=(
        "Transcribe long-form YouTube videos with the click of a button! The audio is transcribed with the Latvian Whisper model selected above."
    ),
    allow_flagging="never",
)

with demo:
    gr.TabbedInterface([mf_transcribe, yt_transcribe_interface], ["Microphone / Audio file", "YouTube"])

demo.queue(max_size=10)
demo.launch()
packages.txt ADDED
@@ -0,0 +1 @@
ffmpeg
requirements.txt ADDED
@@ -0,0 +1,3 @@
git+https://github.com/huggingface/transformers
torch
yt-dlp
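
For reference, the core recognition call in app.py can be exercised outside Gradio with a few lines of Python. This is a minimal sketch, assuming the requirements above (plus ffmpeg from packages.txt) are installed; the file name sample.wav is only a placeholder for a local audio file.

import torch
from transformers import pipeline

# Same pipeline configuration as app.py, using the default "small" Latvian model
asr = pipeline(
    task="automatic-speech-recognition",
    model="RaivisDejus/whisper-small-lv",
    chunk_length_s=30,
    device=0 if torch.cuda.is_available() else "cpu",
)

# "sample.wav" is a hypothetical path; any audio file readable by ffmpeg should work
result = asr("sample.wav", generate_kwargs={"language": "latvian", "task": "transcribe"}, return_timestamps=True)
print(result["text"])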