elina12 committed on
Commit 97d695c
Parent(s): 43ba0a4

Update app.py

Files changed (1)
  1. app.py +18 -40
app.py CHANGED
@@ -1,45 +1,23 @@
- # from transformers import pipeline
- # import gradio as gr
+ from transformers import pipeline

+ model_id = "tarteel-ai/whisper-base-ar-quran"
+ pipe = pipeline("automatic-speech-recognition", model=model_id)

- # # Load the pipeline with the cache_dir parameter
- # pipe = pipeline(model="tarteel-ai/whisper-base-ar-quran")
+ def transcribe(filepath):
+     output = pipe(
+         filepath,
+         max_new_tokens=10000,
+         chunk_length_s=30,
+         batch_size=8,
+     )
+     return output["text"]

- # def transcribe(audio):
- #     text = pipe(audio)["text"]
- #     return text
-
- # iface = gr.Interface(
- #     fn=transcribe,
- #     inputs=gr.Audio(source="upload", type="filepath"),
- #     outputs="text",
- # )
-
- # iface.launch()
-
-
-
- # from transformers import pipeline
-
- # model_id = "tarteel-ai/whisper-base-ar-quran"  # update with your model id
- # pipe = pipeline("automatic-speech-recognition", model=model_id)
-
- # def transcribe(filepath):
- #     output = pipe(
- #         filepath,
- #         max_new_tokens=10000,
- #     )
- #     return output["text"]
-
- # import gradio as gr
-
- # iface = gr.Interface(
- #     fn=transcribe,
- #     inputs=gr.Audio(source="upload", type="filepath"),
- #     outputs="text",
- # )
-
- # iface.launch()
  import gradio as gr

- gr.Interface.load("models/tarteel-ai/whisper-base-ar-quran").launch()
+ iface = gr.Interface(
+     fn=transcribe,
+     inputs=gr.Audio(source="upload", type="filepath"),
+     outputs="text",
+ )
+
+ iface.launch()
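
For reference, a minimal local sketch (not part of the commit) of the pipeline call the new app.py makes. The file name "sample.mp3" is a placeholder, and ffmpeg must be available so transformers can decode audio from a file path:

from transformers import pipeline

# Same model and long-form settings as the updated transcribe():
# audio is split into 30-second chunks and batched through the model.
pipe = pipeline("automatic-speech-recognition", model="tarteel-ai/whisper-base-ar-quran")

# "sample.mp3" is a hypothetical local recording.
print(pipe("sample.mp3", chunk_length_s=30, batch_size=8)["text"])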