Commit bc1fc84
Parent(s): d9c5190
Create app.py
app.py
ADDED
@@ -0,0 +1,74 @@
import torch

from transformers import WhisperForConditionalGeneration, WhisperProcessor
from transformers.models.whisper.tokenization_whisper import LANGUAGES
from transformers.pipelines.audio_utils import ffmpeg_read

import librosa
import gradio as gr


model_id = "openai/whisper-large-v2"

processor = WhisperProcessor.from_pretrained(model_id)
model = WhisperForConditionalGeneration.from_pretrained(model_id)

sampling_rate = processor.feature_extractor.sampling_rate

# <|startoftranscript|> token: the first decoder token, immediately after
# which Whisper predicts a language token.
bos_token_id = processor.tokenizer.all_special_ids[-106]
decoder_input_ids = torch.tensor([bos_token_id])


def process_audio_file(file):
    with open(file, "rb") as f:
        inputs = f.read()

    # Decode the raw bytes and resample to the model's sampling rate
    audio = ffmpeg_read(inputs, sampling_rate)
    return audio


def transcribe(Microphone, File_Upload):
    warn_output = ""
    if (Microphone is not None) and (File_Upload is not None):
        warn_output = "WARNING: You've uploaded an audio file and used the microphone. " \
                      "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        file = Microphone

    elif (Microphone is None) and (File_Upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    elif Microphone is not None:
        file = Microphone
    else:
        file = File_Upload

    audio_data = process_audio_file(file)

    input_features = processor(audio_data, sampling_rate=sampling_rate, return_tensors="pt").input_features

    # Single forward pass: the logits at the position after <|startoftranscript|>
    # peak at the predicted language token.
    with torch.no_grad():
        logits = model(input_features, decoder_input_ids=decoder_input_ids).logits

    pred_ids = torch.argmax(logits, dim=-1)
    lang_ids = processor.decode(pred_ids[0])

    # Strip the special-token markers, e.g. "<|en|>" -> "en"
    lang_ids = lang_ids.lstrip("<|").rstrip("|>")
    language = LANGUAGES[lang_ids]

    return warn_output + language


iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type='filepath', optional=True),
        gr.inputs.Audio(source="upload", type='filepath', optional=True),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="Whisper Language Identification",
    description="Demo for Language Identification using OpenAI's [Whisper Large V2](https://huggingface.co/openai/whisper-large-v2)",
    allow_flagging='never',
)
iface.launch(enable_queue=True)
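For context (not part of the commit): the `all_special_ids[-106]` index is intended to pick out Whisper's `<|startoftranscript|>` token, and `LANGUAGES` maps two-letter language codes to language names. A minimal sanity-check sketch, assuming the same checkpoint and a standard `transformers` install (`convert_ids_to_tokens` is a stock tokenizer method):

# Illustrative sanity check, not part of this commit: confirm that the
# 106th-from-last special id is <|startoftranscript|>, and preview the
# code-to-name mapping that LANGUAGES provides.
from transformers import WhisperProcessor
from transformers.models.whisper.tokenization_whisper import LANGUAGES

processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
token_id = processor.tokenizer.all_special_ids[-106]
print(processor.tokenizer.convert_ids_to_tokens(token_id))  # expected: <|startoftranscript|>
print(list(LANGUAGES.items())[:3])  # e.g. [('en', 'english'), ('zh', 'chinese'), ...]

With `torch`, `transformers`, `gradio`, and `librosa` installed, the demo should run locally via `python app.py`; note the `gr.inputs.Audio(..., optional=True)` calls use the older Gradio interface API that was current when this commit was made.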