DereAbdulhameed committed on
Commit
834c01a
1 Parent(s): 1884fcf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -2
app.py CHANGED
@@ -1,3 +1,62 @@
1
- import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- gr.load("models/DereAbdulhameed/updated_cli_2").launch(share=True)
 
# Standard library
import os
import tempfile

# Third-party
import gradio as gr
from pydub import AudioSegment
from pyannote.audio import Pipeline
from transformers import pipeline
# Load models dynamically
def load_models(model_size):
    """Return an automatic-speech-recognition pipeline for *model_size*.

    Parameters
    ----------
    model_size : str
        Model selector; only ``"transcriber"`` is supported and maps to
        the fine-tuned Yoruba model ``clinifyemr/yoruba-model-finetuned``.

    Returns
    -------
    transformers.Pipeline
        An ASR pipeline ready to transcribe audio files.

    Raises
    ------
    ValueError
        If ``model_size`` is any other value.
    """
    # Guard clause: reject unknown selectors up front.
    if model_size != "transcriber":
        raise ValueError("Model size not supported in this application.")
    return pipeline(
        "automatic-speech-recognition",
        model="clinifyemr/yoruba-model-finetuned",
    )
# Process the audio file: diarize speakers, then transcribe each speaker turn.
def process_audio(file, num_speakers, model_size):
    """Transcribe an uploaded audio file with per-speaker labels.

    Parameters
    ----------
    file : str or file-like
        Path to the audio file (gradio ``type="filepath"``) or an object
        exposing a ``.name`` path attribute.
    num_speakers : int
        Minimum number of speakers for diarization (max is capped at 5).
    model_size : str
        Selector forwarded to ``load_models``; only "transcriber" is valid.

    Returns
    -------
    str
        One line per diarized segment, formatted ``"Speaker X: <text>"``.
    """
    # Accept both a plain path string (gradio filepath mode) and the older
    # file-object style the original code assumed.
    source_path = file if isinstance(file, str) else file.name
    audio = AudioSegment.from_file(source_path)
    transcriber = load_models(model_size)

    def _export_clip(clip):
        # mkstemp instead of the insecure, deprecated tempfile.mktemp.
        fd, path = tempfile.mkstemp(suffix=".wav")
        os.close(fd)
        clip.export(path, format="wav")
        return path

    temp_path = _export_clip(audio)
    try:
        # Read the HF token from the environment; the previous code passed
        # the literal string "HF_TOKEN", which can never authenticate.
        diarization_pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            use_auth_token=os.environ.get("HF_TOKEN"),
        )
        diarization = diarization_pipeline(
            temp_path, min_speakers=num_speakers, max_speakers=5
        )

        def transcribe_segment(start, end):
            # pydub slices in milliseconds; start/end arrive in seconds.
            clip_path = _export_clip(audio[int(start * 1000):int(end * 1000)])
            try:
                return transcriber(clip_path)["text"]
            finally:
                os.remove(clip_path)  # clean up even if transcription fails

        transcripts = [
            f"Speaker {speaker}: {transcribe_segment(turn.start, turn.end)}"
            for turn, _, speaker in diarization.itertracks(yield_label=True)
        ]
        return "\n".join(transcripts)
    finally:
        os.remove(temp_path)  # always remove the temp file, even on error
# Gradio interface setup
iface = gr.Interface(
    fn=process_audio,
    inputs=[
        # type="filepath" hands process_audio a path string; the default
        # ("numpy") returns a (sample_rate, ndarray) tuple the handler
        # cannot open with pydub.
        gr.components.Audio(label="Upload your audio file", type="filepath"),
        gr.components.Dropdown(choices=[1, 2, 3, 4], label="Number of Speakers"),
        # Only the fine-tuned 'transcriber' model is wired up in load_models.
        gr.components.Dropdown(choices=["transcriber"], label="Model Selection"),
    ],
    outputs=gr.Textbox(label="Transcription"),
    title="Audio Transcription and Speaker Diarization",
    description="Upload your audio file to transcribe and analyze speaker diarization.",
)

if __name__ == "__main__":
    iface.launch()
62