import gradio as gr
import os
import assemblyai as aai
import io
from scipy.io.wavfile import write

title = """
šŸ”„AssemblyAI: Conformer-2 DemošŸ”„
"""

subtitle = """
Automatic Speech Recognition using the AssemblyAI API
"""

link = """
Click here to learn more about the Conformer-2 model
"""
""" def submit_to_AAI(api_key, radio, audio_file, mic_recording): if radio == "Audio File": audio_data = audio_file elif radio == "Record Audio": audio_data = mic_recording if not api_key: return "Error! Did you use a valid API key?" aai.settings.api_key = api_key transcriber = aai.Transcriber() # Create temporary "file" and write data to it sr, aud = audio_data bytes_wav = bytes() temp_file = io.BytesIO(bytes_wav) write(temp_file, sr, aud) # Workaround to upload a file-like object before transcribing # This should be abstracted away in future SDK versions: try: upload_url = aai.api.upload_file(aai.Client.get_default().http_client, temp_file) except aai.types.TranscriptError as e: return str(e) # Now we can transcibe the url transcript = transcriber.transcribe(upload_url) if transcript.error is not None: return transcript.error paragraphs = transcript.get_paragraphs() return "\n\n".join(p.text for p in paragraphs) def change_audio_source(radio): if radio == "Audio File": return [gr.Audio.update(visible=True), gr.Audio.update(visible=False)] elif radio == "Record Audio": return [gr.Audio.update(visible=False), gr.Audio.update(visible=True)] with gr.Blocks( css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""" ) as demo: gr.HTML( '
' ) gr.HTML(title) gr.HTML(subtitle) gr.HTML(link) gr.HTML( """
Duplicate SpaceDuplicate the Space and run securely with your AssemblyAI API Key. Get a free key here.
""" ) with gr.Column(elem_id="col_container"): api_key = gr.Textbox( type="password", label="Enter your AssemblyAI API key here" ) with gr.Box(): # Selector for audio source radio = gr.Radio( ["Audio File", "Record Audio"], label="Audio Source", value="Audio File" ) # Audio object for both file and microphone data audio_file = gr.Audio() mic_recording = gr.Audio(source="microphone", visible=False) gr.Examples( [ os.path.join(os.path.dirname(__file__), "audio/audio_sample1.flac"), os.path.join( os.path.dirname(__file__), "audio/assemblyai_company.mp3" ), ], audio_file, ) btn = gr.Button("Run") out = gr.Textbox( placeholder="Your formatted transcript will appear here ...", lines=10 ) # Changing audio source changes Audio input component radio.change( fn=change_audio_source, inputs=[radio], outputs=[audio_file, mic_recording] ) # Clicking "submit" uploads selected audio to AssemblyAI, performs requested analyses, and displays results btn.click( fn=submit_to_AAI, inputs=[api_key, radio, audio_file, mic_recording], outputs=out, ) demo.queue(max_size=20, concurrency_count=10).launch(debug=True)