import os

import gradio as gr
import requests

from helpers import (
    make_header,
    upload_file,
    request_transcript,
    wait_for_completion,
    make_paragraphs_string,
)

title = """

šŸ”„AssemblyAI: Conformer-1 DemošŸ”„

""" subtitle = ( """

Automatic Speech Recognition using the AssemblyAI API

""" ) link = """

Click here to learn more about the Conformer-1 model

""" def submit_to_AAI(api_key, radio, audio_file, mic_recording): if radio == "Audio File": audio_data = audio_file elif radio == "Record Audio": audio_data = mic_recording if not api_key: return "Error! Did you use a valid API key?" header = make_header(api_key) # 1. Upload the audio try: upload_url = upload_file(audio_data, header, is_file=False) except requests.exceptions.HTTPError: return "Error! Did you use a valid API key?" except requests.exceptions.ConnectionError: return "Error! Did you use a valid API key?" # if upload_url is None: # return "Error: Did you set a valid API key?" # 2. Request transcript transcript_response = request_transcript(upload_url, header) transcript_id = transcript_response["id"] # 3. Wait for the transcription to complete _, error = wait_for_completion(transcript_id, header) if error is not None: return error # 4. Fetch paragraphs of transcript return make_paragraphs_string(transcript_id, header) def change_audio_source(radio): if radio == "Audio File": return [gr.Audio.update(visible=True), gr.Audio.update(visible=False)] elif radio == "Record Audio": return [gr.Audio.update(visible=False), gr.Audio.update(visible=True)] with gr.Blocks( css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""" ) as demo: gr.HTML( '
' ) gr.HTML(title) gr.HTML(subtitle) gr.HTML(link) gr.HTML( """
Duplicate the Space and run securely with your AssemblyAI API Key. Get a free key here.
""" ) with gr.Column(elem_id="col_container"): api_key = gr.Textbox( type="password", label="Enter your AssemblyAI API key here" ) with gr.Box(): # Selector for audio source radio = gr.Radio( ["Audio File", "Record Audio"], label="Audio Source", value="Audio File" ) # Audio object for both file and microphone data audio_file = gr.Audio() mic_recording = gr.Audio(source="microphone", visible=False) gr.Examples( [ os.path.join(os.path.dirname(__file__), "audio/audio_sample1.flac"), os.path.join(os.path.dirname(__file__), "audio/assemblyai_company.mp3") ], audio_file, ) btn = gr.Button("Run") out = gr.Textbox( placeholder="Your formatted transcript will appear here ...", lines=10 ) # Changing audio source changes Audio input component radio.change( fn=change_audio_source, inputs=[radio], outputs=[audio_file, mic_recording] ) # Clicking "submit" uploads selected audio to AssemblyAI, performs requested analyses, and displays results btn.click( fn=submit_to_AAI, inputs=[api_key, radio, audio_file, mic_recording], outputs=out, ) demo.queue(max_size=20, concurrency_count=10).launch(debug=True)