import gradio as gr


def to_audioClassification():
    # Show the file-based classification view; hide the realtime view.
    return {
        audio_classification: gr.Row(visible=True),
        realtime_classification: gr.Row(visible=False),
    }


def to_realtimeAudioClassification():
    # Show the realtime view; hide the file-based classification view.
    return {
        audio_classification: gr.Row(visible=False),
        realtime_classification: gr.Row(visible=True),
    }


with gr.Blocks() as demo:
    # Navigation between the two views.
    with gr.Row():
        btn0 = gr.Button("Audio Classification", scale=1, size="lg")
        btn1 = gr.Button("Realtime Audio Classification", scale=1, size="lg")

    # View 1: classify a recorded or uploaded audio file.
    with gr.Row(visible=False) as audio_classification:
        with gr.Column(min_width=700):
            with gr.Accordion("Record an Audio", open=True):
                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
            with gr.Accordion("Upload a file", open=False):
                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
            clearBtn = gr.ClearButton([inputRecord, inputUpload])
        with gr.Column(min_width=700):
            output = gr.Label(label="Audio Classification")
            btn = gr.Button(value="Generate Audio")  # no handler wired in this snippet
            audioOutput = gr.Audio(label="Audio Output", interactive=False)

    # View 2: classify live microphone audio. `streaming=True` sends chunks
    # to a `.stream()` listener while recording.
    with gr.Row(visible=False) as realtime_classification:
        with gr.Column(min_width=700):
            inputStream = gr.Audio(label="Audio Input", source="microphone",
                                   type="filepath", streaming=True)
            historyOutput = gr.Textbox(label="History", interactive=False)
            # historyOutput = gr.Label(label="History")
        with gr.Column(min_width=700):
            realtimeOutput = gr.Label(label="Audio Classification")

    # The outputs must match the components toggled by the handlers above.
    btn0.click(fn=to_audioClassification,
               outputs=[audio_classification, realtime_classification])
    btn1.click(fn=to_realtimeAudioClassification,
               outputs=[audio_classification, realtime_classification])

if __name__ == "__main__":
    demo.queue()  # queueing is required for streaming events
    demo.launch()
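
# ----------------------------------------------------------------------
# Minimal sketch (not part of the original layout): wiring the components
# above to an actual classifier. It assumes the Hugging Face `transformers`
# library; the AST checkpoint below is only an example, and the listener
# registrations shown at the end would have to live inside the
# `with gr.Blocks() as demo:` block above, before `demo.launch()` is called
# (the import and pipeline would normally sit near the top of the file).
# ----------------------------------------------------------------------
from transformers import pipeline

# Any audio-classification checkpoint would work here; this one is an example.
classifier = pipeline("audio-classification",
                      model="MIT/ast-finetuned-audioset-10-10-0.4593")

def classify(filepath):
    # The pipeline accepts a file path and returns a list of
    # {"label": ..., "score": ...} dicts; gr.Label accepts a
    # {label: confidence} mapping.
    if filepath is None:
        return None
    return {p["label"]: round(p["score"], 3) for p in classifier(filepath)}

# Inside the Blocks context above:
#   inputRecord.change(fn=classify, inputs=inputRecord, outputs=output)
#   inputUpload.change(fn=classify, inputs=inputUpload, outputs=output)
#   inputStream.stream(fn=classify, inputs=inputStream, outputs=realtimeOutput)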