# >>>>>> Adapted/frankensteined from these scripts: <<<<<<<
# Summary interface:
# >>>> https://huggingface.co/spaces/khxu/pegasus-text-summarizers/blob/main/app.py
# Audio interface:
# >>>> https://huggingface.co/spaces/iSky/Speech-audio-to-text-with-grammar-correction/blob/main/app.py
# Grammar:
# >>>> https://huggingface.co/deep-learning-analytics/GrammarCorrector/blob/main/README.md

import gradio as gr
from transformers import pipeline
from gradio.mix import Parallel, Series

# >>>>>>>>>>>>>>>>>>>> Danger Below <<<<<<<<<<<<<<<<<<<<<<

# Load the hosted models as callable Gradio interfaces:
s2t = gr.Interface.load('huggingface/hf-internal-testing/processor_with_lm')         # speech-to-text
grammar = gr.Interface.load('huggingface/deep-learning-analytics/GrammarCorrector')  # grammar correction
sum_it = gr.Interface.load('huggingface/SamuelMiller/lil_sum_sum')                   # summarization

# Audio function: transcribe the recorded audio.
# (Grammar correction and summarization are still commented out below.)
def out(audio):
    if audio is None:
        return "no audio"
    a = s2t(audio)  # speech -> text
    # g = grammar(a)   # grammar-correct the transcript
    # s = sum_it(g)    # summarize the corrected text with sum_it
    return a  # grammar(a, num_return_sequences=1)  # grammar(s)  # grammar filter

# Construct the interface:
iface = gr.Interface(
    fn=out,
    title="Speech Audio to Text (with corrected grammar)",
    description="Let's hear it! This app transforms your speech (input) into grammar-corrected text (output)!",
    inputs=gr.inputs.Audio(source="microphone", type="filepath", label=None, optional=True),
    outputs='text',
)

# Launch the interface
iface.launch(enable_queue=True, show_error=True)

# From the original code:
# gr.inputs.Audio(source="upload", type="filepath", label=None, optional=True),
# examples=[["Grammar-Correct-Sample.mp3"], ["Grammar-Wrong-Sample.mp3"]],
# def speech_to_text(inp):
#     pass  # speech recognition model defined here
# gr.Interface(speech_to_text, inputs="mic", outputs=gr.Textbox(label="Predicted text", lines=4))
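
# A minimal sketch of the full pipeline that the commented-out lines in out()
# point toward: transcribe, then grammar-correct, then summarize. The function
# name is illustrative (not part of the original app), and it assumes each
# loaded Interface is callable with a single input and returns plain text,
# the same way out() already calls s2t() above. It is not wired into iface.
def transcribe_correct_summarize(audio):
    if audio is None:
        return "no audio"
    text = s2t(audio)            # speech -> raw transcript
    corrected = grammar(text)    # transcript -> grammar-corrected text
    summary = sum_it(corrected)  # corrected text -> summary via lil_sum_sum
    return summary

# To try it, pass fn=transcribe_correct_summarize instead of fn=out when
# constructing gr.Interface(...) above.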