# Earlier approach (kept commented out): run the model locally with a transformers pipeline.
# from transformers import pipeline
# import gradio as gr
#
# # Load the ASR pipeline for the fine-tuned model
# pipe = pipeline(model="tarteel-ai/whisper-base-ar-quran")
#
# def transcribe(audio):
#     text = pipe(audio)["text"]
#     return text
#
# iface = gr.Interface(
#     fn=transcribe,
#     inputs=gr.Audio(source="upload", type="filepath"),
#     outputs="text",
# )
# iface.launch()

# Second earlier approach (kept commented out): explicit ASR pipeline with a larger
# generation budget for longer recordings.
# from transformers import pipeline
#
# model_id = "tarteel-ai/whisper-base-ar-quran"  # update with your model id
# pipe = pipeline("automatic-speech-recognition", model=model_id)
#
# def transcribe(filepath):
#     output = pipe(
#         filepath,
#         max_new_tokens=10000,
#     )
#     return output["text"]
#
# import gradio as gr
#
# iface = gr.Interface(
#     fn=transcribe,
#     inputs=gr.Audio(source="upload", type="filepath"),
#     outputs="text",
# )
# iface.launch()

# Current approach: load the hosted model through the Hugging Face Inference API
# and launch the demo directly.
import gradio as gr

gr.Interface.load("models/tarteel-ai/whisper-base-ar-quran").launch()
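
# Note: in newer Gradio releases, Interface.load is deprecated in favor of the
# top-level gr.load helper. A minimal sketch of the equivalent call, assuming a
# Gradio version that ships gr.load, is kept commented out below so it does not
# launch a second demo:
#
# import gradio as gr
#
# # Same hosted model, loaded via the Inference API and launched as a demo.
# gr.load("models/tarteel-ai/whisper-base-ar-quran").launch()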