from transformers import pipeline
from pytube import YouTube
import gradio as gr
import librosa
import hopsworks

# Connect to Hopsworks and download the latest model-score image from dataset storage.
project = hopsworks.login()
fs = project.get_feature_store()
dataset_api = project.get_dataset_api()

dataset_api.download("Resources/titanic/images/latest_titanic.png", overwrite=True)  # change link
# dataset_api.download("Resources/images/deadImage.png", overwrite=True)  # change link

# Fine-tuned Swedish checkpoints (commented out); the base Whisper model is loaded below.
# pipe = pipeline(model="fimster/whisper-small-sv-SE")  # change model
# pipe = pipeline(model="ayberkuckun/whisper-small-sv-SE")
pipe = pipeline(model="openai/whisper-small")


def transcribe(url):
    """Download the audio track of a YouTube video and transcribe its first ~30 seconds."""
    selected_video = YouTube(url)
    try:
        audio = selected_video.streams.filter(only_audio=True, file_extension="mp4")[0]
    except IndexError:
        raise Exception("Can't find an mp4 audio stream.")
    audio.download(filename="audio.mp4")
    # Whisper expects 16 kHz audio; keep only the first 30 seconds.
    speech_array, sr = librosa.load("audio.mp4", sr=16000)
    output = pipe(speech_array[: sr * 30])
    # Return one value per output component: the transcript and the score image.
    return output["text"], "latest_titanic.png"


iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Textbox(
        "https://www.youtube.com/watch?v=n9g12Xm9UJM",
        label="Paste a YouTube video URL",
    ),
    outputs=[
        gr.Textbox(label="Only approximately the first 30 seconds will be transcribed"),
        gr.Image("latest_titanic.png", label="Model Scores"),
        # gr.Image("deadImage.png", elem_id="predicted-img", label="Model Scores"),
    ],
    title="Whisper Small Swedish",
    description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model.",
    allow_flagging="never",
)

iface.launch()
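

# Optional: a minimal sketch (not wired into the UI above) of how a longer clip could be
# transcribed instead of truncating to the first ~30 seconds, by letting the ASR pipeline
# do chunked long-form decoding with `chunk_length_s`. It reuses the `pipe`, pytube, and
# librosa setup above; the helper name `transcribe_full` is hypothetical.
def transcribe_full(url):
    audio = YouTube(url).streams.filter(only_audio=True, file_extension="mp4")[0]
    audio.download(filename="audio_full.mp4")
    speech_array, _ = librosa.load("audio_full.mp4", sr=16000)
    # Process the whole clip in 30-second chunks instead of slicing the array.
    output = pipe(speech_array, chunk_length_s=30)
    return output["text"]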