import os

import gradio as gr
import pyrebase
import soundfile  # only needed for the commented-out PCM_16 workaround below
from transformers import pipeline

# Audio-classification pipeline used to grade the recitation.
classifier = pipeline(
    "audio-classification",
    model="omarelsayeed/ed8am_elnoon_weltanween_bel8onna",
)

# Firebase Storage holds the uploaded recitations under the "wavfiles" folder.
firebaseConfig = {
    "apiKey": "AIzaSyDjgBD762KveE8GBO7jqTTkj_mKhUTDwGM",
    "authDomain": "quran-c5cbe.firebaseapp.com",
    "databaseURL": "quran-c5cbe.firebaseio.com/",
    "projectId": "quran-c5cbe",
    "storageBucket": "quran-c5cbe.appspot.com",
    "serviceAccount": "quran-c5cbe-firebase-adminsdk-jvpbe-cebaf5aaa6.json",
}
firebase = pyrebase.initialize_app(firebaseConfig)
storage = firebase.storage()


def recite_wav(path_name):
    # Classify a local .wav file with the recitation model.
    # fix pcm_16 error:
    # data, samplerate = soundfile.read(path_name)
    # soundfile.write('_.wav', data, samplerate, subtype='PCM_16')
    return classifier(path_name)


def list_all_files():
    # Debug helper: print every file stored under "wavfiles".
    for _file in storage.child("wavfiles").list_files():
        print(_file.name)


def download_wav_file(wav_name):
    # Fetch a recitation from Firebase Storage into a local temp file.
    storage.child("wavfiles").child(wav_name).download("xd.wav")


def get_quran_text(wav_file_path):
    # Download the requested file, classify it, then remove the temp copy.
    download_wav_file(wav_file_path)
    recitation = recite_wav("xd.wav")
    os.remove("xd.wav")
    return recitation


# Gradio UI: the user types the name of a file stored in Firebase and gets the
# classifier output back as text.
iface = gr.Interface(fn=get_quran_text, inputs="text", outputs="text")
iface.launch()
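
# --- Sketch: PCM_16 conversion workaround (assumption, not part of the app) ---
# The commented-out lines in recite_wav() suggest the classifier sometimes
# rejects WAV files that are not 16-bit PCM. If that error reappears, a small
# helper along these lines could rewrite the file before classification; the
# helper name and output path here are hypothetical.
#
# def convert_to_pcm16(path_name, out_path="_.wav"):
#     data, samplerate = soundfile.read(path_name)
#     soundfile.write(out_path, data, samplerate, subtype="PCM_16")
#     return out_path
#
# Usage inside recite_wav(): return classifier(convert_to_pcm16(path_name))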