import os
import re

import streamlit as st
import whisper
from audio_recorder_streamlit import audio_recorder
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the Whisper speech-recognition model
model = whisper.load_model("medium")

# Load the tokenizer and the English-to-Hindi translation model
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-hi")
model_hindi = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-hi")


def translator(text):
    # Translate English text to Hindi
    input_ids = tokenizer.encode(text, return_tensors="pt", padding=True)
    outputs = model_hindi.generate(input_ids)
    decoded_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return decoded_text


def split_sentences(generated_text):
    # Split on whitespace that follows sentence-ending punctuation.
    # NOTE: the original pattern was corrupted in the source (only `r'(?`
    # survived); this is a standard sentence-splitting lookbehind and may
    # differ from the author's exact regex.
    split_text = re.split(r"(?<=[.!?])\s+", generated_text)
    return [sentence for sentence in split_text if sentence.strip()]


def transcribe(audio_path):
    # NOTE: this function fell inside the corrupted span of the source but is
    # called below; this is a minimal reconstruction: transcribe with Whisper,
    # then translate sentence by sentence so each model input stays short.
    result = model.transcribe(audio_path)
    result_text = result["text"].strip()
    translated_text = " ".join(translator(s) for s in split_sentences(result_text))
    return result_text, translated_text


def main():
    # NOTE: only `", unsafe_allow_html=True)` of the original page heading
    # survived the corruption; the HTML below is a placeholder reconstruction.
    st.markdown(
        "<h1 style='text-align: center;'>Audio Transcription and Translation</h1>",
        unsafe_allow_html=True,
    )

    st.subheader("Upload your Audio for Transcription")
    uploaded_file = st.file_uploader("WAV format", type=["wav"])

    if uploaded_file is not None:
        with st.spinner("Transcribing and translating audio... Please wait."):
            audio_path = "uploaded_audio.wav"
            with open(audio_path, "wb") as f:
                f.write(uploaded_file.getvalue())

            result_text, translated_text = transcribe(audio_path)

            st.subheader("Original Text (English):")
            st.write(result_text)
            st.subheader("Translated Text (Hindi):")
            st.write(translated_text)

            # Remove the temporary audio file
            os.remove(audio_path)

    # NOTE: the source imports `audio_recorder` but its usage also fell inside
    # the corrupted span; the block below is a minimal sketch of an in-browser
    # recording path that reuses the same transcribe-and-display flow.
    st.subheader("Or Record Your Audio")
    audio_bytes = audio_recorder()
    if audio_bytes:
        with st.spinner("Transcribing and translating audio... Please wait."):
            audio_path = "recorded_audio.wav"
            with open(audio_path, "wb") as f:
                f.write(audio_bytes)

            result_text, translated_text = transcribe(audio_path)

            st.subheader("Original Text (English):")
            st.write(result_text)
            st.subheader("Translated Text (Hindi):")
            st.write(translated_text)

            os.remove(audio_path)


if __name__ == "__main__":
    main()
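# Usage note (assumes this file is saved as app.py):
#     streamlit run app.py
#
# Dependencies implied by the imports above: streamlit,
# audio-recorder-streamlit, openai-whisper, transformers, torch, and
# sentencepiece (required by the Helsinki-NLP Marian tokenizer). Whisper also
# needs the ffmpeg binary on the PATH to decode audio files.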