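# Streamlit speech-to-text demo: upload an audio file, pick a SpeechSeq2Seq
# checkpoint from the dropdown, and transcribe it with Hugging Face Transformers.
# Run locally with: streamlit run <this_file.py>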
import streamlit as st
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
import torch
import tempfile
from pydub import AudioSegment
import numpy as np
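# NOTE: pydub needs ffmpeg (or libav) available on the PATH to decode mp3/m4a uploads.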

# Define available models
available_models = ["Yehor/whisper-small-ukrainian"]

st.title("Voice Recognition App using SpeechSeq2Seq")

st.write("Upload an audio file and choose a model to transcribe it to text.")

# Model selection dropdown
model_choice = st.selectbox("Choose a SpeechSeq2Seq model", available_models)


# Load the selected model and processor
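# st.cache_resource keeps the model and processor in memory across Streamlit
# reruns, so the checkpoint is downloaded and loaded only once per session.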
@st.cache_resource
def load_model_and_processor(model_name):
    model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name)
    processor = AutoProcessor.from_pretrained(model_name)
    return model, processor

st.write(f"Loading {model_choice} model...")
model, processor = load_model_and_processor(model_choice)
st.write(f"{model_choice} model loaded successfully.")

# File uploader for audio file
uploaded_file = st.file_uploader("Choose an audio file", type=["wav", "mp3", "m4a"])

if uploaded_file is not None:
    # Save the upload to a temporary file, keeping its extension so ffmpeg can detect the format
    suffix = "." + uploaded_file.name.rsplit(".", 1)[-1]
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
        temp_file.write(uploaded_file.read())
        temp_file_path = temp_file.name

    # Convert the audio to WAV for downstream processing
    # (derive the path from the temp file name instead of the deprecated tempfile.mktemp())
    audio = AudioSegment.from_file(temp_file_path)
    temp_wav_path = temp_file_path + ".wav"
    audio.export(temp_wav_path, format="wav")

    st.audio(uploaded_file, format=uploaded_file.type or "audio/wav")

    st.write("Transcribing audio...")

    # Load the converted audio and resample to 16 kHz mono, as expected by Whisper-style models
    audio_input = AudioSegment.from_file(temp_wav_path).set_frame_rate(16000).set_channels(1)
    samples = np.array(audio_input.get_array_of_samples()).astype(np.float32)

    # Scale integer PCM samples to the [-1.0, 1.0] float range
    samples /= float(1 << (8 * audio_input.sample_width - 1))

    # Extract log-mel input features for the encoder
    input_features = processor(samples, sampling_rate=16000, return_tensors="pt").input_features

    # AutoModelForSpeechSeq2Seq models have no .transcribe() method:
    # generate token IDs with the seq2seq decoder, then decode them to text
    with torch.no_grad():
        predicted_ids = model.generate(input_features)

    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

    st.write("Transcription:")
    st.write(transcription)