File size: 1,612 Bytes
7b7b977
e63c96b
 
9e5c989
f209d99
a006d14
f209d99
c520d9a
 
e63c96b
 
 
 
29015f5
c0de39e
a006d14
 
 
1610c78
 
 
 
7f020a5
 
c520d9a
 
 
c0de39e
c520d9a
84dcdcf
c520d9a
 
 
 
 
 
 
c0de39e
c520d9a
 
 
a1c0ebf
c520d9a
7f020a5
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
#from transformers import Speech2Text2Processor, SpeechEncoderDecoderModel
import streamlit as st
from audio_recorder_streamlit import audio_recorder
import numpy as np

# Function to transcribe audio to text
def transcribe_audio(audio_bytes):
    """Transcribe raw audio bytes to text with Wav2Vec2 (English, 960h model).

    Args:
        audio_bytes: Raw recorded audio as bytes. Assumed to be 16 kHz mono,
            16-bit signed PCM (the recorder below is configured for 16 kHz).
            NOTE(review): if the recorder emits a WAV/RIFF container, the
            header bytes are decoded as samples too — confirm the widget's
            output format.

    Returns:
        The decoded transcription string (may be empty for silence/noise).
    """
    # NOTE(review): models are re-downloaded/re-loaded on every call; consider
    # caching with @st.cache_resource so Streamlit loads them once per session.
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

    # Reinterpret the raw bytes as 16-bit signed integer samples.
    audio_array = np.frombuffer(audio_bytes, dtype=np.int16)

    # Normalize int16 range to [-1, 1). Use float32, NOT float64: the model's
    # weights are float32, and a double-precision input tensor raises a dtype
    # mismatch ("expected Float but found Double") in the forward pass.
    audio_tensor = torch.tensor(audio_array, dtype=torch.float32) / 32768.0

    input_values = processor(audio_tensor, return_tensors="pt", sampling_rate=16000).input_values

    # Inference only — skip autograd bookkeeping to save time and memory.
    with torch.no_grad():
        logits = model(input_values).logits

    # Greedy CTC decode: most likely token per frame, then collapse to text.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.decode(predicted_ids[0])

    return transcription

# --- Streamlit UI ---------------------------------------------------------
st.title("Audio to Text Transcription..")

# Record from the browser mic: stop after 3 s of silence, capture at 16 kHz
# to match the sampling rate the ASR model expects.
audio_bytes = audio_recorder(pause_threshold=3.0, sample_rate=16_000)

if not audio_bytes:
    # Nothing captured yet (or recording was empty).
    st.write("No audio recorded.")
else:
    # Let the user play back what was recorded before transcribing it.
    st.audio(audio_bytes, format="audio/wav")

    result = transcribe_audio(audio_bytes)

    if not result:
        st.write("Error: Failed to transcribe audio.")
    else:
        st.write("Transcription:")
        st.write(result)