import streamlit as st
import numpy as np
import io
import os
import wave
import requests
from audio_to_text import audio_to_text
from streamlit_mic_recorder import mic_recorder

# Get the directory of the current file
current_dir = os.path.dirname(os.path.abspath(__file__))

# Initialize Streamlit app layout
st.title("Microphone Input in Streamlit")

# Record audio
audio = mic_recorder(
    start_prompt="Start recording",
    stop_prompt="Stop recording",
    just_once=False,
    use_container_width=True
)

# Check if audio was recorded
if audio:
    st.audio(audio["bytes"], format="audio/wav")
    audio_bytes = audio["bytes"]

    # Save the recorded audio in WEBM format
    with open("recorded_audio.webm", "wb") as webm_file:
        webm_file.write(audio_bytes)

    # Convert audio to text
    transcription = audio_to_text("recorded_audio.webm")

    # Display the transcription
    st.write("Transcription:", transcription)

    # Optionally, send the transcription to an API
    API_URL = "https://eaa0-34-74-179-199.ngrok-free.app/generate"
    headers = {
        "Content-Type": "application/json"
    }
    payload = {
        "prompt": transcription
    }
    response = requests.post(API_URL, json=payload, headers=headers)
    if response.status_code == 200:
        st.write("Assistant:", response.json())
    else:
        st.write("Error:", response.status_code, response.text)