import gradio as gr
import requests
import time
import os
from dotenv import load_dotenv
# Load AssemblyAI API key from .env
load_dotenv()
ASSEMBLYAI_API_KEY = os.getenv("ASSEMBLYAI_API_KEY")
if not ASSEMBLYAI_API_KEY:
    raise ValueError("Missing ASSEMBLYAI_API_KEY in environment variables.")
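
# The .env file is expected to provide the key in a line like the following
# (placeholder value shown, not a real key):
#   ASSEMBLYAI_API_KEY=your-assemblyai-api-key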

def transcribe_with_assemblyai_file(file_path, api_key):
    headers = {"authorization": api_key}
    upload_url = "https://api.assemblyai.com/v2/upload"

    def read_file(filename, chunk_size=5242880):
        # Stream the file in ~5 MB chunks so large uploads are not held in memory at once
        with open(filename, 'rb') as f:
            while True:
                data = f.read(chunk_size)
                if not data:
                    break
                yield data

    print("Uploading file to AssemblyAI...")
    upload_response = requests.post(upload_url, headers=headers, data=read_file(file_path))
    audio_url = upload_response.json().get("upload_url")
    if not audio_url:
        return "Error during file upload."

    # Request a transcription job for the uploaded audio
    transcript_endpoint = "https://api.assemblyai.com/v2/transcript"
    transcript_request = {"audio_url": audio_url}
    transcript_response = requests.post(transcript_endpoint, json=transcript_request, headers=headers)
    transcript_id = transcript_response.json().get("id")
    if not transcript_id:
        return "Error initiating transcription."

    # Poll every 5 seconds until the job completes or fails
    polling_url = f"{transcript_endpoint}/{transcript_id}"
    print("Transcription in progress...")
    while True:
        polling_response = requests.get(polling_url, headers=headers).json()
        status = polling_response.get("status")
        if status == "completed":
            return polling_response.get("text", "No transcription text available.")
        elif status == "error":
            return f"Error transcribing audio: {polling_response.get('error')}"
        time.sleep(5)
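
# Illustrative only: the helper above can be exercised directly for quick local testing
# outside the Gradio UI; "sample.mp3" is a placeholder path, not a file shipped with this app.
#
#   text = transcribe_with_assemblyai_file("sample.mp3", ASSEMBLYAI_API_KEY)
#   print(text)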

def transcribe(audio_file_path):
    # If the user provides no audio, fall back to the default CommodityReport.mp3
    file_path = audio_file_path if audio_file_path else "CommodityReport.mp3"
    return transcribe_with_assemblyai_file(file_path, ASSEMBLYAI_API_KEY)

# Create a simplified UI without the upload button
with gr.Blocks() as demo:
    gr.Markdown("# Transcription")

    # Instructions with HTML for better styling
    gr.HTML("""
    <div class="instructions-container">
        <h4 class="section-header">USING AN AUDIO FILE:</h4>
        <ul>
            <li>Drop an audio file into the panel below or click the panel to upload one. (If you don't have an audio file, just click 'Transcribe' and it will transcribe a default file.)</li>
            <li>Click 'Transcribe'.</li>
        </ul>
        <h4 class="section-header">RECORD YOUR OWN AUDIO AND TRANSCRIBE IT:</h4>
        <ul>
            <li>Click the grey microphone icon in the middle, above the 'Transcribe' button.</li>
            <li>Then click the 'Record' button and click the 'Stop' button when finished.</li>
            <li>Click 'Transcribe'.</li>
        </ul>
    </div>
    """)

    # Use the original Audio component but make it smaller
    audio_input = gr.Audio(
        label="",  # Remove the label to reduce spacing
        type="filepath",
        interactive=True
    )

    # Add styling to make the audio box smaller and improve the visuals
    gr.HTML("""
    <style>
    /* Style the instructions container */
    .instructions-container {
        background-color: #f8f9fa;
        padding: 5px;
        border-radius: 5px;
        margin-bottom: 5px;
        border-left: 4px solid #FF6B00;
    }
    /* Make the section headers stand out */
    .section-header {
        font-size: 14px;
        font-weight: bold;
        margin-top: 5px;
        margin-bottom: 5px;
        color: #333;
    }
    /* Style list items */
    .instructions-container ul {
        margin-bottom: 5px;
    }
    .instructions-container li {
        margin-bottom: 5px;
    }
    /* Highlight the microphone icon */
    .audio-player-footer button:last-child svg {
        color: #888 !important;
        transform: scale(1.2);
    }
    /* Make the audio box smaller */
    .gradio-audio {
        max-height: 50px;
        overflow: hidden;
    }
    /* Ensure the audio controls remain visible */
    .audio-player-footer {
        position: relative;
        z-index: 10;
    }
    /* Make the Transcribe button more prominent */
    .gradio-button.primary {
        font-size: 16px;
        padding: 10px 20px;
        margin: 5px 0;
        background-color: #FF6B00;
    }
    </style>
    """)

    # Transcription controls - make the button more prominent
    transcribe_button = gr.Button("Transcribe", variant="primary", size="lg")
    transcript_output = gr.Textbox(label="Transcript", lines=10)

    # Connect the transcribe button to the transcription function
    transcribe_button.click(fn=transcribe, inputs=audio_input, outputs=transcript_output)

demo.launch(share=True)
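
# Illustrative local run (assumes the dependencies are installed via pip):
#   pip install gradio requests python-dotenv
#   python app.py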