# Triplet / app.py
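"""Triplet: a Streamlit demo that transcribes an uploaded audio file with
OpenAI Whisper and structures the transcription as a journal entry with ChatGPT."""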
import streamlit as st
import openai
import tempfile
import os
# Set up the Streamlit app
st.title("Triplet Audio Transcription and Journal Structuring App")
# Display the app image (triplet3.png)
st.image("triplet3.png", width=600)
st.write("Upload an audio file, and we'll transcribe it and structure the output as a journal entry.")
# Supported audio file formats
SUPPORTED_FORMATS = ['flac', 'm4a', 'mp3', 'mp4', 'mpeg', 'mpga', 'oga', 'ogg', 'wav', 'webm']
# Input field for OpenAI API key
api_key = st.text_input("Enter your OpenAI API key:", type="password")
st.write("Everything is open source, please clone and run locally. Bring your own API key. We are using base whisper and chat-gpt models from OpenAI. The app is built using Streamlit and Python. (Note: This app is for demonstration purposes only. Do not upload sensitive information.")
if api_key:
    # Initialize OpenAI client with the API key
    client = openai.OpenAI(api_key=api_key)

    # Function to transcribe audio using OpenAI Whisper
    def transcribe_audio(file_path):
        try:
            with open(file_path, "rb") as audio_file:
                transcript = client.audio.transcriptions.create(
                    file=audio_file,
                    model="whisper-1",
                    response_format="verbose_json"
                )
            transcription = transcript.text
            return transcription
        except Exception as e:
            st.error(f"Error in transcription: {e}")
            return None

    # Function to structure the transcription as a journal entry using ChatGPT
    def structure_as_journal(transcription):
        try:
            prompt = f"Structure the following transcription as a detailed journal entry:\n\n{transcription}"
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=1024
            )
            journal_entry = response.choices[0].message.content
            return journal_entry
        except Exception as e:
            st.error(f"Error in structuring journal entry: {e}")
            return None

    # File uploader for audio files
    uploaded_file = st.file_uploader("Upload an audio file", type=SUPPORTED_FORMATS)

    if uploaded_file:
        # Save uploaded file temporarily
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[1]) as temp_file:
            temp_file.write(uploaded_file.read())
            temp_file_path = temp_file.name

        # Transcribe audio
        st.write("Transcribing audio...")
        transcription = transcribe_audio(temp_file_path)

        if transcription:
            st.write("Transcription:")
            st.write(transcription)

            # Structure transcription as a journal entry
            st.write("Structuring as a journal entry...")
            journal_entry = structure_as_journal(transcription)

            if journal_entry:
                st.write("Journal Entry:")
                st.write(journal_entry)

        # Clean up temporary file
        os.remove(temp_file_path)
else:
    st.warning("Please enter your OpenAI API key to proceed.")