import tempfile

import streamlit as st
import torch
from transformers import pipeline

# Set the Streamlit page config
st.set_page_config(page_title="Meeting Summarizer", layout="centered")

# Title and description
st.title("📝 Intelligent Meeting Summarizer")
st.write(
    "Upload your English meeting audio, and we'll generate a professional "
    "summary for you using Hugging Face models."
)


# Load the ASR pipeline (cached so the model is only loaded once per session)
@st.cache_resource
def load_asr_pipeline():
    return pipeline(
        "automatic-speech-recognition",
        model="facebook/s2t-medium-librispeech-asr",
    )


# Load the text-generation pipeline used for summarization
@st.cache_resource
def load_summary_pipeline():
    return pipeline(
        task="text-generation",
        model="huggyllama/llama-7b",
        torch_dtype=torch.float16,
        device=0,  # GPU index; set to -1 to run on CPU
    )


asr_pipeline = load_asr_pipeline()
gen_pipeline = load_summary_pipeline()

# Upload audio file
uploaded_file = st.file_uploader(
    "📤 Upload your meeting audio (.wav)", type=["wav", "mp3", "flac"]
)

if uploaded_file is not None:
    # Save the upload to a temp file so the ASR pipeline can read it from disk
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_audio:
        tmp_audio.write(uploaded_file.read())
        tmp_audio_path = tmp_audio.name

    st.audio(uploaded_file, format="audio/wav")

    if st.button("🔍 Transcribe and Summarize"):
        # Step 1 - ASR: audio to text
        with st.spinner("Transcribing audio..."):
            result = asr_pipeline(tmp_audio_path)
            transcription = result["text"]
            st.subheader("📄 Transcribed Text")
            st.write(transcription)

        # Step 2 - text to text: summarize the transcript
        with st.spinner("Generating summary..."):
            prompt = (
                "Summarize the following meeting transcript into a "
                f"professional meeting report:\n{transcription}\n\nSummary:"
            )
            summary = gen_pipeline(
                prompt,
                max_new_tokens=300,
                do_sample=True,
                top_k=50,
                temperature=0.7,
                return_full_text=False,  # return only the summary, not the echoed prompt
            )[0]["generated_text"]
            st.subheader("🧠 Meeting Summary")
            st.write(summary)
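
# ---------------------------------------------------------------------------
# Usage sketch (not part of the app itself): how this script is typically run.
# Assumptions: the file is saved as app.py (a hypothetical name) and ffmpeg is
# installed on the system, since the transformers ASR pipeline relies on it to
# decode audio files passed in by path.
#
#   pip install streamlit torch transformers
#   streamlit run app.py
#
# Streamlit then serves the UI locally (by default at http://localhost:8501),
# where an audio file can be uploaded, transcribed, and summarized.
# ---------------------------------------------------------------------------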