File size: 1,973 Bytes
5dd1ba5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import os
import tempfile

import streamlit as st
import torch
from transformers import pipeline

# Configure the Streamlit page (must run before any other st.* call).
st.set_page_config(page_title="Meeting Summarizer", layout="centered")

# Page header and intro copy.
# NOTE: the original title emoji was mojibake ("πŸ“", i.e. UTF-8 📝 decoded
# as cp1252); restored to the intended character.
st.title("📝 Intelligent Meeting Summarizer")
st.write("Upload your English meeting audio, and we'll generate a professional summary for you using Hugging Face models.")

# Load ASR pipeline
@st.cache_resource
def load_asr_pipeline():
    """Build (once, via Streamlit's resource cache) the speech-to-text pipeline.

    Returns a Hugging Face automatic-speech-recognition pipeline backed by
    the facebook/s2t-medium-librispeech-asr model.
    """
    asr = pipeline(
        "automatic-speech-recognition",
        model="facebook/s2t-medium-librispeech-asr",
    )
    return asr

# Load Text Generation pipeline
@st.cache_resource
def load_summary_pipeline():
    """Build (once, via Streamlit's resource cache) the summarization LLM pipeline.

    Device and dtype are chosen automatically: GPU 0 with float16 when CUDA
    is available, otherwise CPU with float32. The original code hard-coded
    ``device=0``, which raises on CPU-only machines, and float16 matmuls are
    unsupported or very slow on most CPUs.

    Returns:
        A Hugging Face text-generation pipeline (huggyllama/llama-7b).
    """
    use_gpu = torch.cuda.is_available()
    return pipeline(
        task="text-generation",
        model="huggyllama/llama-7b",
        # Half precision halves GPU memory; fall back to float32 on CPU.
        torch_dtype=torch.float16 if use_gpu else torch.float32,
        device=0 if use_gpu else -1,
    )

# Instantiate (cached) pipelines once per session.
asr_pipeline = load_asr_pipeline()
gen_pipeline = load_summary_pipeline()

# Upload audio file. Label matches the accepted types (original said only
# ".wav" while also accepting mp3/flac). Mojibake emoji restored throughout.
uploaded_file = st.file_uploader(
    "📤 Upload your meeting audio (.wav / .mp3 / .flac)",
    type=["wav", "mp3", "flac"],
)

if uploaded_file is not None:
    # Let Streamlit infer the playback format from the bytes; the original
    # hard-coded 'audio/wav', which is wrong for mp3/flac uploads.
    st.audio(uploaded_file)

    if st.button("🔍 Transcribe and Summarize"):
        # The ASR pipeline wants a file path, not a file-like object, so
        # spill the upload to a temp file. Preserve the real extension
        # (was always ".wav") so downstream decoders get the right hint.
        suffix = os.path.splitext(uploaded_file.name)[1] or ".wav"
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_audio:
            tmp_audio.write(uploaded_file.getvalue())
            tmp_audio_path = tmp_audio.name

        try:
            # ASR: Audio to Text
            with st.spinner("Transcribing audio..."):
                result = asr_pipeline(tmp_audio_path)
                transcription = result["text"]
                st.subheader("📄 Transcribed Text")
                st.write(transcription)

            # Text to Text: instruction-style prompt for the LLM.
            with st.spinner("Generating summary..."):
                prompt = f"Summarize the following meeting transcript into a professional meeting report:\n{transcription}\n\nSummary:"
                summary = gen_pipeline(
                    prompt,
                    max_new_tokens=300,
                    do_sample=True,
                    top_k=50,
                    temperature=0.7,
                )[0]["generated_text"]
                st.subheader("🧠 Meeting Summary")
                st.write(summary)
        finally:
            # NamedTemporaryFile was created with delete=False; remove it so
            # repeated runs don't leak files (the original never cleaned up).
            os.unlink(tmp_audio_path)