# STT2MeetingNote / app.py
# EduTechTeam — commit "Create app.py" (12ed9e9, verified)
import gradio as gr
import openai
from pydub import AudioSegment
import os
import re
# Transcribe a single audio file with the OpenAI Whisper model.
def transcribe(filename, api_key):
    """Return the plain-text transcript of *filename*.

    A fresh client is built from *api_key* for each call; the file is
    streamed to the ``whisper-1`` model with ``response_format="text"``,
    so the API returns a bare string rather than a JSON object.
    """
    client = openai.OpenAI(api_key=api_key)
    with open(filename, "rb") as audio:
        result = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio,
            response_format="text",
        )
    return result
# Transcribe an audio file, splitting it first when it exceeds the
# OpenAI upload limit (25 MB per request).
def transcribe_large_audio(filename, api_key, segment_length_ms=30 * 60 * 1000):
    """Transcribe *filename*, chunking it when it is larger than 25 MB.

    Parameters
    ----------
    filename : str
        Path to the audio file (any container pydub/ffmpeg can read).
    api_key : str
        OpenAI API key, forwarded to :func:`transcribe`.
    segment_length_ms : int
        Length of each chunk in milliseconds (default: 30 minutes).

    Returns
    -------
    str
        Concatenated transcript of all chunks.
    """
    def _size_mb(path):
        # File size in megabytes.
        return os.path.getsize(path) / (1024 * 1024)

    def _split(path, chunk_ms):
        # Let pydub sniff the container instead of forcing format="mp3":
        # Gradio's filepath upload may be wav/m4a/etc., and forcing mp3
        # made decoding fail for those files.
        audio = AudioSegment.from_file(path)
        parts = []
        for start in range(0, len(audio), chunk_ms):
            part_name = f"{path}_part{len(parts) + 1}.mp3"
            # Low bitrate keeps each exported chunk safely under the API cap.
            # (AudioSegment slicing clamps at the end, like list slicing.)
            audio[start:start + chunk_ms].export(part_name, format="mp3", bitrate="36k")
            parts.append(part_name)
        return parts

    if _size_mb(filename) <= 25:
        return transcribe(filename, api_key)

    chunks = _split(filename, segment_length_ms)
    pieces = []
    try:
        for chunk in chunks:
            pieces.append(transcribe(chunk, api_key))
    finally:
        # Remove the temporary segment files even if a request raised
        # mid-loop; the original leaked every not-yet-processed chunk.
        for chunk in chunks:
            if os.path.exists(chunk):
                os.remove(chunk)
    return "".join(pieces)
# Character-count helper.
def count_words(text):
    """Return the number of characters in *text* after stripping all whitespace."""
    # Spaces, tabs and newlines do not count toward the total.
    return len(re.sub(r"\s+", "", text))
# Add punctuation and paragraph breaks to a raw transcript with GPT-4.
def auto_punctuate(text, api_key):
    """Ask GPT-4 to punctuate and paragraph *text*; return the rewritten text.

    Uses a per-call ``openai.OpenAI`` client (as :func:`transcribe` does)
    instead of mutating the module-global ``openai.api_key``, which is not
    safe when Gradio serves concurrent requests with different keys.
    """
    client = openai.OpenAI(api_key=api_key)
    prompt = """請幫我將以下逐字稿加入適當的標點符號和段落分隔,使文本更容易閱讀:
原文:
""" + text
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return completion.choices[0].message.content
# Translate text into the selected target language with GPT-4.
def openai_translate_text(text, target_lang, api_key):
    """Translate *text* into *target_lang*, preserving tone and style.

    The original kept an identity dict ({"英文": "英文", ...}) that added no
    information and raised ``KeyError`` for any language not listed; the UI
    label itself is the language name, so it is used directly.  A per-call
    client replaces the global ``openai.api_key`` mutation for the same
    concurrency reason as :func:`transcribe`.
    """
    client = openai.OpenAI(api_key=api_key)
    prompt = f"請將以下文本翻譯成{target_lang},保持原文的語氣和風格:\n\n{text}"
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return completion.choices[0].message.content
# Generate an audience-tailored summary of the meeting text with GPT-4.
def openai_generate_summary(text, audience_type, api_key):
    """Summarize *text* for the given *audience_type*.

    Unknown audience types fall back to the formal meeting-summary prompt
    instead of raising ``KeyError`` as the original did.  A per-call client
    replaces the global ``openai.api_key`` mutation (concurrency safety,
    consistent with :func:`transcribe`).
    """
    client = openai.OpenAI(api_key=api_key)
    audience_prompts = {
        "學生": "請將以下會議內容整理成適合學生閱讀的摘要,重點放在學習價值和知識傳遞:",
        "老師": "請將以下會議內容整理成適合教師參考的摘要,重點放在教學應用和教育意義:",
        "會議": "請將以下會議內容整理成正式的會議摘要,重點放在決策、行動項目和重要討論:",
        "主管": "請將以下會議內容整理成適合管理層閱讀的摘要,重點放在策略決策和關鍵績效:",
        "技術人員": "請將以下會議內容整理成適合技術團隊閱讀的摘要,重點放在技術細節和實作方向:",
        "行銷人員": "請將以下會議內容整理成適合行銷團隊閱讀的摘要,重點放在市場策略和推廣重點:",
        "一般員工": "請將以下會議內容整理成適合一般員工閱讀的摘要,重點放在執行重點和日常工作相關內容:",
        "客戶": "請將以下會議內容整理成適合客戶閱讀的摘要,重點放在服務優化和價值傳遞:",
        "投資者": "請將以下會議內容整理成適合投資者閱讀的摘要,重點放在財務表現和未來展望:",
        "研究人員": "請將以下會議內容整理成適合研究人員閱讀的摘要,重點放在研究方法和數據分析:"
    }
    # Fall back to the generic "會議" prompt for any unrecognized audience.
    prompt = f"{audience_prompts.get(audience_type, audience_prompts['會議'])}\n\n{text}"
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return completion.choices[0].message.content
# Gradio handler: transcribe the uploaded audio and report its length.
def process_audio(audio_file, api_key):
    """Transcribe *audio_file* and build a character-count label.

    Returns exactly TWO values — (transcript, count label) — to match the
    two output components wired to the "1. 開始轉譯" button.  The original
    returned a third value (the raw count) with no component to receive it,
    which makes Gradio fail with a "too many output values" error.
    """
    if not audio_file or not api_key:
        return "請確保上傳音檔和輸入API金鑰", ""
    try:
        transcript = transcribe_large_audio(audio_file, api_key)
        word_count = count_words(transcript)
        return transcript, f"字數統計:{word_count} 字"
    except Exception as e:
        return f"處理失敗:{str(e)}", ""
# Gradio handler for the punctuation step.
def process_punctuation(text, api_key):
    """Run auto_punctuate on *text*, returning a friendly error string on failure."""
    if text and api_key:
        try:
            return auto_punctuate(text, api_key)
        except Exception as exc:
            return f"標點處理失敗:{str(exc)}"
    return "請確保有文本內容和API金鑰"
# Gradio handler for the translation step.
def process_translation(text, target_lang, api_key):
    """Translate *text* via openai_translate_text, mapping failures to a message."""
    if text and target_lang and api_key:
        try:
            return openai_translate_text(text, target_lang, api_key)
        except Exception as exc:
            return f"翻譯失敗:{str(exc)}"
    return "請確保所有必要欄位都已填寫"
# Gradio handler for the summary step.
def process_summary(text, audience_type, api_key):
    """Summarize *text* via openai_generate_summary, mapping failures to a message."""
    if text and audience_type and api_key:
        try:
            return openai_generate_summary(text, audience_type, api_key)
        except Exception as exc:
            return f"摘要生成失敗:{str(exc)}"
    return "請確保所有必要欄位都已填寫"
# Build the Gradio interface: a four-step pipeline where each step's output
# textbox feeds the next step's handler (transcribe -> punctuate ->
# translate -> summarize).
with gr.Blocks() as demo:
    gr.Markdown("會議音檔轉文字處理系統")
    # Step-0 inputs: the audio file and the user's own OpenAI API key.
    with gr.Row():
        audio_file_input = gr.Audio(type="filepath", label="上傳音檔")
        api_key_input = gr.Textbox(label="輸入 OpenAI API 金鑰", type="password")
    # Step-1 outputs: raw transcript plus its character count.
    with gr.Row():
        transcript_output = gr.Textbox(label="原始逐字稿", lines=5)
        word_count_output = gr.Textbox(label="字數統計")
    # Step-2 output: transcript with punctuation/paragraphs added.
    with gr.Row():
        punctuated_output = gr.Textbox(label="加入標點符號後的文本", lines=5)
    # Step-3: target language selector and translation output.
    with gr.Row():
        target_lang_input = gr.Dropdown(
            choices=["繁體中文", "英文", "日文", "韓文", "法文", "德文", "西班牙文"],
            label="選擇目標語言",
            value="繁體中文"
        )
        translated_output = gr.Textbox(label="翻譯結果", lines=5)
    # Step-4: audience selector and audience-tailored summary output.
    with gr.Row():
        audience_type_input = gr.Dropdown(
            choices=[
                "學生", "老師", "會議", "主管", "技術人員",
                "行銷人員", "一般員工", "客戶", "投資者", "研究人員"
            ],
            label="選擇摘要類型",
            value="會議"
        )
        summary_output = gr.Textbox(label="客製化摘要", lines=5)
    # One button per pipeline step, in execution order.
    with gr.Row():
        transcribe_button = gr.Button("1. 開始轉譯")
        punctuate_button = gr.Button("2. 添加標點符號")
        translate_button = gr.Button("3. 翻譯文本")
        summary_button = gr.Button("4. 生成摘要")
    # Wire each button to its handler; note each step reads the PREVIOUS
    # step's output textbox as its input.
    transcribe_button.click(
        process_audio,
        inputs=[audio_file_input, api_key_input],
        outputs=[transcript_output, word_count_output]
    )
    punctuate_button.click(
        process_punctuation,
        inputs=[transcript_output, api_key_input],
        outputs=punctuated_output
    )
    translate_button.click(
        process_translation,
        inputs=[punctuated_output, target_lang_input, api_key_input],
        outputs=translated_output
    )
    summary_button.click(
        process_summary,
        inputs=[translated_output, audience_type_input, api_key_input],
        outputs=summary_output
    )
# Launch the app only when run as a script.
if __name__ == "__main__":
    demo.launch()