File size: 4,855 Bytes
f33c52c
 
 
543294f
80dd19c
543294f
 
 
 
 
 
 
 
 
 
 
f33c52c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# ─────────────────────────────────────────────────────────────────────────────
# src/streamlit_app.py
# ─────────────────────────────────────────────────────────────────────────────
import os, joblib


# Path to the pre-trained voice-emotion model.  Resolve it relative to THIS
# file rather than the current working directory: the original
# os.path.join(os.getcwd(), "src", ...) broke whenever `streamlit run` was
# launched from outside the repo root.  Since this file lives in src/, the
# two resolutions agree when cwd is the repo root.
MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "voice_model.joblib")
if not os.path.exists(MODEL_PATH):
    # First launch: train the model once and persist it for later runs.
    # NOTE(review): train_voice_model is not defined or imported in this
    # file — it must come from elsewhere in the project; verify before
    # deploying, otherwise this branch raises NameError.
    clf = train_voice_model()
    os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
    joblib.dump(clf, MODEL_PATH)

# Load the (possibly just-trained) model.
voice_clf = joblib.load(MODEL_PATH)

# Streamlit config must be set via env vars BEFORE `import streamlit`
# (which happens further down in this file).  /tmp is the only writable
# location on Hugging Face Spaces.
os.environ["STREAMLIT_HOME"] = "/tmp/.streamlit"
# Disable CORS and opt out of usage statistics (optional).
os.environ["STREAMLIT_SERVER_ENABLE_CORS"] = "false"
os.environ["STREAMLIT_GATHER_USAGE_STATS"]  = "false"
# Create the Streamlit home directory.
os.makedirs("/tmp/.streamlit", exist_ok=True)

# ─────────────── 2. Point DeepFace's HOME at /tmp/.deepface ───────────────
os.environ["DEEPFACE_HOME"] = "/tmp/.deepface"
# Pre-create the weights subdirectory so DeepFace's weight download
# doesn't try to mkdir at the filesystem root.
os.makedirs("/tmp/.deepface/weights", exist_ok=True)



import streamlit as st
import cv2, numpy as np, base64, io
import librosa, joblib
from deepface import DeepFace


# ── 1️⃣ Load all models (DeepFace + the voice classifier) ──
@st.cache_resource(show_spinner=False)
def load_models():
    """Warm up DeepFace and load the voice-emotion classifier.

    Cached with st.cache_resource, so the expensive work runs once per
    Streamlit server process rather than on every script rerun.

    Returns:
        The classifier object deserialized from src/voice_model.joblib
        (expected to expose a scikit-learn style ``predict`` —
        see ``analyze_audio_fn``).
    """
    # a) DeepFace warm-up: analyze one dummy black frame so the emotion
    #    model weights are downloaded/loaded before the first real request.
    #    The result itself is discarded.
    DeepFace.analyze(
        img_path=np.zeros((224, 224, 3), dtype=np.uint8),
        actions=['emotion'],
        enforce_detection=False,
    )
    # b) Load the voice model committed to the repo.  The path is resolved
    #    relative to this file, so it works regardless of the working
    #    directory the app was started from.
    root = os.path.dirname(__file__)
    model_path = os.path.join(root, "voice_model.joblib")
    audio_model = joblib.load(model_path)

    return audio_model


audio_model = load_models()


# ── 2️⃣ Text emotion analysis ────────────────────────────────────────────────
def analyze_text_fn(text: str) -> str:
    """Classify (traditional) Chinese text into a coarse emotion label.

    Scans an ordered keyword table; the first emotion with any keyword
    present in *text* wins, so e.g. a sentence containing both happy and
    angry cues is labelled "happy".  Falls back to "neutral".
    """
    keyword_table = (
        ("happy",    ("開心", "快樂", "愉快", "喜悦", "喜悅", "歡喜", "興奮", "高興")),
        ("angry",    ("生氣", "憤怒", "不爽", "發火", "火大", "氣憤")),
        ("sad",      ("傷心", "難過", "哭", "難受", "心酸", "憂", "悲", "哀", "痛苦", "慘", "愁")),
        ("surprise", ("驚訝", "意外", "嚇", "驚詫", "詫異", "訝異", "好奇")),
        ("fear",     ("怕", "恐懼", "緊張", "懼", "膽怯", "畏")),
    )
    for label, keywords in keyword_table:
        if any(kw in text for kw in keywords):
            return label
    return "neutral"


# ── 3️⃣ Voice emotion analysis ───────────────────────────────────────────────
def analyze_audio_fn(wav_bytes: bytes) -> str:
    """Predict an emotion label from raw WAV file bytes.

    Decodes the audio at its native sample rate, extracts 13 MFCCs,
    averages each coefficient over time into a fixed-length feature
    vector, and classifies it with the module-level ``audio_model``.
    """
    signal, sample_rate = librosa.load(io.BytesIO(wav_bytes), sr=None)
    mfcc_matrix = librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=13)
    features = np.mean(mfcc_matrix.T, axis=0)
    return audio_model.predict([features])[0]


# ── 4️⃣ Streamlit UI ─────────────────────────────────────────────────────────
# Page chrome — title is "Multimodal real-time emotion analysis" (zh-TW UI).
st.set_page_config(page_title="多模態即時情緒分析", layout="wide")
st.title("📱 多模態即時情緒分析")

# Three tabs: live face (local testing only), WAV upload, free-text input.
tabs = st.tabs(["🔴 Face(本地測試)", "🎤 上傳語音檔", "⌨️ 輸入文字"])
with tabs[0]:
    st.header("Live Face(僅限本地瀏覽器測試)")
    st.info("⚠️ Hugging Face Spaces 無法直接開啟攝影機,請在本機使用 `streamlit run app.py` 測試。")

    # Camera access only works locally (e.g. via streamlit-webrtc);
    # the demo is omitted here — could be swapped for a Gradio demo.

with tabs[1]:
    st.header("🎤 上傳 WAV 檔進行分析")
    # Only .wav uploads are accepted.
    wav_file = st.file_uploader("請選擇 .wav 音檔", type=["wav"])
    if wav_file is not None:
        # Read the raw bytes and run the voice-emotion classifier.
        wav_bytes = wav_file.read()
        emo = analyze_audio_fn(wav_bytes)
        st.success(f"🎤 語音偵測到的情緒:**{emo}**")


with tabs[2]:
    st.header("輸入文字進行情緒分析")
    txt = st.text_area("請在此輸入文字")
    if st.button("開始分析"):
        # Keyword-based text classifier; returns "neutral" when no
        # emotion keyword matches.
        emo = analyze_text_fn(txt)
        st.success(f"📝 文本偵測到的情緒:**{emo}**")