refactor - use app.py
- .vscode/launch.json +2 -2
- app.py +77 -218
- d_app.py +0 -131
.vscode/launch.json
CHANGED
@@ -19,8 +19,8 @@
             "program": "/opt/miniconda3/envs/streamlit/bin/streamlit",
             "args": [
                 "run",
-                
-                "
+                "app.py"
+                // "debug_app.py"
             ]
         }
     ]
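The configuration above attaches VS Code's debugger to the Streamlit CLI binary itself. An alternative some setups find easier is a tiny wrapper script that invokes the same CLI from Python, sketched below; it assumes a recent Streamlit release, where the CLI lives at streamlit.web.cli (older releases expose it as streamlit.cli):

# debug_entry.py -- hypothetical wrapper; behaves like "streamlit run app.py"
import sys
from streamlit.web import cli as stcli

if __name__ == "__main__":
    # Pretend the user typed "streamlit run app.py", then hand control to the CLI.
    sys.argv = ["streamlit", "run", "app.py"]
    sys.exit(stcli.main())

Pointing "program" at such a file in a plain Python debug configuration is equivalent to the binary-based setup above.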
app.py
CHANGED
@@ -3,6 +3,7 @@ from collections import deque
 import os
 import threading
 import time
+import traceback
 import av
 import numpy as np
 import streamlit as st
@@ -15,258 +16,116 @@ from sample_utils.turn import get_ice_servers
 import json
 from typing import List
 
+from vosk import SetLogLevel, Model, KaldiRecognizer
+SetLogLevel(-1)  # mutes vosk verbosity
+
 from dotenv import load_dotenv
 load_dotenv()
 
-
-
-system_one = {
-    "audio_bit_rate": 16000,
-    # "audio_bit_rate": 32000,
-    # "audio_bit_rate": 48000,
-}
-
-
-system_one["video_detection_emotions"] = [
-    "a happy person",
-    "the person is happy",
-    "the person's emotional state is happy",
-    "a sad person",
-    "a scared person",
-    "a disgusted person",
-    "an angry person",
-    "a suprised person",
-    "a bored person",
-    "an interested person",
-    "a guilty person",
-    "an indiffert person",
-    "a distracted person",
-]
-
-# system_one["video_detection_emotions"] = [
-#     "Happiness",
-#     "Sadness",
-#     "Fear",
-#     "Disgust",
-#     "Anger",
-#     "Surprise",
-#     "Boredom",
-#     "Interest",
-#     "Excitement",
-#     "Guilt",
-#     "Shame",
-#     "Relief",
-#     "Love",
-#     "Embarrassment",
-#     "Pride",
-#     "Envy",
-#     "Jealousy",
-#     "Anxiety",
-#     "Hope",
-#     "Despair",
-#     "Frustration",
-#     "Confusion",
-#     "Curiosity",
-#     "Contentment",
-#     "Indifference",
-#     "Anticipation",
-#     "Gratitude",
-#     "Bitterness"
-# ]
-system_one["video_detection_engement"] = [
-    "the person is engaged in the conversation",
-    "the person is not engaged in the conversation",
-    "the person is looking at me",
-    "the person is not looking at me",
-    "the person is talking to me",
-    "the person is not talking to me",
-    "the person is engaged",
-    "the person is talking",
-    "the person is listening",
-]
-system_one["video_detection_present"] = [
-    "the view from a webcam",
-    "the view from a webcam we see a person",
-    # "the view from a webcam. I see a person",
-    # "the view from a webcam. The person is looking at the camera",
-    # "i am a webcam",
-    # "i am a webcam and i see a person",
-    # "i am a webcam and i see a person. The person is looking at me",
-    # "a person",
-    # "a person on a Zoom call",
-    # "a person on a FaceTime call",
-    # "a person on a WebCam call",
-    # "no one",
-    # " ",
-    # "multiple people",
-    # "a group of people",
-]
+webrtc_ctx = None
+
+# Initialize Ray
+import ray
+if not ray.is_initialized():
+    # Try to connect to a running Ray cluster
+    ray_address = os.getenv('RAY_ADDRESS')
+    if ray_address:
+        ray.init(ray_address, namespace="project_charles")
+    else:
+        ray.init(namespace="project_charles")
+
+
+async def main():
 
     system_one_audio_status = st.empty()
 
-
     playing = st.checkbox("Playing", value=True)
 
-
-
-    pass
-
-    # create frames to be returned.
-    new_frames = []
-    for frame in frames:
-        input_array = frame.to_ndarray()
-        new_frame = av.AudioFrame.from_ndarray(
-            np.zeros(input_array.shape, dtype=input_array.dtype),
-            layout=frame.layout.name,
-        )
-        new_frame.sample_rate = frame.sample_rate
-        new_frames.append(new_frame)
-
-    # TODO: replace with the audio we want to send to the other side.
-
-    return new_frames
-
-from clip_transform import CLIPTransform
-clip_transform = CLIPTransform()
-
-
-chat_pipeline = ChatPipeline()
-await chat_pipeline.start()
-
-
-system_one["video_detection_emotions_embeddings"] = embeddings
-
-embeddings = clip_transform.text_to_embeddings(system_one["video_detection_engement"])
-system_one["video_detection_engement_embeddings"] = embeddings
-
-embeddings = clip_transform.text_to_embeddings(system_one["video_detection_present"])
-system_one["video_detection_present_embeddings"] = embeddings
-
+    system_one_audio_status.write("Initializing streaming")
+    system_one_audio_output = st.empty()
+
+    system_one_video_output = st.empty()
+
+    system_one_audio_history = []
+    system_one_audio_history_output = st.empty()
+
+    # Initialize resources if not already done
+    system_one_audio_status.write("Initializing streaming")
+    if "streamlit_av_queue" not in st.session_state:
+        from streamlit_av_queue import StreamlitAVQueue
+        st.session_state.streamlit_av_queue = StreamlitAVQueue()
+
+    system_one_audio_status.write("resources referecned")
+
     system_one_audio_status.write("Initializing webrtc_streamer")
     webrtc_ctx = webrtc_streamer(
         key="charles",
         desired_playing_state=playing,
-
-
-        queued_video_frames_callback=queued_video_frames_callback,
+        queued_audio_frames_callback=st.session_state.streamlit_av_queue.queued_audio_frames_callback,
+        queued_video_frames_callback=st.session_state.streamlit_av_queue.queued_video_frames_callback,
         mode=WebRtcMode.SENDRECV,
+        media_stream_constraints={
+            "video": True,
+            "audio": {
+                "sampleRate": 48000,
+                "sampleSize": 16,
+                "noiseSuppression": True,
+                "echoCancellation": True,
+                "channelCount": 1,
+            }
+        },
         rtc_configuration={"iceServers": get_ice_servers()},
         async_processing=True,
     )
 
-
     if not webrtc_ctx.state.playing:
         exit
 
-system_one_audio_status.write("Initializing
-
-
-system_one_audio_history = []
-system_one_audio_history_output = st.empty()
-
-
-sound_chunk = pydub.AudioSegment.empty()
-current_video_embedding = None
-current_video_embedding_timestamp = time.monotonic()
-
-
-def get_dot_similarities(video_embedding, embeddings, embeddings_labels):
-    dot_product = torch.mm(embeddings, video_embedding.T)
-    similarity_image_label = [(float("{:.4f}".format(dot_product[i][0])), embeddings_labels[i]) for i in range(len(embeddings_labels))]
-    similarity_image_label.sort(reverse=True)
-    return similarity_image_label
 
-def get_top_3_similarities_as_a_string(video_embedding, embeddings, embeddings_labels):
-    similarities = get_dot_similarities(video_embedding, embeddings, embeddings_labels)
-    top_3 = ""
-    range_len = 3 if len(similarities) > 3 else len(similarities)
-    for i in range(range_len):
-        top_3 += f"{similarities[i][1]} ({similarities[i][0]}) "
-    return top_3
 
-
-
-if webrtc_ctx.state.playing:
-    # handle video
-    video_frames = []
-    with video_frames_deque_lock:
-        while len(video_frames_deque) > 0:
-            frame = video_frames_deque.popleft()
-            video_frames.append(frame)
-    get_embeddings = False
-    get_embeddings |= current_video_embedding is None
-    current_time = time.monotonic()
-    elapsed_time = current_time - current_video_embedding_timestamp
-    get_embeddings |= elapsed_time > 1. / system_one['vision_embeddings_fps']
-    if get_embeddings and len(video_frames) > 0:
-        current_video_embedding_timestamp = current_time
-        current_video_embedding = clip_transform.image_to_embeddings(video_frames[-1].to_ndarray())
-
-        emotions_top_3 = get_top_3_similarities_as_a_string(current_video_embedding, system_one["video_detection_emotions_embeddings"], system_one["video_detection_emotions"])
-        engagement_top_3 = get_top_3_similarities_as_a_string(current_video_embedding, system_one["video_detection_engement_embeddings"], system_one["video_detection_engement"])
-        present_top_3 = get_top_3_similarities_as_a_string(current_video_embedding, system_one["video_detection_present_embeddings"], system_one["video_detection_present"])
-
-        # table_content = "**System 1 Video:**\n\n"
-        table_content = "| System 1 Video | |\n| --- | --- |\n"
-        table_content += f"| Present | {present_top_3} |\n"
-        table_content += f"| Emotion | {emotions_top_3} |\n"
-        table_content += f"| Engagement | {engagement_top_3} |\n"
-        system_one_video_output.markdown(table_content)
-        # system_one_video_output.markdown(f"**System 1 Video:** \n [Emotion: {emotions_top_3}], \n [Engagement: {engagement_top_3}], \n [Present: {present_top_3}] ")
-        # for similarity, image_label in similarity_image_label:
-        #     print (f"{similarity} {image_label}")
-
-
-    if len(audio_frames) == 0:
-        time.sleep(0.1)
-        system_one_audio_status.write("No frame arrived.")
-        continue
-
-    system_one_audio_status.write("Running. Say something!")
-
-    for audio_frame in audio_frames:
-        sound = pydub.AudioSegment(
-            data=audio_frame.to_ndarray().tobytes(),
-            sample_width=audio_frame.format.bytes,
-            frame_rate=audio_frame.sample_rate,
-            channels=len(audio_frame.layout.channels),
-        )
-        sound = sound.set_channels(1)
-        sound = sound.set_frame_rate(system_one['audio_bit_rate'])
-        sound_chunk += sound
-
-    if len(sound_chunk) > 0:
-        buffer = np.array(sound_chunk.get_array_of_samples())
-        text, speaker_finished = do_work(buffer.tobytes())
-        system_one_audio_output.markdown(f"**System 1 Audio:** {text}")
-        if speaker_finished and len(text) > 0:
-            system_one_audio_history.append(text)
-            if len(system_one_audio_history) > 10:
-                system_one_audio_history = system_one_audio_history[-10:]
-            table_content = "| System 1 Audio History |\n| --- |\n"
-            table_content += "\n".join([f"| {item} |" for item in reversed(system_one_audio_history)])
-            system_one_audio_history_output.markdown(table_content)
-            await chat_pipeline.enqueue(text)
-        sound_chunk = pydub.AudioSegment.empty()
-
-else:
+    system_one_audio_status.write("Initializing speech")
+
+    from charles_actor import CharlesActor
+    charles_actor = None
+
+    try:
+        while True:
+            if not webrtc_ctx.state.playing:
                 system_one_audio_status.write("Stopped.")
-
+                await asyncio.sleep(0.1)
+                continue
+            if charles_actor is None:
+                try:
+                    charles_actor = ray.get_actor("CharlesActor")
+                    system_one_audio_status.write("Charles is here.")
+                except ValueError as e:
+                    system_one_audio_status.write("Charles is sleeping.")
+                    pass
+            if charles_actor is not None:
+                try:
+                    audio_history = await charles_actor.get_system_one_audio_history_output.remote()
+                    system_one_audio_history_output.markdown(audio_history)
+                except Exception as e:
+                    # assume we disconnected
+                    charles_actor = None
+            await asyncio.sleep(0.1)
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        traceback.print_exc()
+        raise e
 
 
 if __name__ == "__main__":
-
+    try:
+        asyncio.run(main())
+    except Exception as e:
+        if webrtc_ctx is not None:
+            del webrtc_ctx
+            webrtc_ctx = None
+        if "streamlit_av_queue" in st.session_state:
+            del st.session_state.streamlit_av_queue
+    finally:
+        pass
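After this commit, app.py no longer does any CLIP or speech work itself; it only polls for a long-lived, named Ray actor. charles_actor.py is not part of this commit, but a minimal sketch of the counterpart, assuming only the interface the loop above actually uses (a ValueError from ray.get_actor while the actor is absent, and an awaitable get_system_one_audio_history_output.remote()), could look like:

# charles_actor.py -- hypothetical sketch; only the surface app.py touches is shown
import ray

@ray.remote
class CharlesActor:
    def __init__(self):
        # app.py renders this string with st.markdown, so a markdown table fits.
        self._system_one_audio_history_output = "| System 1 Audio History |\n| --- |"

    def get_system_one_audio_history_output(self) -> str:
        # app.py awaits the ObjectRef returned by .remote() on this method.
        return self._system_one_audio_history_output

if __name__ == "__main__":
    # Must join the same namespace app.py uses, or ray.get_actor("CharlesActor") fails.
    ray.init(namespace="project_charles")
    # A named, detached actor survives this driver and stays discoverable by name.
    actor = CharlesActor.options(name="CharlesActor", lifetime="detached").remote()
    print(ray.get(actor.get_system_one_audio_history_output.remote()))

Because the actor is named and detached, the Streamlit side can keep calling ray.get_actor("CharlesActor") and treat ValueError as "Charles is sleeping", exactly as the loop above does.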
d_app.py
DELETED
@@ -1,131 +0,0 @@
-import asyncio
-from collections import deque
-import os
-import threading
-import time
-import traceback
-import av
-import numpy as np
-import streamlit as st
-from streamlit_webrtc import WebRtcMode, webrtc_streamer
-import pydub
-import torch
-# import av
-# import cv2
-from sample_utils.turn import get_ice_servers
-import json
-from typing import List
-
-from vosk import SetLogLevel, Model, KaldiRecognizer
-SetLogLevel(-1)  # mutes vosk verbosity
-
-from dotenv import load_dotenv
-load_dotenv()
-
-webrtc_ctx = None
-
-# Initialize Ray
-import ray
-if not ray.is_initialized():
-    # Try to connect to a running Ray cluster
-    ray_address = os.getenv('RAY_ADDRESS')
-    if ray_address:
-        ray.init(ray_address, namespace="project_charles")
-    else:
-        ray.init(namespace="project_charles")
-
-
-
-async def main():
-
-    system_one_audio_status = st.empty()
-
-    playing = st.checkbox("Playing", value=True)
-
-    system_one_audio_status.write("Initializing streaming")
-    system_one_audio_output = st.empty()
-
-    system_one_video_output = st.empty()
-
-    system_one_audio_history = []
-    system_one_audio_history_output = st.empty()
-
-    # Initialize resources if not already done
-    system_one_audio_status.write("Initializing streaming")
-    if "streamlit_av_queue" not in st.session_state:
-        from streamlit_av_queue import StreamlitAVQueue
-        st.session_state.streamlit_av_queue = StreamlitAVQueue()
-
-    system_one_audio_status.write("resources referecned")
-
-
-
-    system_one_audio_status.write("Initializing webrtc_streamer")
-    webrtc_ctx = webrtc_streamer(
-        key="charles",
-        desired_playing_state=playing,
-        queued_audio_frames_callback=st.session_state.streamlit_av_queue.queued_audio_frames_callback,
-        queued_video_frames_callback=st.session_state.streamlit_av_queue.queued_video_frames_callback,
-        mode=WebRtcMode.SENDRECV,
-        media_stream_constraints={
-            "video": True,
-            "audio": {
-                "sampleRate": 48000,
-                "sampleSize": 16,
-                "noiseSuppression": True,
-                "echoCancellation": True,
-                "channelCount": 1,
-            }
-        },
-        rtc_configuration={"iceServers": get_ice_servers()},
-        async_processing=True,
-    )
-
-    if not webrtc_ctx.state.playing:
-        exit
-
-    system_one_audio_status.write("Initializing speech")
-
-    from charles_actor import CharlesActor
-    charles_actor = None
-
-
-    try:
-        while True:
-            if not webrtc_ctx.state.playing:
-                system_one_audio_status.write("Stopped.")
-                await asyncio.sleep(0.1)
-                continue
-            if charles_actor is None:
-                try:
-                    charles_actor = ray.get_actor("CharlesActor")
-                    system_one_audio_status.write("Charles is here.")
-                except ValueError as e:
-                    system_one_audio_status.write("Charles is sleeping.")
-                    pass
-            if charles_actor is not None:
-                try:
-                    audio_history = await charles_actor.get_system_one_audio_history_output.remote()
-                    system_one_audio_history_output.markdown(audio_history)
-                except Exception as e:
-                    # assume we disconnected
-                    charles_actor = None
-            await asyncio.sleep(0.1)
-
-    except Exception as e:
-        print(f"An error occurred: {e}")
-        traceback.print_exc()
-        raise e
-
-
-if __name__ == "__main__":
-    try:
-        asyncio.run(main())
-    except Exception as e:
-        if webrtc_ctx is not None:
-            del webrtc_ctx
-            webrtc_ctx = None
-        if "streamlit_av_queue" in st.session_state:
-            del st.session_state.streamlit_av_queue
-    finally:
-        pass
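Both the new app.py and the deleted d_app.py delegate frame handling to StreamlitAVQueue, whose source is also outside this commit. Based on the streamlit_webrtc callback contract (async callbacks that receive and return lists of av frames) and on the frame-zeroing code this commit removed from app.py, a plausible sketch (everything beyond the two callback names is an assumption) is:

# streamlit_av_queue.py -- hypothetical sketch; the real module is not in this commit
import threading
from collections import deque
from typing import List

import av
import numpy as np


class StreamlitAVQueue:
    def __init__(self):
        self._lock = threading.Lock()
        self._audio_frames = deque(maxlen=100)  # assumed bound
        self._video_frames = deque(maxlen=10)   # assumed bound

    async def queued_audio_frames_callback(self, frames: List[av.AudioFrame]) -> List[av.AudioFrame]:
        with self._lock:
            self._audio_frames.extend(frames)
        # Return silence so the browser does not play the microphone back,
        # mirroring the zero-filled frames the old app.py built inline.
        new_frames = []
        for frame in frames:
            input_array = frame.to_ndarray()
            new_frame = av.AudioFrame.from_ndarray(
                np.zeros(input_array.shape, dtype=input_array.dtype),
                layout=frame.layout.name,
            )
            new_frame.sample_rate = frame.sample_rate
            new_frames.append(new_frame)
        return new_frames

    async def queued_video_frames_callback(self, frames: List[av.VideoFrame]) -> List[av.VideoFrame]:
        with self._lock:
            self._video_frames.extend(frames)
        return frames  # pass video through unchanged

A consumer (for example the CharlesActor sketched earlier) would then drain the two deques under the same lock.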