sohojoe committed
Commit 5a17040 • 1 Parent(s): 6130167

better formatting

Files changed (2):
  1. app.py +11 -22
  2. charles_actor.py +26 -23
app.py CHANGED
@@ -39,34 +39,24 @@ if not ray.is_initialized():
 
 
 async def main():
-
-    system_one_audio_status = st.empty()
-
-
-    system_one_audio_status.write("Initializing streaming")
-
-    # system_one_audio_output = st.empty()
-    # system_one_video_output = st.empty()
-    # system_one_audio_history = []
+    st.title("Project Charles")
 
     col1, col2 = st.columns(2)
 
     with col1:
-        listening = st.checkbox("Listen", value=True)
-        looking = st.checkbox("Look", value=False)
+        nested_col1, nested_col2 = st.columns(2)
+        with nested_col1:
+            listening = st.checkbox("Listen", value=True)
+        with nested_col2:
+            looking = st.checkbox("Look", value=False)
         charles_actor_debug_output = st.empty()
         environment_state_ouput = st.empty()
 
-    # Initialize resources if not already done
-    system_one_audio_status.write("Initializing streaming")
-    if "streamlit_av_queue" not in st.session_state:
-        from streamlit_av_queue import StreamlitAVQueue
-        st.session_state.streamlit_av_queue = StreamlitAVQueue()
-
-    system_one_audio_status.write("resources referecned")
-
-    system_one_audio_status.write("Initializing webrtc_streamer")
     with col2:
+        if "streamlit_av_queue" not in st.session_state:
+            from streamlit_av_queue import StreamlitAVQueue
+            st.session_state.streamlit_av_queue = StreamlitAVQueue()
+
         playing = st.checkbox("Playing", value=True)
         webrtc_ctx = webrtc_streamer(
             key="charles",
@@ -87,12 +77,11 @@ async def main():
             rtc_configuration={"iceServers": get_ice_servers()},
             async_processing=True,
         )
+        system_one_audio_status = st.markdown("Initializing streaming")
 
     if not webrtc_ctx.state.playing:
         exit
 
-    system_one_audio_status.write("Initializing speech")
-
     from charles_actor import CharlesActor
     charles_actor = None
 
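For orientation, the reworked layout in app.py boils down to the following stand-alone Streamlit sketch. It keeps only the widgets touched by this commit; the Ray, WebRTC, and actor-polling wiring is omitted, and apart from the names that appear in the diff everything here is illustrative.

```python
import streamlit as st

st.title("Project Charles")

col1, col2 = st.columns(2)

with col1:
    # The Listen / Look toggles now sit in nested columns so they share one row.
    nested_col1, nested_col2 = st.columns(2)
    with nested_col1:
        listening = st.checkbox("Listen", value=True)
    with nested_col2:
        looking = st.checkbox("Look", value=False)
    charles_actor_debug_output = st.empty()
    environment_state_ouput = st.empty()

with col2:
    # In the real app the StreamlitAVQueue is created lazily in st.session_state
    # and webrtc_streamer(...) is rendered here; both are omitted in this sketch.
    playing = st.checkbox("Playing", value=True)
    # The status line is now a markdown element created after the streamer,
    # replacing the st.empty()/write() placeholders at the top of main().
    system_one_audio_status = st.markdown("Initializing streaming")
```

Net effect: the Listen/Look toggles share a row in the left column, the AV queue is created lazily in session state under the right column, and the status text renders below the streamer instead of at the top of the page.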
charles_actor.py CHANGED
@@ -61,13 +61,19 @@ class CharlesActor:
         await self._initalize_resources()
 
         debug_output_history = []
+
+        def render_debug_output(list_of_strings):
+            table_content = "##### Chat history\n"
+            for item in reversed(list_of_strings):
+                # table_content += f"\n```markdown\n{item}\n```\n"
+                table_content += f"\n{item}\n"
+            self._charles_actor_debug_output = table_content
+
         def add_debug_output(output):
             debug_output_history.append(output)
             if len(debug_output_history) > 10:
                 debug_output_history.pop(0)
-            table_content = "| Charles Actor debug history |\n| --- |\n"
-            table_content += "\n".join([f"| {item} |" for item in reversed(debug_output_history)])
-            self._charles_actor_debug_output = table_content
+            render_debug_output(debug_output_history)
 
         self._state = "Waiting for input"
         total_video_frames = 0
@@ -123,12 +129,12 @@ class CharlesActor:
 
             if speaker_finished and len(prompt) > 0 and prompt not in prompts_to_ignore:
                 print(f"Prompt: {prompt}")
-                lines = []
+                line = ""
                 for i, response in enumerate(current_responses):
-                    line = "🤖 " if len(lines) == 0 else "... "
-                    line += f"{response} [{speech_chunks_per_response[i]}]"
-                    lines.append(line)
-                for line in reversed(lines):
+                    line += "🤖 " if len(line) == 0 else ""
+                    # line += f"{response} [{speech_chunks_per_response[i]}] \n"
+                    line += f"[{speech_chunks_per_response[i]}] {response} \n"
+                if len(line) > 0:
                     add_debug_output(line)
                 add_debug_output(f"👨 {prompt}")
                 current_responses = []
@@ -156,23 +162,20 @@ class CharlesActor:
                     response_id = chunk['llm_sentence_id']
                     speech_chunks_per_response[response_id] += 1
 
-            table_content = "| Charles Actor debug history |\n| --- |\n"
-            debug_output_history_copy = debug_output_history.copy()
-            if len(robot_preview_text) > 0:
-                debug_output_history_copy.append(robot_preview_text)
-            lines = []
+            list_of_strings = debug_output_history.copy()
+            line = ""
             for i, response in enumerate(current_responses):
-                line = "🤖 " if len(lines) == 0 else "... "
-                line += f"{response} [{speech_chunks_per_response[i]}]"
-                lines.append(line)
-            for line in reversed(lines):
-                debug_output_history_copy.append(line)
+                line += "🤖 " if len(line) == 0 else ""
+                line += f"[{speech_chunks_per_response[i]}] {response} \n"
+                # line += f"{response} [{speech_chunks_per_response[i]}] \n"
+            if len(robot_preview_text) > 0:
+                line += robot_preview_text+" \n"
+            list_of_strings.append(line)
             if len(human_preview_text) > 0:
-                debug_output_history_copy.append(human_preview_text)
+                list_of_strings.append(human_preview_text)
+            if len(list_of_strings) > 10:
+                list_of_strings.pop(0)
+            render_debug_output(list_of_strings)
 
 
             await asyncio.sleep(0.01)
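Taken together, the charles_actor.py changes drop the markdown table in favour of a small heading with newest-first entries, and fold each batch of responses into one multi-line entry prefixed with 🤖 and tagged with the per-sentence speech-chunk count. Below is a stand-alone sketch of that rendering path; the sample data is invented, and the real render_debug_output assigns to self._charles_actor_debug_output inside the Ray actor rather than returning a string.

```python
debug_output_history = []

def render_debug_output(list_of_strings):
    # Newest entries first, one markdown paragraph per entry under a heading.
    table_content = "##### Chat history\n"
    for item in reversed(list_of_strings):
        table_content += f"\n{item}\n"
    return table_content  # the actor stores this in self._charles_actor_debug_output

def add_debug_output(output):
    debug_output_history.append(output)
    if len(debug_output_history) > 10:  # keep only the 10 most recent entries
        debug_output_history.pop(0)
    return render_debug_output(debug_output_history)

# Invented sample data standing in for the actor's streaming state.
current_responses = ["Hello there.", "How can I help today?"]
speech_chunks_per_response = [3, 5]

# Responses are folded into one multi-line entry: "🤖 [chunks] sentence".
line = ""
for i, response in enumerate(current_responses):
    line += "🤖 " if len(line) == 0 else ""
    line += f"[{speech_chunks_per_response[i]}] {response} \n"
if len(line) > 0:
    add_debug_output(line)

print(add_debug_output("👨 What can you do?"))
```

The main loop reuses the same shape for the live preview: it copies debug_output_history, appends the in-progress 🤖 and 👨 preview text, drops the oldest entry when the list exceeds ten, and calls render_debug_output on every tick.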