wip - stop response if user interrupts
- charles_app.py +12 -8
- respond_to_prompt_async.py +7 -3
charles_app.py
CHANGED
@@ -60,6 +60,14 @@ class CharlesApp:
 
         self._needs_init = True
         self.set_state("010 - Initialized")
+
+    async def cancel_response_task(self):
+        if self._respond_to_prompt_task is None:
+            return
+        await self._respond_to_prompt.terminate()
+        self._respond_to_prompt_task.cancel()
+        self._respond_to_prompt_task = None
+        self._respond_to_prompt = None
 
     async def start(self):
         if self._needs_init:
@@ -137,9 +145,7 @@ class CharlesApp:
                 prompt = additional_prompt + ". " + prompt
             await add_debug_output(f"π¨ {prompt}")
             self._prompt_manager.replace_or_append_user_message(prompt)
-
-            await self._respond_to_prompt.terminate()
-            self._respond_to_prompt_task.cancel()
+            await self.cancel_response_task()
             self._respond_to_prompt = RespondToPromptAsync(self._response_state_manager, self._audio_output_queue)
             self._respond_to_prompt_task = asyncio.create_task(self._respond_to_prompt.run(prompt, self._prompt_manager.messages))
             additional_prompt = None
@@ -153,15 +159,13 @@ class CharlesApp:
             if len(previous_prompt) > 0 and not has_spoken_for_this_prompt:
                 additional_prompt = previous_prompt
                 has_spoken_for_this_prompt = True
-
-                await self._respond_to_prompt.terminate()
-                self._respond_to_prompt_task.cancel()
-                self._respond_to_prompt_task = None
-                self._respond_to_prompt = None
+                await self.cancel_response_task()
             response_step_obs, response_state = self._response_state_manager.reset_episode()
             if additional_prompt is not None:
                 prompt = additional_prompt + ". " + prompt
                 human_preview_text = f"π¨β {prompt}"
+            # await self.cancel_response_task() # TODO re-enable to interupt when user speaks
+
 
             # i choose to add each line of responce one at a time as them come in
             for new_response in response_step_obs.llm_responses:
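The charles_app.py change folds the repeated terminate / cancel / set-to-None sequence into a single cancel_response_task() helper that is safe to call even when no response is in flight, and both call sites now go through it. A minimal sketch of that cancel-and-restart pattern with plain asyncio; Responder, App and handle_prompt here are hypothetical stand-ins, not the real RespondToPromptAsync and CharlesApp:

# Sketch only: cancel any in-flight response before starting the next one.
import asyncio


class Responder:
    """Hypothetical stand-in for RespondToPromptAsync."""

    async def terminate(self):
        pass  # the real class also flushes its queues here

    async def run(self, prompt):
        await asyncio.sleep(60)  # pretend to stream a long response


class App:
    """Hypothetical stand-in for CharlesApp."""

    def __init__(self):
        self._respond_to_prompt = None
        self._respond_to_prompt_task = None

    async def cancel_response_task(self):
        # Same shape as the new helper: no-op when nothing is running.
        if self._respond_to_prompt_task is None:
            return
        await self._respond_to_prompt.terminate()
        self._respond_to_prompt_task.cancel()
        self._respond_to_prompt_task = None
        self._respond_to_prompt = None

    async def handle_prompt(self, prompt):
        await self.cancel_response_task()  # interrupt whatever is still speaking
        self._respond_to_prompt = Responder()
        self._respond_to_prompt_task = asyncio.create_task(
            self._respond_to_prompt.run(prompt))


async def main():
    app = App()
    await app.handle_prompt("first question")
    await asyncio.sleep(0.1)
    await app.handle_prompt("user interrupts with a new question")
    await app.cancel_response_task()


asyncio.run(main())

Keeping the teardown in one place also makes the commented-out TODO (interrupting when the user starts speaking) a one-line change later.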
respond_to_prompt_async.py
CHANGED
@@ -110,11 +110,15 @@ class RespondToPromptAsync:
         # ray.kill(self.ffmpeg_converter)
 
         # Flush all queues
+        # TODO re-enable to interupt when user speaks
+        # while not self.audio_output_queue.empty():
+        #     await self.audio_output_queue.get_async()
+        #     # await self.audio_output_queue.get_async(block=False)
         while not self.llm_sentence_queue.empty():
-
+            self.llm_sentence_queue.get_nowait()
         while not self.speech_chunk_queue.empty():
-
+            self.speech_chunk_queue.get_nowait()
         for sentence_queue in self.sentence_queues:
             while not sentence_queue.empty():
-
+                sentence_queue.get_nowait()
 
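The respond_to_prompt_async.py change fills in the flush loops so terminate() actually discards buffered sentences and speech chunks with get_nowait(). Judging by the commented-out get_async() calls, the project's queues appear to be Ray-style queues, but the same drain pattern works with a standard asyncio.Queue; this is a sketch, and the QueueEmpty guard is an extra safety net that is not in the diff:

# Sketch only: discard everything currently buffered, without blocking.
import asyncio


def drain(queue: asyncio.Queue) -> int:
    """Empty the queue and return how many items were thrown away."""
    drained = 0
    while not queue.empty():
        try:
            queue.get_nowait()
            drained += 1
        except asyncio.QueueEmpty:
            # another consumer took the last item between empty() and get_nowait()
            break
    return drained


async def main():
    q: asyncio.Queue = asyncio.Queue()
    for sentence in ("hello", "how are you", "goodbye"):
        q.put_nowait(sentence)
    print(drain(q))   # 3
    print(q.empty())  # True


asyncio.run(main())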
|