first version streaming via webrtc but quality sucks
- charles_actor.py +8 -1
- debug_app.py +1 -0
- ffmpeg_converter_actor.py +4 -3
- respond_to_prompt_actor.py +24 -4
- streamlit_av_queue.py +30 -9
- webrtc_av_queue_actor.py +11 -0
charles_actor.py
CHANGED
@@ -22,6 +22,13 @@ class CharlesActor:
         print("000")
         from streamlit_av_queue import StreamlitAVQueue
         self._streamlit_av_queue = StreamlitAVQueue()
+        self._out_audio_queue = self._streamlit_av_queue.get_out_audio_queue()
+
+        print("001")
+        from ffmpeg_converter_actor import FFMpegConverterActor
+        self._ffmpeg_converter_actor = FFMpegConverterActor.remote(self._out_audio_queue)
+        await self._ffmpeg_converter_actor.start_process.remote()
+        self._ffmpeg_converter_actor.run.remote()
 
         print("002")
         from speech_to_text_vosk_actor import SpeechToTextVoskActor
@@ -29,7 +36,7 @@ class CharlesActor:
 
         print("003")
         from respond_to_prompt_actor import RespondToPromptActor
-        self._respond_to_prompt_actor = RespondToPromptActor.remote()
+        self._respond_to_prompt_actor = RespondToPromptActor.remote(self._ffmpeg_converter_actor)
 
         self._debug_queue = [
             # "hello, how are you today?",
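Note on wiring: the hunk above creates the converter actor and starts its ffmpeg process before handing it to RespondToPromptActor, so speech chunks never arrive at a dead pipe. A minimal sketch of that startup order (condensed from the diff; not the exact entrypoint code):

from streamlit_av_queue import StreamlitAVQueue
from ffmpeg_converter_actor import FFMpegConverterActor
from respond_to_prompt_actor import RespondToPromptActor

async def wire_output_path():
    av_queue = StreamlitAVQueue()
    out_audio_queue = av_queue.get_out_audio_queue()          # Ray queue owned by WebRtcAVQueueActor
    converter = FFMpegConverterActor.remote(out_audio_queue)  # decodes TTS audio into raw PCM
    await converter.start_process.remote()                    # spawn ffmpeg before any chunk arrives
    converter.run.remote()                                    # fire-and-forget pump: ffmpeg stdout -> queue
    return RespondToPromptActor.remote(converter)             # pipeline pushes speech into the converter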
debug_app.py
CHANGED
@@ -61,6 +61,7 @@ def process_frame(old_frame):
 
     if not converter_queue.empty():
         frame_as_bytes = converter_queue.get()
+        # print(f"frame_as_bytes: {len(frame_as_bytes)}")
         samples = np.frombuffer(frame_as_bytes, dtype=np.int16)
     else:
         # create a byte array of zeros
ffmpeg_converter_actor.py
CHANGED
@@ -7,7 +7,7 @@ from ray.util.queue import Queue
 class FFMpegConverterActor:
     def __init__(self, queue: Queue, buffer_size: int = 1920, output_format: str='s16le'):
         self.queue = queue
-        self.buffer_size =
+        self.buffer_size = buffer_size
         self.output_format = output_format
 
         self.input_pipe = None
@@ -17,7 +17,8 @@ class FFMpegConverterActor:
 
     async def run(self):
         while True:
-            chunk = await self.output_pipe.
+            chunk = await self.output_pipe.readexactly(self.buffer_size)
+            # print(f"FFMpegConverterActor: read {len(chunk)} bytes")
             await self.queue.put_async(chunk)
 
     async def start_process(self):
@@ -41,7 +42,7 @@ class FFMpegConverterActor:
         self.output_pipe = self.process.stdout
 
         assert self.input_pipe is not None, "input_pipe was not initialized"
-        print (f"input_pipe: {self.input_pipe}")
+        # print (f"input_pipe: {self.input_pipe}")
 
     async def push_chunk(self, chunk):
         try:
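The readexactly change above gives the converter fixed-size framing: 1920 bytes of s16le is 960 int16 samples, which matches a 20 ms mono frame at 48 kHz and the per-frame length streamlit_av_queue.py asserts. A standalone sketch of the same read loop (the real ffmpeg arguments live in start_process, which this diff doesn't show; the flags below are illustrative assumptions):

import asyncio

async def pcm_chunks(buffer_size: int = 1920, output_format: str = "s16le"):
    # assumed command: decode whatever arrives on stdin to raw PCM on stdout
    process = await asyncio.create_subprocess_exec(
        "ffmpeg", "-i", "pipe:0", "-f", output_format, "pipe:1",
        stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
    try:
        while True:
            # readexactly() waits for exactly buffer_size bytes, so every
            # queue entry downstream is one fixed-size PCM chunk
            yield await process.stdout.readexactly(buffer_size)
    except asyncio.IncompleteReadError as err:
        if err.partial:
            yield err.partial  # short final chunk at end of stream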
respond_to_prompt_actor.py
CHANGED
@@ -71,15 +71,34 @@ class SpeechToSpeakerActor:
     async def run(self):
         while True:
             audio_chunk = await self.input_queue.get_async()
+            # print (f"Got audio chunk {len(audio_chunk)}")
             self.chat_service.enqueue_speech_bytes_to_play([audio_chunk])
 
     def cancel(self):
         while not self.input_queue.empty():
             self.input_queue.get()
 
+@ray.remote
+class SpeechToConverterActor:
+    def __init__(self, input_queue, ffmpeg_converter_actor):
+        load_dotenv()
+        self.input_queue = input_queue
+        self.ffmpeg_converter_actor = ffmpeg_converter_actor
+
+    async def run(self):
+        while True:
+            audio_chunk = await self.input_queue.get_async()
+            # print (f"Got audio chunk {len(audio_chunk)}")
+            await self.ffmpeg_converter_actor.push_chunk.remote(audio_chunk)
+
+    def cancel(self):
+        while not self.input_queue.empty():
+            self.input_queue.get()
+
+
 @ray.remote
 class RespondToPromptActor:
-    def __init__(self):
+    def __init__(self, ffmpeg_converter_actor):
         voice_id="2OviOUQc1JsQRQgNkVBj"
         self.prompt_queue = Queue(maxsize=100)
         self.llm_sentence_queue = Queue(maxsize=100)
@@ -87,18 +106,19 @@ class RespondToPromptActor:
 
         self.prompt_to_llm = PromptToLLMActor.remote(self.prompt_queue, self.llm_sentence_queue, voice_id)
         self.llm_sentence_to_speech = LLMSentanceToSpeechActor.remote(self.llm_sentence_queue, self.speech_chunk_queue, voice_id)
-        self.
+        # self.speech_output = SpeechToSpeakerActor.remote(self.speech_chunk_queue, voice_id)
+        self.speech_output = SpeechToConverterActor.remote(self.speech_chunk_queue, ffmpeg_converter_actor)
 
         # Start the pipeline components.
         self.prompt_to_llm.run.remote()
         self.llm_sentence_to_speech.run.remote()
-        self.
+        self.speech_output.run.remote()
 
     def enqueue_prompt(self, prompt):
         print("flush anything queued")
         prompt_to_llm_future = self.prompt_to_llm.cancel.remote()
         llm_sentence_to_speech_future = self.llm_sentence_to_speech.cancel.remote()
-        speech_to_speaker_future = self.
+        speech_to_speaker_future = self.speech_output.cancel.remote()
         ray.get([
             prompt_to_llm_future,
             llm_sentence_to_speech_future,
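Each stage's cancel() drains its input queue rather than killing the actor, so enqueue_prompt can flush stale output from the previous prompt before a new one starts. Condensed, the flush pattern looks like this (the tail of enqueue_prompt falls outside the diff; the final put is an assumption):

def flush_then_enqueue(self, prompt):
    # wait until every stage has emptied its queue so leftover audio
    # from the previous prompt never reaches the converter
    ray.get([
        self.prompt_to_llm.cancel.remote(),
        self.llm_sentence_to_speech.cancel.remote(),
        self.speech_output.cancel.remote(),
    ])
    self.prompt_queue.put(prompt)  # assumed next step, not shown above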
streamlit_av_queue.py
CHANGED
@@ -12,6 +12,7 @@ import torch
 
 class StreamlitAVQueue:
     def __init__(self, audio_bit_rate=16000):
+        self._output_channels = 2
         self._audio_bit_rate = audio_bit_rate
         self.queue_actor = WebRtcAVQueueActor.options(
             name="WebRtcAVQueueActor",
@@ -56,14 +57,28 @@ class StreamlitAVQueue:
 
         # return empty frames to avoid echo
         new_frames = []
-
-
-
-
-
-
-
-
+        try:
+            for frame in frames:
+                required_samples = frame.samples
+                # print (f"frame: {frame.format.name}, {frame.layout.name}, {frame.sample_rate}, {frame.samples}")
+                assert frame.format.bytes == 2
+                assert frame.format.name == 's16'
+                frame_as_bytes = await self.queue_actor.get_out_audio_frame.remote()
+                if frame_as_bytes:
+                    # print(f"frame_as_bytes: {len(frame_as_bytes)}")
+                    assert len(frame_as_bytes) == frame.samples * frame.format.bytes
+                    samples = np.frombuffer(frame_as_bytes, dtype=np.int16)
+                else:
+                    samples = np.zeros((required_samples * 2 * 1), dtype=np.int16)
+                if self._output_channels == 2:
+                    samples = np.vstack((samples, samples)).reshape((-1,), order='F')
+                samples = samples.reshape(1, -1)
+                layout = 'stereo' if self._output_channels == 2 else 'mono'
+                new_frame = av.AudioFrame.from_ndarray(samples, format='s16', layout=layout)
+                new_frame.sample_rate = frame.sample_rate
+                new_frames.append(new_frame)
+        except Exception as e:
+            print (e)
         return new_frames
 
     async def get_in_audio_frames_async(self) -> List[av.AudioFrame]:
@@ -72,4 +87,10 @@ class StreamlitAVQueue:
 
     async def get_video_frames_async(self) -> List[av.AudioFrame]:
         shared_tensors = await self.queue_actor.get_in_video_frames.remote()
-        return shared_tensors
+        return shared_tensors
+
+    def get_out_audio_queue(self):
+        return self.queue_actor.get_out_audio_queue.remote()
+
+    # def get_out_audio_frame(self):
+    #     return self.queue_actor.get_out_audio_frame.remote()
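The vstack/reshape pair in the new loop is a compact mono-to-stereo interleave; a tiny worked example of what it computes:

import numpy as np

mono = np.array([1, 2, 3], dtype=np.int16)
stereo = np.vstack((mono, mono)).reshape((-1,), order='F')
# vstack stacks the signal twice:  [[1, 2, 3],
#                                   [1, 2, 3]]
# column-major (order='F') flattening reads down each column first,
# giving interleaved L/R pairs:    [1, 1, 2, 2, 3, 3]
packed = stereo.reshape(1, -1)  # the packed (1, samples*channels) shape av expects

One thing to watch in the hunk above: the silence branch already allocates required_samples * 2 * 1 samples, and the stereo branch then doubles that again, so zero-filled frames appear to come out twice as long as real ones; that mismatch may be part of the quality problem the commit title mentions.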
webrtc_av_queue_actor.py
CHANGED
@@ -10,6 +10,8 @@ class WebRtcAVQueueActor:
     def __init__(self):
         self.in_audio_queue = Queue(maxsize=100)  # Adjust the size as needed
         self.in_video_queue = Queue(maxsize=100)  # Adjust the size as needed
+        self.out_audio_queue = Queue(maxsize=100)  # Adjust the size as needed
+
 
     def enqueue_in_video_frame(self, shared_tensor_ref):
         if self.in_video_queue.full():
@@ -41,3 +43,12 @@ class WebRtcAVQueueActor:
         shared_tensor_ref = self.in_video_queue.get()
         video_frames.append(shared_tensor_ref)
         return video_frames
+
+    def get_out_audio_queue(self):
+        return self.out_audio_queue
+
+    async def get_out_audio_frame(self):
+        if self.out_audio_queue.empty():
+            return None
+        audio_frame = await self.out_audio_queue.get_async()
+        return audio_frame
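get_out_audio_frame returns None instead of blocking, so the WebRTC callback path never stalls when the converter has nothing ready. The same non-blocking read can be expressed with ray.util.queue's queue.Queue-style API (a sketch of the equivalent, not what the commit uses):

from ray.util.queue import Queue, Empty

def try_get_out_audio_frame(out_audio_queue: Queue):
    try:
        return out_audio_queue.get(block=False)  # raises Empty rather than waiting
    except Empty:
        return None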