|
|
|
|
|
|
|
import os |
|
|
|
import psutil |
|
|
|
|
|
from streamlit_mic_recorder import mic_recorder |
|
|
|
|
|
from audio_processing.A2T import A2T |
|
from audio_processing.T2A import T2A |
|
from command.utils import build_chain |
|
from llm.llm_factory import LLM_Factory |
|
|
|
|
|
|
|
# Spoken greeting played when the app starts. User-facing runtime text —
# deliberately left byte-for-byte as written.
greeting_text = "Hi, my name is M8... oops, that's from my future, but right now I'm Chelsea, your personal voice assistant. Ask me anything you want and I'll try to help you. Well, maybe you want to talk about your favourite band, maybe Bring Me The Horizon. You want to talk about Chelsea Smile."

# Factory that builds an LLM instance per request (see llm.llm_factory);
# used by main() via create_llm(...).
llm_model = LLM_Factory()
|
|
|
def prepare_cor(input_text: str):
    """Pass *input_text* through the command chain-of-responsibility.

    Builds the command chain and hands the text to its entry handler,
    returning whatever the chain produces.
    """
    chain = build_chain.build_command_chain()
    return chain.handle_command(input_text)
|
|
|
|
|
|
|
# Passed to LLM_Factory.create_llm(..., trigger=...) to select a backend.
# Presumably "hf" names the HuggingFace backend and "effective" a
# resource-efficient variant — TODO confirm against llm_factory.
trigger = {"hf": "effective"}

# Single text-to-audio engine instance, shared by all calls in main().
t2a = T2A()
|
|
|
def main():
    """Streamlit entry point: record speech, transcribe it, query the LLM,
    and play both the greeting and the model's answer back as audio.
    """
    # just_once=True: the widget returns the recorded audio a single time,
    # then None on subsequent Streamlit reruns.
    mic = mic_recorder(start_prompt="Record", stop_prompt="Stop", just_once=True)

    # NOTE(review): this replays on every Streamlit rerun, not only the
    # first — consider guarding with st.session_state if that is unintended.
    t2a.autoplay(greeting_text)

    if mic is not None:
        # Speech-to-text over the raw recorded bytes.
        a2t = A2T(mic["bytes"])
        text = a2t.predict()
        print(text)

        llm = llm_model.create_llm(prompt_entity=text, prompt_id=1, trigger=trigger)
        # create_llm may return None (unknown trigger / backend failure);
        # fall back to a spoken error message. Fixed the broken grammar and
        # the "Jhon" typo in the original fallback string.
        response = (
            llm.execution()
            if llm is not None
            else "Oops, an error occurred. Please try again. Who is John Galt!"
        )

        t2a.autoplay(response)
|
|
|
|
|
if __name__ == "__main__":
    # Take one memory snapshot so Total and Available come from the same
    # reading (the original called psutil.virtual_memory() twice).
    mem = psutil.virtual_memory()
    print(f"Total Memory: {mem.total / (1024**3):.2f} GB")
    print(f"Available Memory: {mem.available / (1024**3):.2f} GB")
    print(f"CPU Cores: {psutil.cpu_count()}")
    print(f"CPU Usage: {psutil.cpu_percent()}%")

    main()
|
|