John Langley committed
Commit d353b29 · 1 Parent(s): 8167b16

streaming voice

Files changed (1)
  1. app.py +6 -8
app.py CHANGED
@@ -7,7 +7,7 @@ from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
 from faster_whisper import WhisperModel
 
-from utilsinference import get_sentence, tts_interface, generate_llm_output
+from utilsasync import get_sentence, tts_interface
 
 os.environ["CUDACXX"] = "/usr/local/cuda/bin/nvcc"
 os.system('python -m unidic download')
@@ -61,16 +61,14 @@ def respond(chat_history, voice):
     if not voice:
         return None, gr.Warning("Please select a voice.")
 
-    sentence = generate_llm_output(chat_history[-1][0], chat_history[:-1], mistral_llm)
-    audiopb = tts_interface(sentence, voice)
-
-    print("Inserting sentence to queue")
-    print(sentence)
-
+    for sentence, chatbot_history in get_sentence(chat_history, mistral_llm):
+        print("Inserting sentence to queue")
+        print(sentence)
 
+        audiopb = tts_interface(sentence, voice)
 
     #history, response = get_sentence(chat_history, mistral_llm)
-    yield chat_history, sentence, audiopb
+        yield chatbot_history, sentence, audiopb
 
 
 #Gradio Interface
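
The rewritten loop assumes that get_sentence in utilsasync is a generator yielding (sentence, updated_chat_history) pairs as the LLM streams tokens, so each sentence can be passed to tts_interface and yielded to Gradio before the full completion finishes. That module is not part of this diff; a minimal sketch under that assumption, using llama-cpp-python's stream=True completion API and a naive punctuation split, might look like:

def get_sentence(chat_history, llm):
    # Hypothetical sketch -- utilsasync is not shown in this commit.
    # Assumes chat_history is a list of [user, assistant] pairs (Gradio style).
    prompt = chat_history[-1][0]
    chat_history[-1][1] = ""
    buffer = ""
    # llama-cpp-python yields completion chunks when stream=True
    for chunk in llm(prompt, max_tokens=512, stream=True):
        text = chunk["choices"][0]["text"]
        buffer += text
        chat_history[-1][1] += text
        # Flush each complete sentence as soon as it appears (naive split).
        while any(p in buffer for p in ".!?"):
            idx = min(i for i in (buffer.find(p) for p in ".!?") if i != -1)
            sentence, buffer = buffer[: idx + 1], buffer[idx + 1:]
            yield sentence.strip(), chat_history
    if buffer.strip():
        # Trailing partial sentence with no terminal punctuation.
        yield buffer.strip(), chat_history

Because respond is itself a generator, Gradio streams each yielded (history, sentence, audio) triple to the UI as it arrives, which is what makes the voice output feel streamed rather than delivered in one block at the end.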