ylacombe (HF staff) committed
Commit
830bcbc
1 Parent(s): 7b1c594

Update app.py

Files changed (1):
  app.py  +38 -5
app.py CHANGED
@@ -5,16 +5,25 @@ import os
 import gradio as gr
 import numpy as np
 import torch
+import nltk  # we'll use this to split into sentences
+import uuid
+import soundfile as SF
+
+from TTS.api import TTS
+tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1", gpu=True)
 
 DESCRIPTION = """# Speak with Llama2
 TODO
 """
 
+CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1"
+
 system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
 temperature = 0.9
 top_p = 0.6
 repetition_penalty = 1.2
 
+
 import gradio as gr
 import os
 import time
@@ -56,9 +65,8 @@ def add_file(history, file):
 
 def bot(history):
 
-
     history[-1][1] = ""
-    for character in text_client.predict(
+    for character in text_client.submit(
             history,
             system_message,
             temperature,
@@ -67,9 +75,30 @@ def bot(history):
             repetition_penalty,
             api_name="/chat"
     ):
-        history[-1][1] += character
+        history[-1][1] = character
         yield history
 
+def generate_speech(history):
+    text_to_generate = history[-1][1]
+    text_to_generate = text_to_generate.replace("\n", " ").strip()
+    text_to_generate = nltk.sent_tokenize(text_to_generate)
+
+    filename = f"{uuid.uuid4()}.wav"
+    sampling_rate = tts.synthesizer.tts_config.audio["sample_rate"]
+    silence = [0] * int(0.25 * sampling_rate)
+
+    for sentence in text_to_generate:
+        # generate speech by cloning a voice using default settings
+        wav = tts.tts(text=sentence,
+                      #speaker_wav="/home/yoach/spaces/talkWithLLMs/examples/female.wav",
+                      speed=1.5,
+                      language="en")
+
+        yield (sampling_rate, np.array(wav))  #np.array(wav + silence))
+
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(
@@ -87,15 +116,19 @@ with gr.Blocks() as demo:
         container=False,
     )
     btn = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
+
+    with gr.Row():
+        audio = gr.Audio(type="numpy", streaming=True, autoplay=True)
 
     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
         bot, chatbot, chatbot
-    )
+    ).then(generate_speech, chatbot, audio)
+
     txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
 
     file_msg = btn.stop_recording(add_file, [chatbot, btn], [chatbot], queue=False).then(
         bot, chatbot, chatbot
-    )
+    ).then(generate_speech, chatbot, audio)
 
     #file_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
 
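
Note on the new nltk dependency: nltk.sent_tokenize relies on the "punkt" tokenizer models, which are not bundled with the package. A minimal startup sketch, assuming the Space also needs to fetch that data once (the download call below is an assumption and is not part of this commit):

    import nltk

    # nltk.sent_tokenize needs the "punkt" tokenizer data; fetch it once at startup.
    # Assumed addition: this commit imports nltk but does not download the models.
    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        nltk.download("punkt")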
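
generate_speech also builds a quarter-second silence buffer, but the padded variant of the yield is left commented out. A sketch of that variant, assuming wav is the list of float samples returned by tts.tts(); the helper name pad_with_silence is hypothetical:

    import numpy as np

    def pad_with_silence(wav, sampling_rate, pause_s=0.25):
        # Append a short pause after each synthesized sentence so streamed
        # chunks do not run into each other; mirrors the commented-out
        # np.array(wav + silence) variant in generate_speech above.
        silence = [0.0] * int(pause_s * sampling_rate)
        return np.array(wav + silence, dtype=np.float32)

    # Inside the sentence loop one could then yield:
    #     yield (sampling_rate, pad_with_silence(wav, sampling_rate))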