pedropauletti committed on
Commit
0667117
1 Parent(s): 488c845

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -20
app.py CHANGED
@@ -4,6 +4,7 @@ import time
4
  from haystack.document_stores import InMemoryDocumentStore
5
  from haystack.nodes import EmbeddingRetriever
6
  import pandas as pd
 
7
 
8
 
9
  def load_qa_model():
@@ -40,18 +41,26 @@ def add_file(history, file):
40
  return history
41
 
42
 
43
- def bot(history):
44
- print(history)
45
- # response = "**That's cool!**"
46
- history[-1][1] = ""
47
-
48
  global retriever
49
- response = get_answers(retriever, history[0][0])
 
 
 
 
50
 
51
- for character in response:
52
- history[-1][1] += character
53
- time.sleep(0.01)
54
- yield history
 
 
 
 
 
 
 
 
55
 
56
 
57
 
@@ -79,14 +88,19 @@ retriever = load_qa_model()
79
 
80
 
81
  with gr.Blocks() as demo:
82
- chatbot = gr.Chatbot(
 
 
 
 
 
83
  [],
84
  elem_id="chatbot",
85
  bubble_full_width=False,
86
- # avatar_images=(None, "/content/avatar.png"),
87
- )
88
 
89
- with gr.Row():
90
  txt = gr.Textbox(
91
  scale=4,
92
  show_label=False,
@@ -94,12 +108,18 @@ with gr.Blocks() as demo:
94
  container=False,
95
  )
96
  inputRecord = gr.Audio(label="Record a question", source="microphone", type="filepath")
97
- audioOutput = gr.Audio(label="Listen the answer", interactive=False)
98
-
99
- txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
100
- bot, chatbot, chatbot
101
- )
102
- txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
 
 
 
 
 
 
103
 
104
  demo.queue()
105
  demo.launch()
 
4
  from haystack.document_stores import InMemoryDocumentStore
5
  from haystack.nodes import EmbeddingRetriever
6
  import pandas as pd
7
+ from deep_translator import GoogleTranslator
8
 
9
 
10
  def load_qa_model():
 
41
  return history
42
 
43
 
44
def chatbot_response(chat_history, language):
    """Stream the retriever's answer into the chat history, character by character.

    Args:
        chat_history: Gradio Chatbot history — a list of [user, bot] message
            pairs. The question is read from chat_history[0][0] and the answer
            is streamed into chat_history[-1][1].
        language: UI language code; 'pt-br' routes the question/answer through
            translation, anything else queries in English directly.

    Yields:
        The chat history after each appended character (typewriter effect).
    """
    global retriever
    global last_answer

    # Reset the pending bot slot so streamed characters start from empty.
    if len(chat_history) > 0:
        chat_history[-1][1] = ""

    if language == 'pt-br':
        # The retriever indexes English documents: translate the question to
        # English before querying, then translate the answer back to Portuguese.
        response = get_answers(retriever, GoogleTranslator(source='pt', target='en').translate(chat_history[0][0]))
        response = GoogleTranslator(source='en', target='pt').translate(response)
    else:
        response = get_answers(retriever, chat_history[0][0])

    # Remember the full answer so TTS playback can reuse it later.
    last_answer = response

    for character in response:
        chat_history[-1][1] += character
        time.sleep(0.01)
        # BUG FIX: original yielded `history`, an undefined name in this scope
        # (the parameter was renamed to `chat_history`), causing a NameError.
        yield chat_history
64
 
65
 
66
 
 
88
 
89
 
90
  with gr.Blocks() as demo:
91
+ with gr.Accordion("Settings", open=False):
92
+ language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True)
93
+
94
+
95
+ with gr.Tab("Help"):
96
+ chatbot = gr.Chatbot(
97
  [],
98
  elem_id="chatbot",
99
  bubble_full_width=False,
100
+ # avatar_images=(None, "content/avatar-socialear.png"),
101
+ )
102
 
103
+ with gr.Row():
104
  txt = gr.Textbox(
105
  scale=4,
106
  show_label=False,
 
108
  container=False,
109
  )
110
  inputRecord = gr.Audio(label="Record a question", source="microphone", type="filepath")
111
+ with gr.Column():
112
+ btn = gr.Button(value="Listen the answer")
113
+ audioOutput = gr.Audio(interactive=False)
114
+
115
+ txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
116
+ chatbot_response, [chatbot, language], chatbot
117
+ )
118
+ txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
119
+ inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [txt])
120
+ btn.click(fn=TTS_chatbot, inputs=[language], outputs=audioOutput)
121
+
122
+ examples = gr.Examples(examples=examples_chatbot, inputs=[txt])
123
 
124
  demo.queue()
125
  demo.launch()