Samarth991 committed on
Commit
b04aa25
1 Parent(s): ee9bd35

debugging chat

Browse files
Files changed (1) hide show
  1. app.py +36 -36
app.py CHANGED
@@ -116,7 +116,7 @@ def infer(question, history):
116
  return result["result"]
117
 
118
  def bot(history):
119
- print("history from bot: ",history[-1][0])
120
  response = infer(history[-1][0], history)
121
  history[-1][1] = ""
122
 
@@ -126,7 +126,7 @@ def bot(history):
126
  yield history
127
 
128
  def add_text(history, text):
129
- print("Add Text 1 history{} text{}:".format(history,text ))
130
  history = history + [(text, None)]
131
  return history, ""
132
 
@@ -148,34 +148,42 @@ with gr.Blocks(css=css) as demo:
148
  with gr.Column(elem_id="col-container"):
149
  gr.HTML(title)
150
 
151
- with gr.Column():
152
- with gr.Box():
153
- LLM_option = gr.Dropdown(['HuggingFace','OpenAI'],label='Large Language Model Selection',info='LLM Service')
154
- API_key = gr.Textbox(label="Add API key", type="password")
 
 
 
 
 
 
 
 
155
 
156
 
157
- with gr.Column():
158
- with gr.Box():
159
- file_extension = gr.Dropdown(FILE_EXT, label="File Extensions", info="Select type of file to upload !")
160
- pdf_doc = gr.File(label="Upload File to start QA", file_types=FILE_EXT, type="file")
161
- with gr.Accordion(label='Advanced options', open=False):
162
- max_new_tokens = gr.Slider(
163
- label='Max new tokens',
164
- minimum=2048,
165
- maximum=MAX_NEW_TOKENS,
166
- step=1,
167
- value=DEFAULT_MAX_NEW_TOKENS,
168
- )
169
- temperature = gr.Slider(
170
- label='Temperature',
171
- minimum=0.1,
172
- maximum=4.0,
173
- step=0.1,
174
- value=DEFAULT_TEMPERATURE,
175
  )
176
- with gr.Row():
177
- langchain_status = gr.Textbox(label="Status", placeholder="", interactive = False)
178
- load_pdf = gr.Button("Upload File & Generate Embeddings",).style(full_width = False)
 
 
 
 
 
 
 
179
 
180
  # chatbot = gr.Chatbot()l̥
181
  # question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter")
@@ -184,16 +192,8 @@ with gr.Blocks(css=css) as demo:
184
  load_pdf.click(loading_file, None, langchain_status, queue=False)
185
  load_pdf.click(document_loader, inputs=[pdf_doc,API_key,file_extension,LLM_option,temperature,max_new_tokens], outputs=[langchain_status], queue=False)
186
 
187
- with gr.Group():
188
- chatbot = gr.Chatbot(height=300)
189
- # with gr.Row():
190
- # sources = gr.HTML(value = "Source paragraphs where I looked for answers will appear here", height=300)
191
- with gr.Row():
192
- question = gr.Textbox(label="Type your question?",lines=1).style(full_width=False)
193
- submit_btn = gr.Button(value="Send message", variant="primary", scale = 1)
194
  question.submit(add_text, [chatbot, question], [chatbot, question]).then(bot, chatbot, chatbot)
195
  submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(bot, chatbot, chatbot)
196
 
197
-
198
-
199
  demo.launch()
 
116
  return result["result"]
117
 
118
  def bot(history):
119
+
120
  response = infer(history[-1][0], history)
121
  history[-1][1] = ""
122
 
 
126
  yield history
127
 
128
  def add_text(history, text):
129
+
130
  history = history + [(text, None)]
131
  return history, ""
132
 
 
148
  with gr.Column(elem_id="col-container"):
149
  gr.HTML(title)
150
 
151
+ with gr.Group():
152
+ chatbot = gr.Chatbot(height=300)
153
+ # with gr.Row():
154
+ # sources = gr.HTML(value = "Source paragraphs where I looked for answers will appear here", height=300)
155
+ with gr.Row():
156
+ question = gr.Textbox(label="Type your question?",lines=1).style(full_width=False)
157
+ submit_btn = gr.Button(value="Send message", variant="primary", scale = 1)
158
+
159
+ with gr.Column():
160
+ with gr.Box():
161
+ LLM_option = gr.Dropdown(['HuggingFace','OpenAI'],label='Large Language Model Selection',info='LLM Service')
162
+ API_key = gr.Textbox(label="Add API key", type="password")
163
 
164
 
165
+ with gr.Column():
166
+ with gr.Box():
167
+ file_extension = gr.Dropdown(FILE_EXT, label="File Extensions", info="Select type of file to upload !")
168
+ pdf_doc = gr.File(label="Upload File to start QA", file_types=FILE_EXT, type="file")
169
+ with gr.Accordion(label='Advanced options', open=False):
170
+ max_new_tokens = gr.Slider(
171
+ label='Max new tokens',
172
+ minimum=2048,
173
+ maximum=MAX_NEW_TOKENS,
174
+ step=1,
175
+ value=DEFAULT_MAX_NEW_TOKENS,
 
 
 
 
 
 
 
176
  )
177
+ temperature = gr.Slider(
178
+ label='Temperature',
179
+ minimum=0.1,
180
+ maximum=4.0,
181
+ step=0.1,
182
+ value=DEFAULT_TEMPERATURE,
183
+ )
184
+ with gr.Row():
185
+ langchain_status = gr.Textbox(label="Status", placeholder="", interactive = False)
186
+ load_pdf = gr.Button("Upload File & Generate Embeddings",).style(full_width = False)
187
 
188
  # chatbot = gr.Chatbot()l̥
189
  # question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter")
 
192
  load_pdf.click(loading_file, None, langchain_status, queue=False)
193
  load_pdf.click(document_loader, inputs=[pdf_doc,API_key,file_extension,LLM_option,temperature,max_new_tokens], outputs=[langchain_status], queue=False)
194
 
195
+
 
 
 
 
 
 
196
  question.submit(add_text, [chatbot, question], [chatbot, question]).then(bot, chatbot, chatbot)
197
  submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(bot, chatbot, chatbot)
198
 
 
 
199
  demo.launch()