eduardo-alvarez committed on
Commit
ef0f6a7
β€’
1 Parent(s): 0fbd2eb

fixed chatbot

Browse files
Files changed (1) hide show
  1. app.py +31 -9
app.py CHANGED
@@ -57,8 +57,8 @@ with demo:
57
  This function simulates streaming by yielding characters one by one.
58
  """
59
  url = inference_endpoint_url
60
- params = {"query": query,"selected_model":chat_model}
61
- with requests.get(url, json=params, stream=True) as r:
62
  for chunk in r.iter_content(chunk_size=1):
63
  if chunk:
64
  yield chunk.decode()
@@ -68,16 +68,38 @@ with demo:
68
  Wrapper function to call the streaming API and compile the response.
69
  """
70
  response = ''
71
-
72
- global chat_model_selection
73
-
74
  for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
75
- if char == '<':
76
  break
77
  response += char
78
- yield response
79
-
80
- gr.ChatInterface(get_response, retry_btn = None, undo_btn=None, concurrency_limit=inference_concurrency_limit).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
83
  with gr.TabItem("πŸ† LLM Leadeboard", elem_id="llm-benchmark-table", id=0):
 
57
  This function simulates streaming by yielding characters one by one.
58
  """
59
  url = inference_endpoint_url
60
+ params = {"query": query, "selected_model": chat_model}
61
+ with requests.get(url, json=params, stream=True) as r: # Use params for query parameters
62
  for chunk in r.iter_content(chunk_size=1):
63
  if chunk:
64
  yield chunk.decode()
 
68
  Wrapper function to call the streaming API and compile the response.
69
  """
70
  response = ''
 
 
 
71
  for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
72
+ if char == '<': # This seems to be your stopping condition; adjust as needed.
73
  break
74
  response += char
75
+ yield [(f"πŸ€– Response from LLM: {chat_model_selection}", response)] # Correct format for Gradio Chatbot
76
+
77
+ with gr.Blocks() as chat_interface:
78
+ chatbot = gr.Chatbot()
79
+ msg = gr.Textbox()
80
+ submit = gr.Button("Submit")
81
+ clear = gr.Button("Clear")
82
+
83
+ def user(user_message, history):
84
+ return "", history + [[user_message, None]]
85
+
86
+ def clear_chat(*args):
87
+ return [] # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
88
+
89
+ submit.click(
90
+ fn=get_response,
91
+ inputs=[msg, chatbot],
92
+ outputs=chatbot
93
+ )
94
+
95
+ clear.click(
96
+ fn=clear_chat,
97
+ inputs=None,
98
+ outputs=chatbot
99
+ )
100
+
101
+ chat_interface.queue()
102
+ chat_interface.launch()
103
 
104
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
105
  with gr.TabItem("πŸ† LLM Leadeboard", elem_id="llm-benchmark-table", id=0):