xnetba committed on
Commit
11333c1
1 Parent(s): a1b778d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -7
app.py CHANGED
@@ -2,16 +2,18 @@ import os
2
  import gradio as gr
3
  from text_generation import Client, InferenceAPIClient
4
 
 
5
# Conversation pre-prompt: a single greeting turn from the human with an
# empty bot reply, used to seed the OpenChat model's context.
openchat_preprompt = "\n<human>: Zdravo!\n<bot>: \n"
8
 
 
9
def get_client(model: str):
    """Return a text-generation client for *model*.

    The OpenChat model is served from a dedicated endpoint (URL taken from
    the ``OPENCHAT_API_URL`` environment variable); every other model is
    reached through the Hugging Face Inference API, authenticated with
    ``HF_TOKEN`` when it is set.
    """
    openchat_model = "togethercomputer/GPT-NeoXT-Chat-Base-20B"
    if model != openchat_model:
        return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None))
    return Client(os.getenv("OPENCHAT_API_URL"))
13
 
14
-
15
  def get_usernames(model: str):
16
  """
17
  Returns:
@@ -23,7 +25,7 @@ def get_usernames(model: str):
23
  return openchat_preprompt, "<human>: ", "<bot>: ", "\n"
24
  return "", "User: ", "Assistant: ", "\n"
25
 
26
-
27
  def predict(
28
  model: str,
29
  inputs: str,
@@ -101,11 +103,11 @@ def predict(
101
  ]
102
  yield chat, history
103
 
104
-
105
def reset_textbox():
    """Return a Gradio update that clears the input textbox."""
    cleared = gr.update(value="")
    return cleared
107
 
108
-
109
  def radio_on_change(
110
  value: str,
111
  disclaimer,
@@ -161,6 +163,7 @@ text_generation_inference = """
161
  openchat_disclaimer = """
162
  """
163
 
 
164
  with gr.Blocks(
165
  css="""#col_container {margin-left: auto; margin-right: auto;}
166
  #chatbot {height: 520px; overflow: auto;}"""
@@ -233,7 +236,8 @@ with gr.Blocks(
233
  visible=False,
234
  )
235
  watermark = gr.Checkbox(value=False, label="Vodeni žig teksta")
236
-
 
237
  model.change(
238
  lambda value: radio_on_change(
239
  value,
@@ -257,6 +261,7 @@ with gr.Blocks(
257
  ],
258
  )
259
 
 
260
  inputs.submit(
261
  predict,
262
  [
@@ -291,6 +296,8 @@ with gr.Blocks(
291
  )
292
  b1.click(reset_textbox, [], [inputs])
293
  inputs.submit(reset_textbox, [], [inputs])
294
-
 
295
  gr.Markdown(description)
296
- demo.queue(concurrency_count=16).launch(debug=True)
 
 
2
  import gradio as gr
3
  from text_generation import Client, InferenceAPIClient
4
 
5
# Pre-prompt string seeding the chat interaction: one human greeting
# followed by an empty bot turn.
openchat_preprompt = "\n<human>: Zdravo!\n<bot>: \n"
9
 
10
def get_client(model: str):
    """Build and return the client object used to query *model*.

    GPT-NeoXT-Chat-Base-20B (OpenChat) talks to its own endpoint, whose
    URL comes from the ``OPENCHAT_API_URL`` environment variable; any
    other model name is routed via the Inference API, using ``HF_TOKEN``
    for authentication when available.
    """
    if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
        endpoint = os.getenv("OPENCHAT_API_URL")
        return Client(endpoint)
    token = os.getenv("HF_TOKEN", None)
    return InferenceAPIClient(model, token=token)
15
 
16
+ #This function returns pre-prompt, username, bot name, and separator strings based on the provided model.
17
  def get_usernames(model: str):
18
  """
19
  Returns:
 
25
  return openchat_preprompt, "<human>: ", "<bot>: ", "\n"
26
  return "", "User: ", "Assistant: ", "\n"
27
 
28
+ #This function performs the text generation prediction based on the provided inputs and model parameters.
29
  def predict(
30
  model: str,
31
  inputs: str,
 
103
  ]
104
  yield chat, history
105
 
106
def reset_textbox():
    """Reset the app's input textbox to an empty string."""
    return gr.update(value="")
109
 
110
+ #This function handles the change in the selected model radio button and updates the visibility and values of other related input elements accordingly.
111
  def radio_on_change(
112
  value: str,
113
  disclaimer,
 
163
  openchat_disclaimer = """
164
  """
165
 
166
+ #This sets up the Gradio app interface using the gr.Blocks context manager.
167
  with gr.Blocks(
168
  css="""#col_container {margin-left: auto; margin-right: auto;}
169
  #chatbot {height: 520px; overflow: auto;}"""
 
236
  visible=False,
237
  )
238
  watermark = gr.Checkbox(value=False, label="Vodeni žig teksta")
239
+
240
+ # This method triggers the appropriate actions whenever the selected model radio button changes.
241
  model.change(
242
  lambda value: radio_on_change(
243
  value,
 
261
  ],
262
  )
263
 
264
+ # These methods define the actions performed when the input textbox is submitted or a button is clicked.
265
  inputs.submit(
266
  predict,
267
  [
 
296
  )
297
  b1.click(reset_textbox, [], [inputs])
298
  inputs.submit(reset_textbox, [], [inputs])
299
+
300
+ #The app description markdown is displayed. The app is launched with the specified concurrency count and debug mode enabled.
301
  gr.Markdown(description)
302
+ demo.queue(concurrency_count=16).launch(debug=True)
303
+