ysharma HF staff committed on
Commit
dc62671
β€’
1 Parent(s): 6bfce88

Setting the app as inactive (#132)

Browse files

- Setting the app as inactive (287a2f3730f695e2f1d6bd6b787522bd7a8629ac)
- Setting inputs noninteractive (6c8ace4020b8da4ad5a2230b391aefdc60ee8929)

Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +25 -12
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: πŸ’»
4
  colorFrom: green
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.13.0
8
  app_file: app.py
9
  pinned: false
10
  license: mit
 
4
  colorFrom: green
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 4.31.5
8
  app_file: app.py
9
  pinned: false
10
  license: mit
app.py CHANGED
@@ -9,7 +9,11 @@ api_url = os.getenv('API_URL')
9
  headers = {"Authorization": f"Bearer {HF_TOKEN}"}
10
  client = AsyncInferenceClient(api_url)
11
 
12
-
 
 
 
 
13
  system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
14
  title = "Llama2 70B Chatbot"
15
  description = """
@@ -126,14 +130,14 @@ def vote(data: gr.LikeData):
126
 
127
 
128
  additional_inputs=[
129
- gr.Textbox("", label="Optional system prompt"),
130
  gr.Slider(
131
  label="Temperature",
132
  value=0.9,
133
  minimum=0.0,
134
  maximum=1.0,
135
  step=0.05,
136
- interactive=True,
137
  info="Higher values produce more diverse outputs",
138
  ),
139
  gr.Slider(
@@ -142,7 +146,7 @@ additional_inputs=[
142
  minimum=0,
143
  maximum=4096,
144
  step=64,
145
- interactive=True,
146
  info="The maximum numbers of new tokens",
147
  ),
148
  gr.Slider(
@@ -151,7 +155,7 @@ additional_inputs=[
151
  minimum=0.0,
152
  maximum=1,
153
  step=0.05,
154
- interactive=True,
155
  info="Higher values sample more low-probability tokens",
156
  ),
157
  gr.Slider(
@@ -160,31 +164,40 @@ additional_inputs=[
160
  minimum=1.0,
161
  maximum=2.0,
162
  step=0.05,
163
- interactive=True,
164
  info="Penalize repeated tokens",
165
  )
166
  ]
167
 
168
- chatbot_stream = gr.Chatbot(avatar_images=('user.png', 'bot2.png'),bubble_full_width = False)
169
- chatbot_batch = gr.Chatbot(avatar_images=('user1.png', 'bot1.png'),bubble_full_width = False)
170
  chat_interface_stream = gr.ChatInterface(predict,
171
  title=title,
172
  description=description,
173
- textbox=gr.Textbox(),
174
  chatbot=chatbot_stream,
175
  css=css,
176
- examples=examples,
 
 
 
 
177
  #cache_examples=True,
178
  additional_inputs=additional_inputs,)
179
  chat_interface_batch=gr.ChatInterface(predict_batch,
180
  title=title,
181
  description=description,
182
- textbox=gr.Textbox(),
183
  chatbot=chatbot_batch,
184
  css=css,
185
- examples=examples,
 
 
 
 
186
  #cache_examples=True,
187
  additional_inputs=additional_inputs,)
 
188
 
189
  # Gradio Demo
190
  with gr.Blocks() as demo:
 
9
  headers = {"Authorization": f"Bearer {HF_TOKEN}"}
10
  client = AsyncInferenceClient(api_url)
11
 
12
+ PLACEHOLDER = '''
13
+ <h2>Important Notice</h2>
14
+ <p>Thank you for your interest in <strong>"Explore_llamav2_with_TGI".</strong> This space is no longer active. We encourage you to explore our other popular Llama2 Spaces, linked in the description above, for similar content and resources.</p>
15
+ <p>We appreciate your understanding and continued support.</p>
16
+ '''
17
  system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
18
  title = "Llama2 70B Chatbot"
19
  description = """
 
130
 
131
 
132
  additional_inputs=[
133
+ gr.Textbox("", label="Optional system prompt", interactive=False),
134
  gr.Slider(
135
  label="Temperature",
136
  value=0.9,
137
  minimum=0.0,
138
  maximum=1.0,
139
  step=0.05,
140
+ interactive=False,
141
  info="Higher values produce more diverse outputs",
142
  ),
143
  gr.Slider(
 
146
  minimum=0,
147
  maximum=4096,
148
  step=64,
149
+ interactive=False,
150
  info="The maximum numbers of new tokens",
151
  ),
152
  gr.Slider(
 
155
  minimum=0.0,
156
  maximum=1,
157
  step=0.05,
158
+ interactive=False,
159
  info="Higher values sample more low-probability tokens",
160
  ),
161
  gr.Slider(
 
164
  minimum=1.0,
165
  maximum=2.0,
166
  step=0.05,
167
+ interactive=False,
168
  info="Penalize repeated tokens",
169
  )
170
  ]
171
 
172
+ chatbot_stream = gr.Chatbot(avatar_images=('user.png', 'bot2.png'),bubble_full_width = False, placeholder=PLACEHOLDER)
173
+ chatbot_batch = gr.Chatbot(avatar_images=('user1.png', 'bot1.png'),bubble_full_width = False, placeholder=PLACEHOLDER)
174
  chat_interface_stream = gr.ChatInterface(predict,
175
  title=title,
176
  description=description,
177
+ textbox=gr.Textbox(interactive=False),
178
  chatbot=chatbot_stream,
179
  css=css,
180
+ submit_btn = gr.Button('Submit', interactive=False),
181
+ retry_btn = gr.Button('πŸ”„ Retry', interactive=False),
182
+ undo_btn = gr.Button('↩️ Undo', interactive=False),
183
+ clear_btn = gr.Button('πŸ—‘οΈ Clear', interactive=False),
184
+ #examples=examples,
185
  #cache_examples=True,
186
  additional_inputs=additional_inputs,)
187
  chat_interface_batch=gr.ChatInterface(predict_batch,
188
  title=title,
189
  description=description,
190
+ textbox=gr.Textbox(interactive=False),
191
  chatbot=chatbot_batch,
192
  css=css,
193
+ submit_btn = gr.Button('Submit', interactive=False),
194
+ retry_btn = gr.Button('πŸ”„ Retry', interactive=False),
195
+ undo_btn = gr.Button('↩️ Undo', interactive=False),
196
+ clear_btn = gr.Button('πŸ—‘οΈ Clear', interactive=False),
197
+ #examples=examples,
198
  #cache_examples=True,
199
  additional_inputs=additional_inputs,)
200
+
201
 
202
  # Gradio Demo
203
  with gr.Blocks() as demo: