Flying-Lynx committed on
Commit
89ccd51
β€’
1 Parent(s): b443c25

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +305 -305
app.py CHANGED
@@ -1,306 +1,306 @@
1
- import gradio as gr
2
- import os
3
- # import copy
4
- import torch
5
- # import random
6
- import spaces
7
-
8
- from eagle import conversation as conversation_lib
9
- from eagle.constants import DEFAULT_IMAGE_TOKEN
10
-
11
- from eagle.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
12
- from eagle.conversation import conv_templates, SeparatorStyle
13
- from eagle.model.builder import load_pretrained_model
14
- from eagle.utils import disable_torch_init
15
- from eagle.mm_utils import tokenizer_image_token, get_model_name_from_path, process_images
16
-
17
- from PIL import Image
18
- import argparse
19
-
20
- from transformers import TextIteratorStreamer
21
- from threading import Thread
22
-
23
- # os.environ['GRADIO_TEMP_DIR'] = './gradio_tmp'
24
- no_change_btn = gr.Button()
25
- enable_btn = gr.Button(interactive=True)
26
- disable_btn = gr.Button(interactive=False)
27
-
28
- argparser = argparse.ArgumentParser()
29
- argparser.add_argument("--server_name", default="0.0.0.0", type=str)
30
- argparser.add_argument("--port", default="6324", type=str)
31
- argparser.add_argument("--model-path", default="NVEagle/Eagle-X5-13B", type=str)
32
- argparser.add_argument("--model-base", type=str, default=None)
33
- argparser.add_argument("--num-gpus", type=int, default=1)
34
- argparser.add_argument("--conv-mode", type=str, default="vicuna_v1")
35
- argparser.add_argument("--temperature", type=float, default=0.2)
36
- argparser.add_argument("--max-new-tokens", type=int, default=512)
37
- argparser.add_argument("--num_frames", type=int, default=16)
38
- argparser.add_argument("--load-8bit", action="store_true")
39
- argparser.add_argument("--load-4bit", action="store_true")
40
- argparser.add_argument("--debug", action="store_true")
41
-
42
- args = argparser.parse_args()
43
- model_path = args.model_path
44
- conv_mode = args.conv_mode
45
- filt_invalid="cut"
46
- model_name = get_model_name_from_path(args.model_path)
47
- tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
48
- our_chatbot = None
49
-
50
- def upvote_last_response(state):
51
- return ("",) + (disable_btn,) * 3
52
-
53
-
54
- def downvote_last_response(state):
55
- return ("",) + (disable_btn,) * 3
56
-
57
-
58
- def flag_last_response(state):
59
- return ("",) + (disable_btn,) * 3
60
-
61
- def clear_history():
62
- state =conv_templates[conv_mode].copy()
63
- return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
64
-
65
- def add_text(state, imagebox, textbox, image_process_mode):
66
- if state is None:
67
- state = conv_templates[conv_mode].copy()
68
-
69
- if imagebox is not None:
70
- textbox = DEFAULT_IMAGE_TOKEN + '\n' + textbox
71
- image = Image.open(imagebox).convert('RGB')
72
-
73
- if imagebox is not None:
74
- textbox = (textbox, image, image_process_mode)
75
-
76
- state.append_message(state.roles[0], textbox)
77
- state.append_message(state.roles[1], None)
78
-
79
- yield (state, state.to_gradio_chatbot(), "", None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
80
-
81
- def delete_text(state, image_process_mode):
82
- state.messages[-1][-1] = None
83
- prev_human_msg = state.messages[-2]
84
- if type(prev_human_msg[1]) in (tuple, list):
85
- prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
86
- yield (state, state.to_gradio_chatbot(), "", None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
87
-
88
- def regenerate(state, image_process_mode):
89
- state.messages[-1][-1] = None
90
- prev_human_msg = state.messages[-2]
91
- if type(prev_human_msg[1]) in (tuple, list):
92
- prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
93
- state.skip_next = False
94
- return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
95
-
96
- @spaces.GPU
97
- def generate(state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens):
98
- prompt = state.get_prompt()
99
- images = state.get_images(return_pil=True)
100
- #prompt, image_args = process_image(prompt, images)
101
-
102
- ori_prompt = prompt
103
- num_image_tokens = 0
104
-
105
- if images is not None and len(images) > 0:
106
- if len(images) > 0:
107
- if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
108
- raise ValueError("Number of images does not match number of <image> tokens in prompt")
109
-
110
- #images = [load_image_from_base64(image) for image in images]
111
- image_sizes = [image.size for image in images]
112
- images = process_images(images, image_processor, model.config)
113
-
114
- if type(images) is list:
115
- images = [image.to(model.device, dtype=torch.float16) for image in images]
116
- else:
117
- images = images.to(model.device, dtype=torch.float16)
118
- else:
119
- images = None
120
- image_sizes = None
121
- image_args = {"images": images, "image_sizes": image_sizes}
122
- else:
123
- images = None
124
- image_args = {}
125
-
126
- max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
127
- max_new_tokens = 512
128
- do_sample = True if temperature > 0.001 else False
129
- stop_str = state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2
130
-
131
- input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
132
- streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)
133
-
134
- max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens)
135
-
136
- if max_new_tokens < 1:
137
- # yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0"
138
- return
139
-
140
- thread = Thread(target=model.generate, kwargs=dict(
141
- inputs=input_ids,
142
- do_sample=do_sample,
143
- temperature=temperature,
144
- top_p=top_p,
145
- max_new_tokens=max_new_tokens,
146
- streamer=streamer,
147
- use_cache=True,
148
- pad_token_id=tokenizer.eos_token_id,
149
- **image_args
150
- ))
151
- thread.start()
152
- generated_text = ''
153
- for new_text in streamer:
154
- generated_text += new_text
155
- if generated_text.endswith(stop_str):
156
- generated_text = generated_text[:-len(stop_str)]
157
- state.messages[-1][-1] = generated_text
158
- yield (state, state.to_gradio_chatbot(), "", None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
159
-
160
- yield (state, state.to_gradio_chatbot(), "", None) + (enable_btn,) * 5
161
-
162
- torch.cuda.empty_cache()
163
-
164
- txt = gr.Textbox(
165
- scale=4,
166
- show_label=False,
167
- placeholder="Enter text and press enter.",
168
- container=False,
169
- )
170
-
171
-
172
- title_markdown = ("""
173
- # Eagle: Exploring The Design Space for Multimodal LLMs with Mixture of Encoders
174
- [[Project Page](TODO)] [[Code](TODO)] [[Model](TODO)] | πŸ“š [[Arxiv](TODO)]]
175
- """)
176
-
177
- tos_markdown = ("""
178
- ### Terms of use
179
- By using this service, users are required to agree to the following terms:
180
- The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
181
- Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
182
- For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
183
- """)
184
-
185
-
186
- learn_more_markdown = ("""
187
- ### License
188
- The service is a research preview intended for non-commercial use only, subject to the. Please contact us if you find any potential violation.
189
- """)
190
-
191
- block_css = """
192
- #buttons button {
193
- min-width: min(120px,100%);
194
- }
195
- """
196
-
197
- textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
198
- with gr.Blocks(title="Eagle", theme=gr.themes.Default(), css=block_css) as demo:
199
- state = gr.State()
200
-
201
- gr.Markdown(title_markdown)
202
-
203
- with gr.Row():
204
- with gr.Column(scale=3):
205
- imagebox = gr.Image(label="Input Image", type="filepath")
206
- image_process_mode = gr.Radio(
207
- ["Crop", "Resize", "Pad", "Default"],
208
- value="Default",
209
- label="Preprocess for non-square image", visible=False)
210
-
211
- cur_dir = os.path.dirname(os.path.abspath(__file__))
212
- gr.Examples(examples=[
213
- [f"{cur_dir}/assets/health-insurance.png", "Under which circumstances do I need to be enrolled in mandatory health insurance if I am an international student?"],
214
- [f"{cur_dir}/assets/leasing-apartment.png", "I don't have any 3rd party renter's insurance now. Do I need to get one for myself?"],
215
- [f"{cur_dir}/assets/nvidia.jpeg", "Who is the person in the middle?"],
216
- [f"{cur_dir}/assets/animal-compare.png", "Are these two pictures showing the same kind of animal?"],
217
- [f"{cur_dir}/assets/georgia-tech.jpeg", "Where is this photo taken?"]
218
- ], inputs=[imagebox, textbox], cache_examples=False)
219
-
220
- with gr.Accordion("Parameters", open=False) as parameter_row:
221
- temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
222
- top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
223
- max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
224
-
225
- with gr.Column(scale=8):
226
- chatbot = gr.Chatbot(
227
- elem_id="chatbot",
228
- label="Eagle Chatbot",
229
- height=650,
230
- layout="panel",
231
- )
232
- with gr.Row():
233
- with gr.Column(scale=8):
234
- textbox.render()
235
- with gr.Column(scale=1, min_width=50):
236
- submit_btn = gr.Button(value="Send", variant="primary")
237
- with gr.Row(elem_id="buttons") as button_row:
238
- upvote_btn = gr.Button(value="πŸ‘ Upvote", interactive=False)
239
- downvote_btn = gr.Button(value="πŸ‘Ž Downvote", interactive=False)
240
- flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
241
- #stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
242
- regenerate_btn = gr.Button(value="πŸ”„ Regenerate", interactive=False)
243
- clear_btn = gr.Button(value="πŸ—‘οΈ Clear", interactive=False)
244
-
245
- gr.Markdown(tos_markdown)
246
- gr.Markdown(learn_more_markdown)
247
- url_params = gr.JSON(visible=False)
248
-
249
- # Register listeners
250
- btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
251
- upvote_btn.click(
252
- upvote_last_response,
253
- [state],
254
- [textbox, upvote_btn, downvote_btn, flag_btn]
255
- )
256
- downvote_btn.click(
257
- downvote_last_response,
258
- [state],
259
- [textbox, upvote_btn, downvote_btn, flag_btn]
260
- )
261
- flag_btn.click(
262
- flag_last_response,
263
- [state],
264
- [textbox, upvote_btn, downvote_btn, flag_btn]
265
- )
266
-
267
- clear_btn.click(
268
- clear_history,
269
- None,
270
- [state, chatbot, textbox, imagebox] + btn_list,
271
- queue=False
272
- )
273
-
274
- regenerate_btn.click(
275
- delete_text,
276
- [state, image_process_mode],
277
- [state, chatbot, textbox, imagebox] + btn_list,
278
- ).then(
279
- generate,
280
- [state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens],
281
- [state, chatbot, textbox, imagebox] + btn_list,
282
- )
283
- textbox.submit(
284
- add_text,
285
- [state, imagebox, textbox, image_process_mode],
286
- [state, chatbot, textbox, imagebox] + btn_list,
287
- ).then(
288
- generate,
289
- [state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens],
290
- [state, chatbot, textbox, imagebox] + btn_list,
291
- )
292
-
293
- submit_btn.click(
294
- add_text,
295
- [state, imagebox, textbox, image_process_mode],
296
- [state, chatbot, textbox, imagebox] + btn_list,
297
- ).then(
298
- generate,
299
- [state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens],
300
- [state, chatbot, textbox, imagebox] + btn_list,
301
- )
302
-
303
- demo.queue(
304
- status_update_rate=10,
305
- api_open=False
306
  ).launch()
 
1
# Gradio demo for Eagle (multimodal LLM). The model is loaded once at module
# import time; the Blocks UI defined below streams chat completions from it.
import gradio as gr
import os
# import copy
import torch
# import random
import spaces

from eagle import conversation as conversation_lib
from eagle.constants import DEFAULT_IMAGE_TOKEN

from eagle.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from eagle.conversation import conv_templates, SeparatorStyle
from eagle.model.builder import load_pretrained_model
from eagle.utils import disable_torch_init
from eagle.mm_utils import tokenizer_image_token, get_model_name_from_path, process_images

from PIL import Image
import argparse

from transformers import TextIteratorStreamer
from threading import Thread

# os.environ['GRADIO_TEMP_DIR'] = './gradio_tmp'
# Sentinel button states: callbacks return these to toggle the five action
# buttons (upvote/downvote/flag/regenerate/clear) in one tuple.
no_change_btn = gr.Button()
enable_btn = gr.Button(interactive=True)
disable_btn = gr.Button(interactive=False)

argparser = argparse.ArgumentParser()
argparser.add_argument("--server_name", default="0.0.0.0", type=str)
# NOTE(review): --port is parsed as str and never passed to launch() below —
# confirm whether it is consumed elsewhere or dead.
argparser.add_argument("--port", default="6324", type=str)
argparser.add_argument("--model-path", default="NVEagle/Eagle-X5-13B-Chat", type=str)
argparser.add_argument("--model-base", type=str, default=None)
argparser.add_argument("--num-gpus", type=int, default=1)
argparser.add_argument("--conv-mode", type=str, default="vicuna_v1")
argparser.add_argument("--temperature", type=float, default=0.2)
argparser.add_argument("--max-new-tokens", type=int, default=512)
argparser.add_argument("--num_frames", type=int, default=16)
argparser.add_argument("--load-8bit", action="store_true")
argparser.add_argument("--load-4bit", action="store_true")
argparser.add_argument("--debug", action="store_true")

args = argparser.parse_args()
model_path = args.model_path
# Conversation template key used by clear_history/add_text below.
conv_mode = args.conv_mode
filt_invalid="cut"
model_name = get_model_name_from_path(args.model_path)
# Globals shared by all request handlers (single model instance per process).
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
our_chatbot = None
49
+
50
def upvote_last_response(state):
    """Clear the textbox and disable the three feedback buttons."""
    return "", disable_btn, disable_btn, disable_btn
52
+
53
+
54
def downvote_last_response(state):
    """Clear the textbox and disable the three feedback buttons."""
    return "", disable_btn, disable_btn, disable_btn
56
+
57
+
58
def flag_last_response(state):
    """Clear the textbox and disable the three feedback buttons."""
    return "", disable_btn, disable_btn, disable_btn
60
+
61
def clear_history():
    """Reset the session: fresh conversation state, empty chat, all buttons off."""
    fresh_state = conv_templates[conv_mode].copy()
    disabled = (disable_btn,) * 5
    return (fresh_state, fresh_state.to_gradio_chatbot(), "", None) + disabled
64
+
65
def add_text(state, imagebox, textbox, image_process_mode):
    """Append the user's turn (text, optionally bundled with an image) to the
    conversation and refresh the UI.

    Yields (state, chatbot, textbox, imagebox, *five button states).
    """
    if state is None:
        state = conv_templates[conv_mode].copy()

    if imagebox is not None:
        # Prepend the image placeholder token, then bundle the PIL image and
        # the selected preprocessing mode with the text.
        textbox = DEFAULT_IMAGE_TOKEN + '\n' + textbox
        image = Image.open(imagebox).convert('RGB')
        textbox = (textbox, image, image_process_mode)

    state.append_message(state.roles[0], textbox)
    state.append_message(state.roles[1], None)

    buttons = (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
    yield (state, state.to_gradio_chatbot(), "", None) + buttons
80
+
81
def delete_text(state, image_process_mode):
    """Drop the pending assistant reply so the last user turn can be re-run."""
    state.messages[-1][-1] = None
    last_user_msg = state.messages[-2]
    if type(last_user_msg[1]) in (tuple, list):
        # Keep the original (text, image) pair but refresh the process mode.
        last_user_msg[1] = (*last_user_msg[1][:2], image_process_mode)
    buttons = (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
    yield (state, state.to_gradio_chatbot(), "", None) + buttons
87
+
88
def regenerate(state, image_process_mode):
    """Clear the last assistant reply and mark the state ready for a fresh generation."""
    state.messages[-1][-1] = None
    last_user_msg = state.messages[-2]
    if type(last_user_msg[1]) in (tuple, list):
        # Rebuild (text, image, mode) with the currently-selected process mode.
        last_user_msg[1] = (*last_user_msg[1][:2], image_process_mode)
    state.skip_next = False
    disabled = (disable_btn,) * 5
    return (state, state.to_gradio_chatbot(), "", None) + disabled
95
+
96
@spaces.GPU
def generate(state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens):
    """Stream the model's reply for the current conversation state.

    Runs `model.generate` on a background thread with a TextIteratorStreamer
    and yields (state, chatbot, textbox, imagebox, *five button states) as
    each text chunk arrives; a final yield re-enables all buttons.

    Raises:
        ValueError: if the number of attached images does not match the
            number of <image> tokens in the prompt.
    """
    prompt = state.get_prompt()
    images = state.get_images(return_pil=True)

    num_image_tokens = 0

    if images:
        if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
            raise ValueError("Number of images does not match number of <image> tokens in prompt")

        image_sizes = [image.size for image in images]
        images = process_images(images, image_processor, model.config)

        # process_images may return a list of tensors or one batched tensor.
        if type(images) is list:
            images = [image.to(model.device, dtype=torch.float16) for image in images]
        else:
            images = images.to(model.device, dtype=torch.float16)
        image_args = {"images": images, "image_sizes": image_sizes}
    else:
        images = None
        image_args = {}

    max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
    # FIX: honor the "Max output tokens" slider instead of a hard-coded 512
    # (the parameter was previously accepted but ignored). A zero/falsy value
    # falls back to the old 512 default.
    max_new_tokens = int(max_output_tokens) if max_output_tokens else 512
    do_sample = temperature > 0.001
    stop_str = state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2

    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)

    # Leave room for the prompt inside the model's context window.
    max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens)

    if max_new_tokens < 1:
        # Context is already full; nothing can be generated.
        return

    thread = Thread(target=model.generate, kwargs=dict(
        inputs=input_ids,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
        streamer=streamer,
        use_cache=True,
        pad_token_id=tokenizer.eos_token_id,
        **image_args
    ))
    thread.start()
    generated_text = ''
    for new_text in streamer:
        generated_text += new_text
        # Strip the conversation separator if the model emitted it.
        if generated_text.endswith(stop_str):
            generated_text = generated_text[:-len(stop_str)]
        state.messages[-1][-1] = generated_text
        yield (state, state.to_gradio_chatbot(), "", None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)

    yield (state, state.to_gradio_chatbot(), "", None) + (enable_btn,) * 5

    torch.cuda.empty_cache()
163
+
164
# NOTE(review): `txt` is never referenced by the UI below (which renders
# `textbox` instead) — looks dead; confirm before removing.
txt = gr.Textbox(
    show_label=False,
    scale=4,
    placeholder="Enter text and press enter.",
    container=False,
)
170
+
171
+
172
# Static markdown/CSS shown by the Blocks layout below. These are runtime
# strings rendered to users; their content is reproduced verbatim.
title_markdown = ("""
# Eagle: Exploring The Design Space for Multimodal LLMs with Mixture of Encoders
[[Project Page](TODO)] [[Code](TODO)] [[Model](TODO)] | πŸ“š [[Arxiv](TODO)]]
""")

tos_markdown = ("""
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
""")


# NOTE(review): the sentence below is truncated ("subject to the.") — the
# license reference appears to be missing; fix the copy upstream.
learn_more_markdown = ("""
### License
The service is a research preview intended for non-commercial use only, subject to the. Please contact us if you find any potential violation.
""")

# Keeps the five action buttons from collapsing below a usable width.
block_css = """
#buttons button {
    min-width: min(120px,100%);
}
"""
196
+
197
# Shared input textbox; created up front so gr.Examples can target it, then
# rendered inside the layout via textbox.render().
textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
with gr.Blocks(title="Eagle", theme=gr.themes.Default(), css=block_css) as demo:
    # Per-session conversation state (populated lazily in add_text/clear_history).
    state = gr.State()

    gr.Markdown(title_markdown)

    with gr.Row():
        with gr.Column(scale=3):
            imagebox = gr.Image(label="Input Image", type="filepath")
            image_process_mode = gr.Radio(
                ["Crop", "Resize", "Pad", "Default"],
                value="Default",
                label="Preprocess for non-square image", visible=False)

            # Example (image, question) pairs shipped alongside this file.
            cur_dir = os.path.dirname(os.path.abspath(__file__))
            gr.Examples(examples=[
                [f"{cur_dir}/assets/health-insurance.png", "Under which circumstances do I need to be enrolled in mandatory health insurance if I am an international student?"],
                [f"{cur_dir}/assets/leasing-apartment.png", "I don't have any 3rd party renter's insurance now. Do I need to get one for myself?"],
                [f"{cur_dir}/assets/nvidia.jpeg", "Who is the person in the middle?"],
                [f"{cur_dir}/assets/animal-compare.png", "Are these two pictures showing the same kind of animal?"],
                [f"{cur_dir}/assets/georgia-tech.jpeg", "Where is this photo taken?"]
            ], inputs=[imagebox, textbox], cache_examples=False)

            # Sampling controls fed into generate().
            with gr.Accordion("Parameters", open=False) as parameter_row:
                temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
                top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
                max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)

        with gr.Column(scale=8):
            chatbot = gr.Chatbot(
                elem_id="chatbot",
                label="Eagle Chatbot",
                height=650,
                layout="panel",
            )
            with gr.Row():
                with gr.Column(scale=8):
                    textbox.render()
                with gr.Column(scale=1, min_width=50):
                    submit_btn = gr.Button(value="Send", variant="primary")
            with gr.Row(elem_id="buttons") as button_row:
                upvote_btn = gr.Button(value="πŸ‘ Upvote", interactive=False)
                downvote_btn = gr.Button(value="πŸ‘Ž Downvote", interactive=False)
                flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
                #stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
                regenerate_btn = gr.Button(value="πŸ”„ Regenerate", interactive=False)
                clear_btn = gr.Button(value="πŸ—‘οΈ Clear", interactive=False)

    gr.Markdown(tos_markdown)
    gr.Markdown(learn_more_markdown)
    url_params = gr.JSON(visible=False)

    # Register listeners. Callbacks return/yield tuples matching
    # [state, chatbot, textbox, imagebox] + btn_list (or the 3-button subset
    # for the feedback handlers).
    btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
    upvote_btn.click(
        upvote_last_response,
        [state],
        [textbox, upvote_btn, downvote_btn, flag_btn]
    )
    downvote_btn.click(
        downvote_last_response,
        [state],
        [textbox, upvote_btn, downvote_btn, flag_btn]
    )
    flag_btn.click(
        flag_last_response,
        [state],
        [textbox, upvote_btn, downvote_btn, flag_btn]
    )

    clear_btn.click(
        clear_history,
        None,
        [state, chatbot, textbox, imagebox] + btn_list,
        queue=False
    )

    # NOTE(review): the regenerate button is wired to delete_text, not to the
    # regenerate() helper defined above (which is otherwise unused) — confirm
    # this is intentional.
    regenerate_btn.click(
        delete_text,
        [state, image_process_mode],
        [state, chatbot, textbox, imagebox] + btn_list,
    ).then(
        generate,
        [state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens],
        [state, chatbot, textbox, imagebox] + btn_list,
    )
    # Pressing ENTER in the textbox and clicking Send trigger the same
    # add_text -> generate chain.
    textbox.submit(
        add_text,
        [state, imagebox, textbox, image_process_mode],
        [state, chatbot, textbox, imagebox] + btn_list,
    ).then(
        generate,
        [state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens],
        [state, chatbot, textbox, imagebox] + btn_list,
    )

    submit_btn.click(
        add_text,
        [state, imagebox, textbox, image_process_mode],
        [state, chatbot, textbox, imagebox] + btn_list,
    ).then(
        generate,
        [state, imagebox, textbox, image_process_mode, temperature, top_p, max_output_tokens],
        [state, chatbot, textbox, imagebox] + btn_list,
    )

demo.queue(
    status_update_rate=10,
    api_open=False
).launch()