ClownRat committed on
Commit
8c55b8f
β€’
1 Parent(s): 9b4dadd

update demo.

Browse files
Files changed (1) hide show
  1. app.py +104 -102
app.py CHANGED
@@ -61,13 +61,12 @@ The service is a research preview intended for non-commercial use only, subject
61
 
62
 
63
  class Chat:
64
- def __init__(self, model_path, conv_mode, model_base=None, load_8bit=False, load_4bit=False, device='cuda'):
65
  # disable_torch_init()
66
  model_name = get_model_name_from_path(model_path)
67
  self.tokenizer, self.model, processor, context_len = load_pretrained_model(
68
  model_path, model_base, model_name,
69
  load_8bit, load_4bit,
70
- device=device,
71
  offload_folder="save_folder")
72
  self.processor = processor
73
  self.conv_mode = conv_mode
@@ -193,7 +192,9 @@ def generate(image, video, first_run, state, state_, textbox_in, tensor, modals,
193
  state.append_message(state.roles[1], textbox_out)
194
 
195
  return (gr.update(value=image if os.path.exists(image) else None, interactive=True), gr.update(value=video if os.path.exists(video) else None, interactive=True),
196
- state.to_gradio_chatbot(), False, state, state_, gr.update(value=None, interactive=True), tensor, modals)
 
 
197
 
198
 
199
  def regenerate(state, state_, textbox, tensor, modals):
@@ -216,103 +217,104 @@ def clear_history(state, state_, tensor, modals):
216
  True, state, state_, gr.update(value=None, interactive=True), [], [])
217
 
218
 
219
- if __name__ == '__main__':
220
- conv_mode = "llama_2"
221
- model_path = 'DAMO-NLP-SG/VideoLLaMA2-7B'
222
-
223
- def find_cuda():
224
- # Check if CUDA_HOME or CUDA_PATH environment variables are set
225
- cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
226
-
227
- if cuda_home and os.path.exists(cuda_home):
228
- return cuda_home
229
-
230
- # Search for the nvcc executable in the system's PATH
231
- nvcc_path = shutil.which('nvcc')
232
-
233
- if nvcc_path:
234
- # Remove the 'bin/nvcc' part to get the CUDA installation path
235
- cuda_path = os.path.dirname(os.path.dirname(nvcc_path))
236
- return cuda_path
237
-
238
- return None
239
-
240
- cuda_path = find_cuda()
241
-
242
- if cuda_path:
243
- print(f"CUDA installation found at: {cuda_path}")
244
- else:
245
- print("CUDA installation not found")
246
-
247
- device = torch.device("cuda")
248
-
249
- handler = Chat(model_path, conv_mode=conv_mode, load_8bit=False, load_4bit=True)
250
- # handler.model.to(dtype=torch.float16)
251
- # handler = handler.model.to(device)
252
-
253
- if not os.path.exists("temp"):
254
- os.makedirs("temp")
255
-
256
- textbox = gr.Textbox(
257
- show_label=False, placeholder="Enter text and press ENTER", container=False
258
- )
259
- with gr.Blocks(title='VideoLLaMA2πŸš€', theme=gr.themes.Default(), css=block_css) as demo:
260
- gr.Markdown(title_markdown)
261
- state = gr.State()
262
- state_ = gr.State()
263
- first_run = gr.State()
264
- tensor = gr.State()
265
- modals = gr.State()
266
-
267
- with gr.Row():
268
- with gr.Column(scale=3):
269
- image = gr.Image(label="Input Image", type="filepath")
270
- video = gr.Video(label="Input Video")
271
-
272
- cur_dir = os.path.dirname(os.path.abspath(__file__))
273
- gr.Examples(
274
- examples=[
275
- [
276
- f"{cur_dir}/examples/extreme_ironing.jpg",
277
- "What is unusual about this image?",
278
- ],
279
- [
280
- f"{cur_dir}/examples/waterview.jpg",
281
- "What are the things I should be cautious about when I visit here?",
282
- ],
283
- [
284
- f"{cur_dir}/examples/desert.jpg",
285
- "If there are factual errors in the questions, point it out; if not, proceed answering the question. What’s happening in the desert?",
286
- ],
287
  ],
288
- inputs=[image, textbox],
289
- )
290
-
291
- with gr.Column(scale=7):
292
- chatbot = gr.Chatbot(label="VideoLLaMA2", bubble_full_width=True, height=750)
293
- with gr.Row():
294
- with gr.Column(scale=8):
295
- textbox.render()
296
- with gr.Column(scale=1, min_width=50):
297
- submit_btn = gr.Button(value="Send", variant="primary", interactive=True)
298
- with gr.Row(elem_id="buttons") as button_row:
299
- upvote_btn = gr.Button(value="πŸ‘ Upvote", interactive=True)
300
- downvote_btn = gr.Button(value="πŸ‘Ž Downvote", interactive=True)
301
- # flag_btn = gr.Button(value="⚠️ Flag", interactive=True)
302
- # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
303
- regenerate_btn = gr.Button(value="πŸ”„ Regenerate", interactive=True)
304
- clear_btn = gr.Button(value="πŸ—‘οΈ Clear history", interactive=True)
305
-
306
- gr.Markdown(tos_markdown)
307
- gr.Markdown(learn_more_markdown)
308
-
309
- submit_btn.click(generate, [image, video, first_run, state, state_, textbox, tensor, modals],
310
- [image, video, chatbot, first_run, state, state_, textbox, tensor, modals])
311
-
312
- regenerate_btn.click(regenerate, [state, state_, textbox, tensor, modals], [state, state_, textbox, chatbot, first_run, tensor, modals]).then(
313
- generate, [image, video, first_run, state, state_, textbox, tensor, modals], [image, video, chatbot, first_run, state, state_, textbox, tensor, modals])
314
-
315
- clear_btn.click(clear_history, [state, state_, tensor, modals],
316
- [image, video, chatbot, first_run, state, state_, textbox, tensor, modals])
317
-
318
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
 
63
  class Chat:
64
+ def __init__(self, model_path, conv_mode, model_base=None, load_8bit=False, load_4bit=False):
65
  # disable_torch_init()
66
  model_name = get_model_name_from_path(model_path)
67
  self.tokenizer, self.model, processor, context_len = load_pretrained_model(
68
  model_path, model_base, model_name,
69
  load_8bit, load_4bit,
 
70
  offload_folder="save_folder")
71
  self.processor = processor
72
  self.conv_mode = conv_mode
 
192
  state.append_message(state.roles[1], textbox_out)
193
 
194
  return (gr.update(value=image if os.path.exists(image) else None, interactive=True), gr.update(value=video if os.path.exists(video) else None, interactive=True),
195
+ state.to_gradio_chatbot(), False, state, state_, gr.update(value=None, interactive=True),
196
+ # tensor, modals
197
+ )
198
 
199
 
200
  def regenerate(state, state_, textbox, tensor, modals):
 
217
  True, state, state_, gr.update(value=None, interactive=True), [], [])
218
 
219
 
220
+ conv_mode = "llama_2"
221
+ model_path = 'DAMO-NLP-SG/VideoLLaMA2-7B'
222
+
223
+ def find_cuda():
224
+ # Check if CUDA_HOME or CUDA_PATH environment variables are set
225
+ cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
226
+
227
+ if cuda_home and os.path.exists(cuda_home):
228
+ return cuda_home
229
+
230
+ # Search for the nvcc executable in the system's PATH
231
+ nvcc_path = shutil.which('nvcc')
232
+
233
+ if nvcc_path:
234
+ # Remove the 'bin/nvcc' part to get the CUDA installation path
235
+ cuda_path = os.path.dirname(os.path.dirname(nvcc_path))
236
+ return cuda_path
237
+
238
+ return None
239
+
240
+ cuda_path = find_cuda()
241
+
242
+ if cuda_path:
243
+ print(f"CUDA installation found at: {cuda_path}")
244
+ else:
245
+ print("CUDA installation not found")
246
+
247
+ device = torch.device("cuda")
248
+
249
+ handler = Chat(model_path, conv_mode=conv_mode, load_8bit=False, load_4bit=True)
250
+ # handler.model.to(dtype=torch.float16)
251
+ # handler = handler.model.to(device)
252
+
253
+ if not os.path.exists("temp"):
254
+ os.makedirs("temp")
255
+
256
+ textbox = gr.Textbox(
257
+ show_label=False, placeholder="Enter text and press ENTER", container=False
258
+ )
259
+ with gr.Blocks(title='VideoLLaMA2πŸš€', theme=gr.themes.Default(), css=block_css) as demo:
260
+ gr.Markdown(title_markdown)
261
+ state = gr.State()
262
+ state_ = gr.State()
263
+ first_run = gr.State()
264
+ tensor = gr.State()
265
+ modals = gr.State()
266
+
267
+ with gr.Row():
268
+ with gr.Column(scale=3):
269
+ image = gr.Image(label="Input Image", type="filepath")
270
+ video = gr.Video(label="Input Video")
271
+
272
+ cur_dir = os.path.dirname(os.path.abspath(__file__))
273
+ gr.Examples(
274
+ examples=[
275
+ [
276
+ f"{cur_dir}/examples/extreme_ironing.jpg",
277
+ "What is unusual about this image?",
 
 
 
 
 
 
 
 
 
 
278
  ],
279
+ [
280
+ f"{cur_dir}/examples/waterview.jpg",
281
+ "What are the things I should be cautious about when I visit here?",
282
+ ],
283
+ [
284
+ f"{cur_dir}/examples/desert.jpg",
285
+ "If there are factual errors in the questions, point it out; if not, proceed answering the question. What’s happening in the desert?",
286
+ ],
287
+ ],
288
+ inputs=[image, textbox],
289
+ )
290
+
291
+ with gr.Column(scale=7):
292
+ chatbot = gr.Chatbot(label="VideoLLaMA2", bubble_full_width=True, height=750)
293
+ with gr.Row():
294
+ with gr.Column(scale=8):
295
+ textbox.render()
296
+ with gr.Column(scale=1, min_width=50):
297
+ submit_btn = gr.Button(value="Send", variant="primary", interactive=True)
298
+ with gr.Row(elem_id="buttons") as button_row:
299
+ upvote_btn = gr.Button(value="πŸ‘ Upvote", interactive=True)
300
+ downvote_btn = gr.Button(value="πŸ‘Ž Downvote", interactive=True)
301
+ # flag_btn = gr.Button(value="⚠️ Flag", interactive=True)
302
+ # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
303
+ regenerate_btn = gr.Button(value="πŸ”„ Regenerate", interactive=True)
304
+ clear_btn = gr.Button(value="πŸ—‘οΈ Clear history", interactive=True)
305
+
306
+ gr.Markdown(tos_markdown)
307
+ gr.Markdown(learn_more_markdown)
308
+
309
+ submit_btn.click(generate, [image, video, first_run, state, state_, textbox, tensor, modals],
310
+ [image, video, chatbot, first_run, state, state_, textbox,
311
+ # tensor, modals
312
+ ])
313
+
314
+ regenerate_btn.click(regenerate, [state, state_, textbox, tensor, modals], [state, state_, textbox, chatbot, first_run, tensor, modals]).then(
315
+ generate, [image, video, first_run, state, state_, textbox, tensor, modals], [image, video, chatbot, first_run, state, state_, textbox, tensor, modals])
316
+
317
+ clear_btn.click(clear_history, [state, state_, tensor, modals],
318
+ [image, video, chatbot, first_run, state, state_, textbox, tensor, modals])
319
+
320
+ demo.launch()