EvanTHU committed on
Commit
3a57610
·
verified ·
1 Parent(s): 1538bbb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -322,10 +322,10 @@ def motionllm(
322
  sample = {"instruction": prompt, "input": input_video_path}
323
 
324
  prefix = generate_prompt_mlp(sample)
325
- pre = torch.cat((tokenizer.encode(prefix.split('INPUT_VIDEO: ')[0] + "\n", bos=True, eos=False, device=model.device).view(1, -1), tokenizer.encode("INPUT_VIDEO: ", bos=False, eos=False, device=model.device).view(1, -1)), dim=1)
326
 
327
  prompt = (pre, ". ASSISTANT: ")
328
- encoded = (prompt[0], video_feature[0], tokenizer.encode(prompt[1], bos=False, eos=False, device=model.device).view(1, -1))
329
 
330
  t0 = time.perf_counter()
331
 
@@ -565,7 +565,7 @@ print('Load mlp model again from', mlp_path)
565
  print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
566
 
567
  model.eval()
568
- model = fabric.setup_module(model)
569
  linear_proj.eval()
570
 
571
  tokenizer = Tokenizer(tokenizer_llm_path)
 
322
  sample = {"instruction": prompt, "input": input_video_path}
323
 
324
  prefix = generate_prompt_mlp(sample)
325
+ pre = torch.cat((tokenizer.encode(prefix.split('INPUT_VIDEO: ')[0] + "\n", bos=True, eos=False, device=device).view(1, -1), tokenizer.encode("INPUT_VIDEO: ", bos=False, eos=False, device=device).view(1, -1)), dim=1)
326
 
327
  prompt = (pre, ". ASSISTANT: ")
328
+ encoded = (prompt[0], video_feature[0], tokenizer.encode(prompt[1], bos=False, eos=False, device=device).view(1, -1))
329
 
330
  t0 = time.perf_counter()
331
 
 
565
  print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
566
 
567
  model.eval()
568
+ model = model.cuda()
569
  linear_proj.eval()
570
 
571
  tokenizer = Tokenizer(tokenizer_llm_path)