DongfuJiang committed on
Commit
e1fb4c8
1 Parent(s): b0fa0f9
Files changed (1) hide show
  1. app_high_res.py +1 -3
app_high_res.py CHANGED
@@ -11,12 +11,10 @@ import functools
11
  from transformers import AutoProcessor, Idefics2ForConditionalGeneration
12
  from models.conversation import conv_templates
13
  from typing import List
14
- import subprocess
15
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
16
 
17
 
18
  processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
19
- model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2")
20
  MAX_NUM_FRAMES = 24
21
  conv_template = conv_templates["idefics_2"]
22
 
 
11
  from transformers import AutoProcessor, Idefics2ForConditionalGeneration
12
  from models.conversation import conv_templates
13
  from typing import List
 
 
14
 
15
 
16
  processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
17
+ model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", torch_dtype=torch.bfloat16)
18
  MAX_NUM_FRAMES = 24
19
  conv_template = conv_templates["idefics_2"]
20