omni-research committed on
Commit
d8920cd
1 Parent(s): ddc1135

set model=Tarsier2-7b

Files changed (3)
  1. app.py +2 -0
  2. dataset/processor.py +1 -1
  3. tools/utils.py +2 -2
app.py CHANGED
@@ -23,6 +23,8 @@ from dataset.processor import Processor
 import os
 import torch
 
+# huggingface-cli login
+
 device = 'cuda'
 model_path = os.getenv("MODEL_PATH", "omni-research/Tarsier2-7b")
 max_n_frames = int(os.getenv("MAX_N_FRAMES", 8))
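The new "# huggingface-cli login" comment hints at how authentication is expected to work after this commit: with the explicit token=HF_TOKEN arguments commented out in the files below, from_pretrained() falls back to whatever credentials are cached locally, so running huggingface-cli login beforehand (only needed for gated checkpoints) is enough. A minimal sketch of the programmatic equivalent, assuming huggingface_hub is installed:

    # Sketch only: cache Hub credentials once, then load models without token=.
    # Only needed if the checkpoint is gated; public repos download anonymously.
    from huggingface_hub import login

    login()  # prompts for a token and caches it, like running `huggingface-cli login`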
dataset/processor.py CHANGED
@@ -86,7 +86,7 @@ class Processor(object):
             model_name_or_path,
             padding_side='left',
             trust_remote_code=True,
-            token=HF_TOKEN,
+            # token=HF_TOKEN,
         )
         self.processor = CustomImageProcessor(sub_processor)
         self.tokenizer = sub_processor.tokenizer
tools/utils.py CHANGED
@@ -55,7 +55,7 @@ def load_model_and_processor(model_name_or_path, max_n_frames=8):
     model_config = LlavaConfig.from_pretrained(
         model_name_or_path,
         trust_remote_code=True,
-        token=HF_TOKEN,
+        # token=HF_TOKEN,
     )
     model = TarsierForConditionalGeneration.from_pretrained(
         model_name_or_path,
@@ -63,7 +63,7 @@ def load_model_and_processor(model_name_or_path, max_n_frames=8):
         device_map='auto',
         torch_dtype=torch.float16,
         trust_remote_code=True,
-        token=HF_TOKEN,
+        # token=HF_TOKEN,
     )
     model.eval()
     return model, processor
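For reference, a usage sketch of the loader touched above, mirroring the environment-variable defaults already used in app.py. It assumes this repo's tools/utils.py is importable as tools.utils and that any Hub login needed for gated weights was done beforehand:

    import os

    from tools.utils import load_model_and_processor

    # Same defaults as app.py; MODEL_PATH / MAX_N_FRAMES override them.
    model_path = os.getenv("MODEL_PATH", "omni-research/Tarsier2-7b")
    max_n_frames = int(os.getenv("MAX_N_FRAMES", 8))

    # No token is passed here; gated checkpoints rely on cached credentials.
    model, processor = load_model_and_processor(model_path, max_n_frames=max_n_frames)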