Leyo committed on
Commit
e2307a6
1 Parent(s): 70b423d

use IdeficsForVisionText2Text

Files changed (1)
  1. app_dialogue.py +2 -2
app_dialogue.py CHANGED
@@ -32,7 +32,7 @@ EOS_TOKENS = "</s>;User"
 import logging
 
 from accelerate.utils import get_max_memory
-from transformers import AutoTokenizer, AutoProcessor, AutoConfig, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoProcessor, AutoConfig, IdeficsForVisionText2Text
 
 
 TOKENIZER_FAST = True
@@ -66,7 +66,7 @@ def load_processor_tokenizer_model(model_name):
         # Decrease 2 for Pytorch overhead and 2 for the forward to be safe
         max_memory_map[key] = f"{max_memory_map[key] - 4} GiB"
 
-    model = AutoModelForCausalLM.from_pretrained(
+    model = IdeficsForVisionText2Text.from_pretrained(
         model_name,
         use_auth_token=os.getenv("HF_AUTH_TOKEN", True),
         device_map="auto",