VictorSanh committed
Commit 794d2b7
1 Parent(s): f78369f

Update visualization

Files changed (1)
app_dialogue.py +9 -8
app_dialogue.py CHANGED
@@ -32,7 +32,7 @@ EOS_TOKENS = "</s>;User"
 import logging
 
 from accelerate.utils import get_max_memory
-from transformers import AutoConfig, AutoModelForCausalLM, AutoProcessor, AutoTokenizer
+from transformers import AutoConfig, IdeficsForVisionText2Text, AutoProcessor
 
 
 TOKENIZER_FAST = True
@@ -48,12 +48,13 @@ def load_processor_tokenizer_model(model_name):
         use_auth_token=os.getenv("HF_AUTH_TOKEN", True),
         truncation_side="left",
     )
-    tokenizer = AutoTokenizer.from_pretrained(
-        model_name,
-        use_fast=TOKENIZER_FAST,
-        use_auth_token=os.getenv("HF_AUTH_TOKEN", True),
-        truncation_side="left",
-    )
+    tokenizer = processor.tokenizer
+    # tokenizer = AutoTokenizer.from_pretrained(
+    #     model_name,
+    #     use_fast=TOKENIZER_FAST,
+    #     use_auth_token=os.getenv("HF_AUTH_TOKEN", True),
+    #     truncation_side="left",
+    # )
     # tokenizer.padding_side = "left" -> we don't need that, do we?
 
     config = AutoConfig.from_pretrained(model_name, use_auth_token=os.getenv("HF_AUTH_TOKEN", True))
@@ -66,7 +67,7 @@ def load_processor_tokenizer_model(model_name):
         # Decrease 2 for Pytorch overhead and 2 for the forward to be safe
         max_memory_map[key] = f"{max_memory_map[key] - 4} GiB"
 
-    model = AutoModelForCausalLM.from_pretrained(
+    model = IdeficsForVisionText2Text.from_pretrained(
         model_name,
         use_auth_token=os.getenv("HF_AUTH_TOKEN", True),
         device_map="auto",