VictorSanh committed
Commit fd388f6
1 Parent(s): 2e03202

cleanup with transformers integration

comparative_app.py CHANGED
@@ -19,21 +19,21 @@ from transformers import Idefics2ForConditionalGeneration
 
 DEVICE = torch.device("cuda")
 MODELS = {
-    "idefics2 lima 200": Idefics2ForConditionalGeneration.from_pretrained(
+    "tr_288_cinco_final_sft_sphinx_11000": Idefics2ForConditionalGeneration.from_pretrained(
         "HuggingFaceM4/idefics2-tfrm-compatible",
         torch_dtype=torch.bfloat16,
         _attn_implementation="flash_attention_2",
         trust_remote_code=True,
         token=os.environ["HF_AUTH_TOKEN"],
-        revision="11794e2ae02dbf1c55d0ebd92c28e5b0b604cf5f",
+        revision="2e56f9030ba9a17b6ebcd1c9ad5311d5fad0115f",
     ).to(DEVICE),
-    "idefics2 sft 12600": Idefics2ForConditionalGeneration.from_pretrained(
+    "tr_290_bis_288_cinco_chatty_150": Idefics2ForConditionalGeneration.from_pretrained(
         "HuggingFaceM4/idefics2-tfrm-compatible",
         torch_dtype=torch.bfloat16,
         _attn_implementation="flash_attention_2",
         trust_remote_code=True,
         token=os.environ["HF_AUTH_TOKEN"],
-        revision="86f134822798266d0d8db049cc6458c625e32344",
+        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
     ).to(DEVICE),
 }
 PROCESSOR = AutoProcessor.from_pretrained(

@@ -199,13 +199,7 @@ def model_inference(
         user_prompt=user_prompt,
         chat_history=chat_history,
     )
-    msg = PROCESSOR.apply_chat_template(formated_prompt_list, add_generation_prompt=True, tokenize=False)
-    inputs = PROCESSOR.tokenizer(msg, return_tensors="pt", add_special_tokens=False)
-    all_images = extract_images_from_msg_list(formated_prompt_list)
-    if all_images:
-        img_inp = PROCESSOR(all_images)
-        inputs["pixel_values"] = torch.tensor(img_inp["pixel_values"])
-        inputs["pixel_attention_mask"] = torch.tensor(img_inp["pixel_attention_mask"])
+    inputs = PROCESSOR.apply_chat_template(formated_prompt_list, add_generation_prompt=True, return_tensors="pt")
     inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
     generation_args.update(inputs)
 
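The removed block rendered the chat template to a string, tokenized it separately, and hand-attached pixel_values / pixel_attention_mask; the new code gets the full model inputs from a single processor call. Below is a minimal sketch of the same end-to-end flow, assuming the public HuggingFaceM4/idefics2-8b checkpoint and the two-step prompt-then-process API documented for Idefics2 (the idefics2-tfrm-compatible repo above is gated behind HF_AUTH_TOKEN); the image path and generation settings are illustrative, not taken from this app.

import torch
from PIL import Image
from transformers import AutoProcessor, Idefics2ForConditionalGeneration

DEVICE = torch.device("cuda")

# Public checkpoint assumed here; the app itself loads a gated repo with a token.
processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
model = Idefics2ForConditionalGeneration.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
    torch_dtype=torch.bfloat16,
).to(DEVICE)

# One user turn with one image; {"type": "image"} is a placeholder that the
# chat template expands into the model's image tokens.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
image = Image.open("example.jpg")  # hypothetical local file

# Render the template to a prompt string, then let the processor build
# input_ids, attention_mask, pixel_values and pixel_attention_mask together.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt")
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

generated_ids = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])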
 
the_updated_app_with_tfrm_integration.py CHANGED
@@ -192,13 +192,7 @@ def model_inference(
         user_prompt=user_prompt,
         chat_history=chat_history,
     )
-    msg = PROCESSOR.apply_chat_template(formated_prompt_list, add_generation_prompt=True, tokenize=False)
-    inputs = PROCESSOR.tokenizer(msg, return_tensors="pt", add_special_tokens=False)
-    all_images = extract_images_from_msg_list(formated_prompt_list)
-    if all_images:
-        img_inp = PROCESSOR(all_images)
-        inputs["pixel_values"] = torch.tensor(img_inp["pixel_values"])
-        inputs["pixel_attention_mask"] = torch.tensor(img_inp["pixel_attention_mask"])
+    inputs = PROCESSOR.apply_chat_template(formated_prompt_list, add_generation_prompt=True, return_tensors="pt")
     inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
     generation_args.update(inputs)
 
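Both hunks leave generation_args.update(inputs) in place, so after this change the processor output flows straight into generation. The downstream step is not part of this diff; the following is a hypothetical sketch of how it might look, reusing the MODELS, PROCESSOR and inputs names from the hunks above, with illustrative decoding parameters.

# Hypothetical continuation of model_inference(); values are illustrative
# and not taken from the app code.
model = MODELS["tr_288_cinco_final_sft_sphinx_11000"]
generation_args = {
    "max_new_tokens": 512,
    "do_sample": False,
}
generation_args.update(inputs)  # adds input_ids, attention_mask, pixel_values, ...

with torch.no_grad():
    generated_ids = model.generate(**generation_args)

# Decode only the tokens generated after the prompt.
prompt_len = inputs["input_ids"].shape[1]
answer = PROCESSOR.batch_decode(
    generated_ids[:, prompt_len:], skip_special_tokens=True
)[0]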