arjunanand13 committed
Commit
a7a1792
•
1 Parent(s): dc974fe

Rename app.py to app_24_4_2024.py

Files changed (1)
  1. app.py → app_24_4_2024.py +2 -1
app.py → app_24_4_2024.py RENAMED
@@ -3,6 +3,7 @@ from transformers import AutoProcessor, Idefics2ForConditionalGeneration, AutoMo
 import subprocess
 import torch
 from peft import LoraConfig
+from huggingface_hub import InferenceApi
 # from transformers import BitsAndBytesConfig

 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
@@ -17,7 +18,7 @@ processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b", do_image_
 # bnb_4bit_compute_dtype=torch.float16
 # )

-# model = AutoModelForPreTraining.from_pretrained("HuggingFaceM4/idefics2-8b",quantization_config=bnb_config)
+

 model = Idefics2ForConditionalGeneration.from_pretrained("HuggingFaceM4/idefics2-8b",load_in_4bit=True)

 # if USE_QLORA or USE_LORA:
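
Context: load_in_4bit=True is shorthand for passing a quantization config, which is the path the commented-out BitsAndBytesConfig lines in this file gesture at. A minimal sketch of the equivalent explicit setup, assuming bitsandbytes is installed; the nf4 quant type is an assumption, since only the bnb_4bit_compute_dtype=torch.float16 fragment survives in the commented-out code:

import torch
from transformers import BitsAndBytesConfig, Idefics2ForConditionalGeneration

# Explicit 4-bit config; nf4 is an assumed quant type, the compute dtype
# matches the commented-out fragment in the diff above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Equivalent to the load_in_4bit=True call in the file, with the knobs exposed.
model = Idefics2ForConditionalGeneration.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
    quantization_config=bnb_config,
)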
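
The newly added InferenceApi import is huggingface_hub's legacy client for the hosted Inference API; the diff does not show it being used yet. A hypothetical usage sketch, where the repo_id and token are placeholders and not taken from this commit:

from huggingface_hub import InferenceApi

# Query a hosted model instead of loading it locally; the token is a placeholder.
inference = InferenceApi(repo_id="HuggingFaceM4/idefics2-8b", token="hf_xxx")
result = inference(inputs="describe the image")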