Update my_model/captioner/image_captioning.py
Browse files
my_model/captioner/image_captioning.py
CHANGED
@@ -22,6 +22,7 @@ class ImageCaptioningModel:
|
|
22 |
self.device_map = config.DEVICE_MAP
|
23 |
self.torch_dtype = config.TORCH_DTYPE
|
24 |
self.load_in_8bit = config.LOAD_IN_8BIT
|
|
|
25 |
self.low_cpu_mem_usage = config.LOW_CPU_MEM_USAGE
|
26 |
self.skip_secial_tokens = config.SKIP_SPECIAL_TOKENS
|
27 |
|
@@ -31,6 +32,7 @@ class ImageCaptioningModel:
|
|
31 |
if self.model_type == 'i_blip':
|
32 |
self.processor = InstructBlipProcessor.from_pretrained(self.model_path,
|
33 |
load_in_8bit=self.load_in_8bit,
|
|
|
34 |
torch_dtype=self.torch_dtype,
|
35 |
device_map=self.device_map
|
36 |
)
|
|
|
22 |
self.device_map = config.DEVICE_MAP
|
23 |
self.torch_dtype = config.TORCH_DTYPE
|
24 |
self.load_in_8bit = config.LOAD_IN_8BIT
|
25 |
+
self.load_in_4bit = config.LOAD_IN_4BIT
|
26 |
self.low_cpu_mem_usage = config.LOW_CPU_MEM_USAGE
|
27 |
self.skip_secial_tokens = config.SKIP_SPECIAL_TOKENS
|
28 |
|
|
|
32 |
if self.model_type == 'i_blip':
|
33 |
self.processor = InstructBlipProcessor.from_pretrained(self.model_path,
|
34 |
load_in_8bit=self.load_in_8bit,
|
35 |
+
load_in_4bit=self.load_in_4bit,
|
36 |
torch_dtype=self.torch_dtype,
|
37 |
device_map=self.device_map
|
38 |
)
|