Nemil committed on
Commit 83ff097 • 1 Parent(s): 6270cb3

Upload app.py

Files changed (1)
  1. app.py +28 -26
app.py CHANGED
```diff
@@ -140,9 +140,11 @@ class Social_Media_Captioner:
     def _load_model(self):
         self.bnb_config = BitsAndBytesConfig(
             load_in_4bit = True,
+            llm_int8_enable_fp32_cpu_offload=True,
             bnb_4bit_use_double_quant = True,
             bnb_4bit_quant_type= "nf4",
             bnb_4bit_compute_dtype=torch.bfloat16,
+            load_in_8bit_fp32_cpu_offload=True
         )
         self.model = AutoModelForCausalLM.from_pretrained(
             self.MODEL_NAME,
@@ -155,32 +157,32 @@ class Social_Media_Captioner:
         self.tokenizer = AutoTokenizer.from_pretrained(self.MODEL_NAME)
         self.tokenizer.pad_token = self.tokenizer.eos_token
 
-        if self.use_finetuned:
-            # LORA Config Model
-            self.lora_config = LoraConfig(
-                r=16,
-                lora_alpha=32,
-                target_modules=["query_key_value"],
-                lora_dropout=0.05,
-                bias="none",
-                task_type="CAUSAL_LM"
-            )
-            self.model = get_peft_model(self.model, self.lora_config)
-
-            # Fitting the adapters
-            self.peft_config = PeftConfig.from_pretrained(self.peft_model_name)
-            self.model = AutoModelForCausalLM.from_pretrained(
-                self.peft_config.base_model_name_or_path,
-                return_dict = True,
-                quantization_config = self.bnb_config,
-                device_map= "auto",
-                trust_remote_code = True
-            )
-            self.model = PeftModel.from_pretrained(self.model, self.peft_model_name)
-
-            # Defining the tokenizers
-            self.tokenizer = AutoTokenizer.from_pretrained(self.peft_config.base_model_name_or_path)
-            self.tokenizer.pad_token = self.tokenizer.eos_token
+        # if self.use_finetuned:
+        #     # LORA Config Model
+        #     self.lora_config = LoraConfig(
+        #         r=16,
+        #         lora_alpha=32,
+        #         target_modules=["query_key_value"],
+        #         lora_dropout=0.05,
+        #         bias="none",
+        #         task_type="CAUSAL_LM"
+        #     )
+        #     self.model = get_peft_model(self.model, self.lora_config)
+
+        #     # Fitting the adapters
+        #     self.peft_config = PeftConfig.from_pretrained(self.peft_model_name)
+        #     self.model = AutoModelForCausalLM.from_pretrained(
+        #         self.peft_config.base_model_name_or_path,
+        #         return_dict = True,
+        #         quantization_config = self.bnb_config,
+        #         device_map= "auto",
+        #         trust_remote_code = True
+        #     )
+        #     self.model = PeftModel.from_pretrained(self.model, self.peft_model_name)
+
+        #     # Defining the tokenizers
+        #     self.tokenizer = AutoTokenizer.from_pretrained(self.peft_config.base_model_name_or_path)
+        #     self.tokenizer.pad_token = self.tokenizer.eos_token
 
         self.model_loaded = True
         print("Model Loaded successfully")
```
 