Update app.py
app.py
@@ -11,7 +11,7 @@ bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_use_double_quant=True,
     bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.
+    bnb_4bit_compute_dtype=torch.float16
 )
 
 # Load models and tokenizer efficiently
@@ -24,7 +24,7 @@ model = PeftModel.from_pretrained(model, model_id)
 
 def greet(text):
     with torch.no_grad():  # Disable gradient calculation for inference
-        batch = tokenizer(f'### Human: {text}', return_tensors='pt')  # Move tensors to device
+        batch = tokenizer(f'### Human: {text}\n### Assistant:', return_tensors='pt')  # Move tensors to device
         with torch.cuda.amp.autocast():  # Enable mixed-precision if available
             output_tokens = model.generate(**batch, max_new_tokens=25)
     return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
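For context, a minimal sketch of how the full app.py could fit together after this commit. Only bnb_config and greet appear in the diff; the model identifiers, the Gradio wiring, and the explicit .to(model.device) call are assumptions added for illustration.

# Sketch only: "base-model" and "adapter-model" are hypothetical identifiers,
# and the Gradio interface and .to(model.device) call are not shown in the diff.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# 4-bit NF4 quantization with double quantization; compute in float16
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

base_model_id = "base-model"   # hypothetical base checkpoint
model_id = "adapter-model"     # hypothetical PEFT adapter

# Load models and tokenizer efficiently
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(
    base_model_id, quantization_config=bnb_config, device_map="auto"
)
model = PeftModel.from_pretrained(model, model_id)

def greet(text):
    with torch.no_grad():  # Disable gradient calculation for inference
        # Prompt now ends with "### Assistant:" so generation starts at the reply
        batch = tokenizer(
            f'### Human: {text}\n### Assistant:', return_tensors='pt'
        ).to(model.device)  # Move tensors to the model's device (assumption)
        with torch.cuda.amp.autocast():  # Enable mixed precision if CUDA is available
            output_tokens = model.generate(**batch, max_new_tokens=25)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

gr.Interface(fn=greet, inputs="text", outputs="text").launch()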