import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import warnings
import glob
from peft import PeftModel

warnings.filterwarnings("ignore")

base_model_id = "mistralai/Mistral-7B-v0.1"

# 4-bit NF4 quantization with double quantization to shrink the base model's memory footprint
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the quantized base model; use_auth_token=True authenticates against the Hugging Face Hub
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
    use_auth_token=True,
)

# Left padding for decoder-only generation; reuse the EOS token as the pad token
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side='left')
tokenizer.pad_token = tokenizer.eos_token

# Attach the fine-tuned PEFT adapter to the quantized base model
ft_model = PeftModel.from_pretrained(base_model, "PEFT-CHECKPOINT-PATH")

# Translation prompt: task prefix plus the English sentence, terminated with the </end> marker
prefix = "translate English Text to Hindi Text: "
eval_prompt = prefix + "Why are people crazy?</end>"

model_input = tokenizer(eval_prompt, return_tensors="pt").to("cuda")

ft_model.eval()

# Generate the Hindi translation and decode it, skipping special tokens;
# pad_token_id is set to the EOS id (2 for Mistral), matching the tokenizer setup above
with torch.no_grad():
    output_ids = ft_model.generate(**model_input, max_new_tokens=70,
                                   pad_token_id=tokenizer.eos_token_id, repetition_penalty=1.2)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))