philschmid (HF staff) committed
Commit f2b7d1a
1 Parent(s): 884d1b9

Update inference.py

Files changed (1)
  1. inference.py +9 -6
inference.py CHANGED
@@ -8,6 +8,8 @@ peft_model_id = "philschmid/gemma-7b-dolly-chatml"
 tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
 model = AutoPeftModelForCausalLM.from_pretrained(peft_model_id, device_map="auto", torch_dtype=torch.float16)
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+eos_token = tokenizer("<|im_end|>",add_special_tokens=False)["input_ids"][0]
+print(f"eos_token: {eos_token}")
 
 # run inference
 messages = [
@@ -18,8 +20,10 @@ messages = [
 ]
 
 prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-outputs = pipe(prompt, max_new_tokens=1024, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, pad_token_id=pipe.tokenizer.pad_token_id, eos_token_id=pipe.tokenizer.eos_token_id)
-print(outputs[0]["generated_text"])
+outputs = pipe(prompt, max_new_tokens=1024, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, eos_token_id=eos_token)
+
+print(f"prompt:\n {messages[0]['content']}")
+print(f"response:\n {outputs[0]['generated_text'][len(prompt):]}")
 
 # run inference
 messages = [
@@ -30,8 +34,7 @@ messages = [
 ]
 
 prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-outputs = pipe(prompt, max_new_tokens=1024, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, pad_token_id=pipe.tokenizer.pad_token_id, eos_token_id=pipe.tokenizer.eos_token_id)
-print(outputs[0]["generated_text"])
-
+outputs = pipe(prompt, max_new_tokens=1024, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, eos_token_id=eos_token)
 
-# pip3 list | grep -e transformers -e peft -e torch -e trl -e accelerate
+print(f"prompt:\n {messages[0]['content']}")
+print(f"response:\n {outputs[0]['generated_text'][len(prompt):]}")