Update README.md
README.md (changed):
@@ -38,24 +38,27 @@ messages = [
     {"role": "user", "content": "Who are you?"},
 ]
 
-
-
-
-
-
+
+prompt = pipeline.tokenizer.apply_chat_template(
+    messages,
+    tokenize=False,
+    add_generation_prompt=True
+)
 
 terminators = [
-
-
+    pipeline.tokenizer.eos_token_id,
+    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
 ]
 
+
 outputs = model.generate(
-
-
-
-
-
-
+    prompt,
+    max_new_tokens=2048,
+    eos_token_id=terminators,
+    do_sample=False,
+    temperature=0.6,
+    top_p=1,
+    repetition_penalty=1.05
 )
 response = outputs[0][input_ids.shape[-1]:]
 print(tokenizer.decode(response, skip_special_tokens=True))
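As written, the added snippet renders the prompt to a string (`tokenize=False`) through `pipeline.tokenizer` but then hands that string to `model.generate()`, which expects token ids, and the final slice refers to an `input_ids` variable the hunk never defines. The sketch below is one way to make the example run end to end; it is an assumption-laden reading, not the README's verbatim code: the checkpoint id is a placeholder, `model` and `tokenizer` are loaded with plain `transformers` rather than a pipeline, and the chat template is tokenized directly so that `input_ids` exists for the slicing step.

```python
# A minimal, runnable sketch of the updated example, under stated assumptions:
# the checkpoint id below is a placeholder (not from the diff), and the chat
# template is tokenized directly (return_tensors="pt") instead of rendered to
# a string, so that `input_ids` is defined for the final slice.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # placeholder checkpoint

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "user", "content": "Who are you?"},
]

# Render the chat template straight to token ids; the diff renders to a
# string via tokenize=False, but model.generate() needs tensors.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

# Stop on either the generic EOS token or the end-of-turn marker.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = model.generate(
    input_ids,
    max_new_tokens=2048,
    eos_token_id=terminators,
    do_sample=False,        # greedy decoding; temperature/top_p below are
    temperature=0.6,        # ignored (transformers warns) unless sampling
    top_p=1,                # is turned back on
    repetition_penalty=1.05,
)

# Keep only the newly generated tokens, then decode them.
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```

The two-entry `terminators` list matches what the `<|eot_id|>` token in the diff suggests: a Llama-3-family chat template, where the model ends each assistant turn with `<|eot_id|>` rather than the generic EOS token, so generation should stop at either.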