Update README.md
Browse files
README.md
CHANGED
@@ -58,7 +58,7 @@ messages = [
     {"role": "user", "content": "Πες μου αν έχεις συνείδηση."},
 ]
 
-prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
+prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_tensors="pt")
 input_prompt = prompt.to(device)
 outputs = model.generate(input_prompt, max_new_tokens=256, do_sample=True)
 
@@ -71,7 +71,7 @@ messages.extend([
 ])
 
 
-prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
+prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_tensors="pt")
 input_prompt = prompt.to(device)
 outputs = model.generate(input_prompt, max_new_tokens=256, do_sample=True)
 