add generation_config
Browse files
README.md
CHANGED
```diff
@@ -139,6 +139,13 @@ prompt = generate_prompt(
 inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=2048)
 input_ids = inputs["input_ids"].to(model.device)
 
+generation_config = GenerationConfig(
+    temperature=0.2,
+    top_p=0.75,
+    top_k=40,
+    num_beams=4,
+)
+
 with torch.no_grad():
     gen_outputs = model.generate(
         input_ids=input_ids,
```