Update README.md
README.md (changed)

````diff
@@ -38,18 +38,20 @@ query_and_docs = f"Domanda: {query} Contesto: {doc}"
 
 model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt")
 
-output = model.generate(
-
-
-
-
-
-
-
-
-
-
+output = model.generate(
+    input_ids=model_input["input_ids"],
+    attention_mask=model_input["attention_mask"],
+    min_length=10,
+    max_length=256,
+    do_sample=False,
+    early_stopping=True,
+    num_beams=8,
+    temperature=1.0,
+    top_k=None,
+    top_p=None,
+    no_repeat_ngram_size=3,
+    num_return_sequences=1
+)
 
 tokenizer.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
 ```
````
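
For context, here is a minimal, self-contained sketch of how the updated usage example fits together. The checkpoint name and the example question/context strings are placeholders for illustration and are not part of the commit; only the `generate()` arguments come from the diff. Note that with `do_sample=False` the call performs deterministic beam search, so `temperature=1.0`, `top_k=None`, and `top_p=None` are neutral no-ops and are omitted from the sketch.

```python
# Minimal sketch of the README's updated usage example.
# Assumptions: the checkpoint name and the query/doc strings below are
# placeholders; any seq2seq QA model fine-tuned on
# "Domanda: ... Contesto: ..." inputs would slot in here.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "some-org/italian-seq2seq-qa"  # placeholder checkpoint name

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

query = "Quando è nata la Repubblica Italiana?"         # example question
doc = "La Repubblica Italiana nasce il 2 giugno 1946."  # example context
query_and_docs = f"Domanda: {query} Contesto: {doc}"

model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt")

# With do_sample=False this is deterministic beam search: num_beams=8 keeps
# eight hypotheses alive, early_stopping=True ends the search once every beam
# has finished, and no_repeat_ngram_size=3 blocks repeated trigrams.
output = model.generate(
    input_ids=model_input["input_ids"],
    attention_mask=model_input["attention_mask"],
    min_length=10,
    max_length=256,
    do_sample=False,
    early_stopping=True,
    num_beams=8,
    no_repeat_ngram_size=3,
    num_return_sequences=1,
)

print(tokenizer.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True))
```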