efederici committed
Commit: d814455
1 parent: 9b34753

Update README.md

Files changed (1): README.md (+14, -12)
README.md CHANGED
@@ -38,18 +38,20 @@ query_and_docs = f"Domanda: {query} Contesto: {doc}"
 
 model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt")
 
-output = model.generate(input_ids=model_input["input_ids"],
-                        attention_mask=model_input["attention_mask"],
-                        min_length=10,
-                        max_length=256,
-                        do_sample=False,
-                        early_stopping=True,
-                        num_beams=8,
-                        temperature=1.0,
-                        top_k=None,
-                        top_p=None,
-                        no_repeat_ngram_size=3,
-                        num_return_sequences=1)
+output = model.generate(
+    input_ids=model_input["input_ids"],
+    attention_mask=model_input["attention_mask"],
+    min_length=10,
+    max_length=256,
+    do_sample=False,
+    early_stopping=True,
+    num_beams=8,
+    temperature=1.0,
+    top_k=None,
+    top_p=None,
+    no_repeat_ngram_size=3,
+    num_return_sequences=1
+)
 
 tokenizer.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
 ```
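
For reference, a minimal self-contained sketch of how the reformatted snippet runs end to end. The checkpoint name and the query/context strings below are placeholders, not taken from this repository; only the tokenization, `generate`, and `batch_decode` calls mirror the diff above.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Placeholder checkpoint: substitute the seq2seq model this README documents.
checkpoint = "your-org/your-italian-qa-model"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# Placeholder question and context ("Domanda" = question, "Contesto" = context).
query = "Quando è nata la Repubblica Italiana?"
doc = "La Repubblica Italiana nasce il 2 giugno 1946."
query_and_docs = f"Domanda: {query} Contesto: {doc}"

model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt")

# Deterministic beam search over 8 beams, as in the diff above.
output = model.generate(
    input_ids=model_input["input_ids"],
    attention_mask=model_input["attention_mask"],
    min_length=10,
    max_length=256,
    do_sample=False,
    early_stopping=True,
    num_beams=8,
    temperature=1.0,  # no effect when do_sample=False; kept to match the README
    top_k=None,
    top_p=None,
    no_repeat_ngram_size=3,
    num_return_sequences=1,
)

print(tokenizer.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True))
```

With `do_sample=False` this is pure beam search, so `temperature`, `top_k`, and `top_p` are ignored (recent transformers releases warn about unused sampling flags); the knobs that actually shape the output are `num_beams`, `no_repeat_ngram_size`, and the `min_length`/`max_length` bounds.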