AgaMiko committed on
Commit
467bcaa
1 Parent(s): 3f7d4b3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +12 -8
README.md CHANGED
@@ -84,16 +84,20 @@ model = T5ForConditionalGeneration.from_pretrained("Voicelab/vlt5-base-keywords"
84
  tokenizer = T5Tokenizer.from_pretrained("Voicelab/vlt5-base-keywords")
85
 
86
  task_prefix = "Keywords: "
87
- inputs = ["Christina Katrakis, who spoke to the BBC from Vorokhta in western Ukraine, relays the account of one family, who say Russian soldiers shot at their vehicles while they were leaving their village near Chernobyl in northern Ukraine. She says the cars had white flags and signs saying they were carrying children.",
88
- "Decays the learning rate of each parameter group by gamma every step_size epochs. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr.",
89
- "Hello, I'd like to order a pizza with salami topping."]
 
 
90
 
91
  for sample in inputs:
92
- input_sequences = [task_prefix + sample]
93
- input_ids = tokenizer(input_sequences, return_tensors='pt', truncation=True).input_ids
94
- output = model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
95
- predicted = tokenizer.decode(output[0], skip_special_tokens=True)
96
- print(sample, "\n --->", predicted)
 
 
97
 
98
  ```
99
  # Inference
 
84
  tokenizer = T5Tokenizer.from_pretrained("Voicelab/vlt5-base-keywords")
85
 
86
  task_prefix = "Keywords: "
87
+ inputs = [
88
+ "Christina Katrakis, who spoke to the BBC from Vorokhta in western Ukraine, relays the account of one family, who say Russian soldiers shot at their vehicles while they were leaving their village near Chernobyl in northern Ukraine. She says the cars had white flags and signs saying they were carrying children.",
89
+ "Decays the learning rate of each parameter group by gamma every step_size epochs. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr.",
90
+ "Hello, I'd like to order a pizza with salami topping.",
91
+ ]
92
 
93
  for sample in inputs:
94
+ input_sequences = [task_prefix + sample]
95
+ input_ids = tokenizer(
96
+ input_sequences, return_tensors="pt", truncation=True
97
+ ).input_ids
98
+ output = model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
99
+ predicted = tokenizer.decode(output[0], skip_special_tokens=True)
100
+ print(sample, "\n --->", predicted)
101
 
102
  ```
103
  # Inference