ai-forever committed
Commit dd038a5
Parent: e3fc629

Update README.md

Files changed (1): README.md (+4 -14)
README.md CHANGED
@@ -221,23 +221,13 @@ RUSpellRU, MultidomainGold, MedSpellChecker, GitHubTypoCorpusRu are datasets for
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
  tokenizer = AutoTokenizer.from_pretrained("ai-forever/sage-mt5-large")
- model = AutoModelForSeq2SeqLM.from_pretrained("ai-forever/sage-mt5-large")
- model.to("cuda:0")
+ model = AutoModelForSeq2SeqLM.from_pretrained("ai-forever/sage-mt5-large", device_map='cuda')
 
  sentence = "Перведи мне текст на аглиском: \"Screw you kuys, I am goin hme (c)."
- with torch.inference_mode():
-     encodings = tokenizer(sentence, max_length=None, padding="longest", truncation=False, return_tensors="pt")
-     for k, v in encodings.items():
-         encodings[k] = v.to("cuda:0")
-     res = model.generate(
-         **encodings,
-         use_cache=True,
-         max_length = encodings["input_ids"].size(1) * 1.5
-     )
-     res = res.cpu().tolist()
-     res = tokenizer.batch_decode(res, skip_special_tokens=True)
+ inputs = tokenizer(sentence, max_length=None, padding="longest", truncation=False, return_tensors="pt")
+ outputs = model.generate(**inputs.to(model.device), max_length = inputs["input_ids"].size(1) * 1.5)
+ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
 
- print(res)
  # ["Переведи мне текст на английском: "Screw you guys, I am going home" (c)."]
  ```
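For reference, a minimal standalone sketch of the updated usage. It assumes `torch`, `transformers`, and `accelerate` are installed and a CUDA device is available; the `int()` cast on `max_length` is an adjustment made here so the 1.5× length budget is passed as an integer rather than a float.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the spelling-correction model onto the GPU (device_map requires accelerate).
tokenizer = AutoTokenizer.from_pretrained("ai-forever/sage-mt5-large")
model = AutoModelForSeq2SeqLM.from_pretrained("ai-forever/sage-mt5-large", device_map="cuda")

# Intentionally misspelled input, as in the README example.
sentence = "Перведи мне текст на аглиском: \"Screw you kuys, I am goin hme (c)."

# Tokenize and move the batch to the model's device.
inputs = tokenizer(sentence, padding="longest", return_tensors="pt").to(model.device)

# Cast the 1.5x length budget to int (adjustment relative to the README snippet).
outputs = model.generate(**inputs, max_length=int(inputs["input_ids"].size(1) * 1.5))

print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
# ["Переведи мне текст на английском: "Screw you guys, I am going home" (c)."]
```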