anton-l committed
Commit 91602b2 • 1 Parent(s): e6ae6ee

Update app.py

Files changed (1)
app.py +5 -3
app.py CHANGED
@@ -12,15 +12,17 @@ from rudalle import get_rudalle_model, get_tokenizer, get_vae
 tqdm.__init__ = partialmethod(tqdm.__init__, disable=True)
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-translation_model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru", torch_dtype=torch.float16)
+translation_model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru", torch_dtype=torch.float16).to(device)
 tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
-translation_pipe = pipeline("translation", model=translation_model, tokenizer=tokenizer, device=0)
 dalle = get_rudalle_model("Malevich", pretrained=True, fp16=True, device=device)
 tokenizer = get_tokenizer()
 vae = get_vae().to(device)
 
 def translation_wrapper(text: str):
-    return translation_pipe(text)[0]["translation_text"]
+    input_ids = tokenizer.encode(text, return_tensors="pt")
+    outputs = translation_model.generate(input_ids)
+    decoded = tokenizer.decode(outputs[0].float(), skip_special_tokens=True)
+    return decoded
 
 def dalle_wrapper(prompt: str):
     top_k, top_p = random.choice([
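Two hazards remain in the code as committed: `translation_wrapper` reads the module-level `tokenizer`, which the later `tokenizer = get_tokenizer()` line rebinds to the ruDALL-E tokenizer, and `input_ids` stays on CPU while the fp16 FSMT model has been moved to CUDA. Below is a minimal sketch of the same translation path with those hazards avoided. It is not part of the commit: `translation_tokenizer` is a hypothetical rename, the fp32 fallback on CPU is an extra safeguard, and the `.float()` cast is dropped since `generate` returns integer token ids.

```python
# Sketch only, not the committed code. Assumes transformers' FSMT API;
# `translation_tokenizer` is a hypothetical rename so the ruDALL-E
# tokenizer (`tokenizer = get_tokenizer()`) cannot shadow it.
import torch
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# fp16 halves GPU memory; fall back to fp32 on CPU, where fp16 is impractical.
dtype = torch.float16 if device.type == "cuda" else torch.float32
translation_model = FSMTForConditionalGeneration.from_pretrained(
    "facebook/wmt19-en-ru", torch_dtype=dtype
).to(device)
translation_tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")


def translation_wrapper(text: str) -> str:
    # Encode on CPU, then move the ids to the model's device before generate().
    input_ids = translation_tokenizer.encode(text, return_tensors="pt").to(device)
    outputs = translation_model.generate(input_ids)
    # outputs[0] is a 1-D tensor of token ids; decode() strips BOS/EOS markers.
    return translation_tokenizer.decode(outputs[0], skip_special_tokens=True)


print(translation_wrapper("A painting of a sunset over the sea"))
```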