guialfaro committed
Commit 4ce55a4
Parent(s): 73ff216

Update README.md

Files changed (1)
  1. README.md +4 -5
README.md CHANGED
@@ -9,10 +9,9 @@ The dataset utilized can be found on the *Files and versions* tab under the name
  import torch
  from transformers import BartForConditionalGeneration, AutoTokenizer
 
- model = BartForConditionalGeneration.from_pretrained('guialfaro/korean-paraphrasing')
- tokenizer = AutoTokenizer.from_pretrained('guialfaro/korean-paraphrasing')
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model = model.to(device)
+ model = BartForConditionalGeneration.from_pretrained('guialfaro/korean-paraphrasing').to(device)
+ tokenizer = AutoTokenizer.from_pretrained('guialfaro/korean-paraphrasing')
 
  sentence = "7층 방문을 위해 방문록 작성이 필요합니다."
  text = f"paraphrase: {sentence} "
@@ -39,7 +38,7 @@ generated_ids = model.generate(
 
  preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
 
- print (f"Original Sentence :: {sentence}")
- print (f"Paraphrased Sentences :: {preds[0]}")
+ print(f"Original Sentence :: {sentence}")
+ print(f"Paraphrased Sentence :: {preds[0]}")
 
  ```
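
For reference, the combined usage snippet after this change reads roughly as follows. The tokenization and `model.generate(...)` call between the two hunks (README lines 18-37) is not visible in this diff, so that portion is an assumed, typical invocation rather than the README's exact code.

```python
import torch
from transformers import BartForConditionalGeneration, AutoTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BartForConditionalGeneration.from_pretrained('guialfaro/korean-paraphrasing').to(device)
tokenizer = AutoTokenizer.from_pretrained('guialfaro/korean-paraphrasing')

sentence = "7층 방문을 위해 방문록 작성이 필요합니다."
text = f"paraphrase: {sentence} "

# The encoding/generation step between the two hunks is not shown in the diff;
# these generate() arguments are assumptions, not the README's exact values.
encoding = tokenizer(text, return_tensors="pt").to(device)
generated_ids = model.generate(
    input_ids=encoding["input_ids"],
    attention_mask=encoding["attention_mask"],
    max_length=128,
    num_beams=5,
)

preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]

print(f"Original Sentence :: {sentence}")
print(f"Paraphrased Sentence :: {preds[0]}")
```

Chaining `.to(device)` onto `from_pretrained(...)` removes the need for the separate `model = model.to(device)` line, which is the substance of this commit.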