Update README.md

README.md

Training data:
http://dl.fbaipublicfiles.com/rephrasing/rephrasing_dataset.tar.gz
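The archive can be fetched and unpacked with the Python standard library. A minimal sketch, assuming illustrative local file and directory names (they are not specified in the README):

```python
import tarfile
import urllib.request

url = "http://dl.fbaipublicfiles.com/rephrasing/rephrasing_dataset.tar.gz"
archive_path = "rephrasing_dataset.tar.gz"  # local filename is an arbitrary choice

# Download the tarball, then extract it into a local directory.
urllib.request.urlretrieve(url, archive_path)
with tarfile.open(archive_path, "r:gz") as tar:
    tar.extractall("rephrasing_dataset")
```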
```python
from transformers import AutoTokenizer, AutoModelWithLMHead

tokenizer = AutoTokenizer.from_pretrained("salesken/natural_rephrase")
model = AutoModelWithLMHead.from_pretrained("salesken/natural_rephrase")

input_query = "Hey Siri, Send message to mom to say thank you for the delicious dinner yesterday"
# The model expects the source sentence followed by the " ~~ " separator;
# the rephrasing is generated after the separator.
query = input_query + " ~~ "
input_ids = tokenizer.encode(query.lower(), return_tensors='pt')
sample_outputs = model.generate(input_ids,
                                do_sample=True,
                                num_beams=1,
                                max_length=len(input_query),
                                temperature=0.2,
                                top_k=10,
                                num_return_sequences=1)
for i in range(len(sample_outputs)):
    # Keep only the text between the "~~" separator and the "||" terminator.
    result = tokenizer.decode(sample_outputs[i], skip_special_tokens=True).split('||')[0].split('~~')[1]
    print(result)
```
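With do_sample=True, the low temperature (0.2) and top_k=10 keep the sampled output close to the input phrasing; the decode step keeps only the text between the "~~" separator and the "||" terminator, which is the rephrased query. Note that max_length counts tokens, so passing the character count len(input_query) simply acts as a generous upper bound on generation length.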