Update README.md
README.md CHANGED
@@ -62,6 +62,32 @@ The following hyperparameters were used during training:
 | 0.04 | 5.0 | 108305 | 0.0320 |
 | 0.0381 | 6.0 | 129966 | 0.0313 |

+### Usage
+
+```py
+import torch
+from transformers import AutoTokenizer, T5ForConditionalGeneration
+
+MODEL_CKPT = "mrm8488/t5-small-finetuned-text2log"
+
+# Select the device for inference (GPU if available).
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+model = T5ForConditionalGeneration.from_pretrained(MODEL_CKPT).to(device)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_CKPT)
+
+
+def translate(text):
+    # Tokenize the prompt, generate, and decode a single translation.
+    inputs = tokenizer(text, padding="longest", max_length=64, return_tensors="pt")
+    input_ids = inputs.input_ids.to(device)
+    attention_mask = inputs.attention_mask.to(device)
+
+    output = model.generate(input_ids, attention_mask=attention_mask, early_stopping=False, max_length=64)
+
+    return tokenizer.decode(output[0], skip_special_tokens=True)
+
+
+# Task prefixes: natural language -> first-order logic, and back.
+prompt_nl_to_fol = "translate to fol: "
+prompt_fol_to_nl = "translate to nl: "
+example_1 = "Every killer leaves something."
+example_2 = "all x1.(_woman(x1) -> exists x2.(_emotion(x2) & _experience(x1,x2)))"
+
+print(translate(prompt_nl_to_fol + example_1))  # all x1.(_killer(x1) -> exists x2._leave(x1,x2))
+print(translate(prompt_fol_to_nl + example_2))  # Every woman experiences emotions.
+```

 ### Framework versions
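As a quick alternative (a minimal sketch, not part of the original card), the same checkpoint can be driven through `transformers`' `text2text-generation` pipeline, which bundles tokenization, generation, and decoding into a single call:

```py
from transformers import pipeline

# Sketch: load the same checkpoint through the high-level pipeline API.
nl2fol = pipeline("text2text-generation", model="mrm8488/t5-small-finetuned-text2log")

out = nl2fol("translate to fol: Every killer leaves something.", max_length=64)
print(out[0]["generated_text"])  # should match the NL -> FOL example above
```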