Update README
README.md CHANGED
@@ -23,15 +23,15 @@ Training was done on type-level, where, given the historic form of a type, the m
 ## Demo Usage
 
 ```python
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 tokenizer = AutoTokenizer.from_pretrained('aehrm/dtaec-type-normalizer')
-model =
+model = AutoModelForSeq2SeqLM.from_pretrained('aehrm/dtaec-type-normalizer')
 
-model_in = tokenizer([
-model_out = model(**model_in)
+model_in = tokenizer(['Freyheit', 'seyn', 'selbstthätig'], return_tensors='pt', padding=True)
+model_out = model.generate(**model_in)
 
-print(tokenizer.
+print(tokenizer.batch_decode(model_out))
 ```
 
 
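For quick testing, the new side of the diff can be assembled into a runnable sketch as below. The example historic forms come from the commit itself; the `skip_special_tokens=True` flag and the input/output pairing loop are assumptions added here for readability and are not part of the committed snippet.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and the seq2seq model used for type-level normalization.
tokenizer = AutoTokenizer.from_pretrained('aehrm/dtaec-type-normalizer')
model = AutoModelForSeq2SeqLM.from_pretrained('aehrm/dtaec-type-normalizer')

# Historic word forms (types) to normalize; padding batches the variable-length inputs.
historic = ['Freyheit', 'seyn', 'selbstthätig']
model_in = tokenizer(historic, return_tensors='pt', padding=True)
model_out = model.generate(**model_in)

# skip_special_tokens=True (an addition, not in the committed README) strips
# pad/eos markers so only the normalized forms are printed.
normalized = tokenizer.batch_decode(model_out, skip_special_tokens=True)
for old_form, new_form in zip(historic, normalized):
    print(f'{old_form} -> {new_form}')
```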