File size: 785 Bytes
e644b3b
 
 
 
 
 
 
 
 
 
025578e
 
e644b3b
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
---
language:
- pt
---

``` python

from transformers import ElectraForPreTraining, ElectraTokenizerFast
import torch

# Load the Portuguese (pt-BR) ELECTRA-small discriminator and its matching
# fast tokenizer from the Hugging Face hub.
discriminator = ElectraForPreTraining.from_pretrained("josu/electra-pt-br-small-discriminator")
tokenizer = ElectraTokenizerFast.from_pretrained("josu/electra-pt-br-small-discriminator")

# A plausible sentence and a "corrupted" variant where one token has been
# replaced ("cantando" -> "falando"); the discriminator's job is to flag
# the replaced token.
sentence = "os passaros estão cantando"
fake_sentence = "os passaros estão falando"

fake_tokens = tokenizer.tokenize(fake_sentence)
fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
discriminator_outputs = discriminator(fake_inputs)
# Map the raw discriminator logits to hard 0/1 labels:
# sign(logit) is -1/0/+1, so (sign + 1) / 2 rounds to 0 (original token)
# or 1 (replaced token).
predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)

# Print the tokens and their per-token predictions as two aligned rows.
# (Plain loops, not list comprehensions, for side-effecting prints; the
# published snippet was also missing the closing bracket on the second
# comprehension, which made it a syntax error.)
for token in fake_tokens:
    print("%7s" % token, end="")
print()

# NOTE(review): predictions include positions for the [CLS]/[SEP] special
# tokens added by encode(), so this row is two entries longer than the
# token row above — matches the upstream ELECTRA example's behavior.
for prediction in predictions.squeeze().tolist():
    print("%7s" % int(prediction), end="")
print()

```