Update README.md
Browse files
README.md
CHANGED
|
@@ -72,7 +72,7 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
|
| 72 |
tokenizer = AutoTokenizer.from_pretrained("AIPsy/bert-base-client-topic-classification-eng")
|
| 73 |
model = AutoModelForSequenceClassification.from_pretrained("AIPsy/bert-base-client-topic-classification-eng")
|
| 74 |
|
| 75 |
-
text = "
|
| 76 |
|
| 77 |
encoding = tokenizer(
|
| 78 |
text,
|
|
@@ -83,7 +83,7 @@ encoding = tokenizer(
|
|
| 83 |
output = model(encoding['input_ids'], encoding['attention_mask']).logits
|
| 84 |
result = np.argmax(output.detach().numpy(), axis=-1)
|
| 85 |
print(id2label[result[0]])
|
| 86 |
-
'
|
| 87 |
```
|
| 88 |
|
| 89 |
## Dataset
|
|
|
|
| 72 |
tokenizer = AutoTokenizer.from_pretrained("AIPsy/bert-base-client-topic-classification-eng")
|
| 73 |
model = AutoModelForSequenceClassification.from_pretrained("AIPsy/bert-base-client-topic-classification-eng")
|
| 74 |
|
| 75 |
+
text = "That in essence it's right; it's responsibility."
|
| 76 |
|
| 77 |
encoding = tokenizer(
|
| 78 |
text,
|
|
|
|
| 83 |
output = model(encoding['input_ids'], encoding['attention_mask']).logits
|
| 84 |
result = np.argmax(output.detach().numpy(), axis=-1)
|
| 85 |
print(id2label[result[0]])
|
| 86 |
+
'Guilt and Responsibility'
|
| 87 |
```
|
| 88 |
|
| 89 |
## Dataset
|