Update README.md
README.md
CHANGED
@@ -21,7 +21,7 @@ from transformers import AutoModelForSequenceClassification, AutoTokenizer
 tokenizer = AutoTokenizer.from_pretrained("tum-nlp/Deberta_Human_Value_Detector")
 trained_model = AutoModelForSequenceClassification.from_pretrained("tum-nlp/Deberta_Human_Value_Detector", trust_remote_code=True)

-example_text ='
+example_text ='We should ban whaling because whales are a species at the risk of distinction'

 encoding = tokenizer.encode_plus(
     example_text,
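
For context, the new `example_text` plugs into the surrounding README snippet roughly as in the sketch below. This is only a sketch: the `encode_plus` arguments after `example_text` and the sigmoid over the logits are assumptions, since the hunk above cuts off before that part of the call.

```python
# Minimal sketch of running the updated example end to end (settings after
# example_text are assumed; the diff hunk does not show them).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tum-nlp/Deberta_Human_Value_Detector")
trained_model = AutoModelForSequenceClassification.from_pretrained(
    "tum-nlp/Deberta_Human_Value_Detector", trust_remote_code=True
)

example_text = 'We should ban whaling because whales are a species at the risk of distinction'

encoding = tokenizer.encode_plus(
    example_text,
    add_special_tokens=True,
    max_length=512,            # assumed; the hunk ends before these arguments
    truncation=True,
    padding="max_length",
    return_attention_mask=True,
    return_tensors="pt",
)

with torch.no_grad():
    outputs = trained_model(
        input_ids=encoding["input_ids"],
        attention_mask=encoding["attention_mask"],
    )
    # Assumes a multi-label classification head, so per-label probabilities
    # are taken with a sigmoid rather than a softmax.
    probabilities = torch.sigmoid(outputs.logits)

print(probabilities)
```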