nightdessert committed 44b8a04 (parent: 2ea844f)
Update README.md

README.md CHANGED
@@ -26,12 +26,23 @@ model_name = "nightdessert/WeCheck"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSequenceClassification.from_pretrained(model_name)
 premise = "I first thought that I liked the movie, but upon second thought it was actually disappointing." # Input for Summarization / Dialogue / Paraphrase
-hypothesis = "The movie was not good."
+hypothesis = "The movie was not good." # Output for Summarization / Dialogue / Paraphrase
 input = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt", truncation_strategy="only_first", max_length=512)
 output = model(input["input_ids"].to(device))['logits'][:,0] # device = "cuda:0" or "cpu"
 prediction = torch.sigmoid(output).tolist()
-print(prediction)
+print(prediction) # 0.884
 ```
+or apply it to a batch of samples:
+```python
+premise = ["I first thought that I liked the movie, but upon second thought it was actually disappointing."] * 3 # Input list for Summarization / Dialogue / Paraphrase
+hypothesis = ["The movie was not good."] * 3 # Output list for Summarization / Dialogue / Paraphrase
+batch_tokens = tokenizer.batch_encode_plus(list(zip(premise, hypothesis)), padding=True,
+                                           truncation=True, max_length=512, return_tensors="pt", truncation_strategy="only_first")
+output = model(batch_tokens["input_ids"].to(device))['logits'][:,0] # device = "cuda:0" or "cpu"
+prediction = torch.sigmoid(output).tolist()
+print(prediction) # [0.884, 0.884, 0.884]
+```
+

 license: openrail
 pipeline_tag: text-classification
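
For reference, a self-contained version of the snippet as it stands after this commit. The imports, the `device` assignment, and the `model.to(device)` call are filled in here as assumptions, since the README fragment leaves them implicit:

```python
# Minimal runnable sketch of the updated README snippet. The imports, the
# `device` assignment, and the model.to(device) call are assumptions; the
# diff above leaves them implicit.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "nightdessert/WeCheck"  # from the hunk header
device = "cuda:0" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

premise = "I first thought that I liked the movie, but upon second thought it was actually disappointing."
hypothesis = "The movie was not good."

# Encode the (premise, hypothesis) pair, truncating only the premise side
# if the pair exceeds 512 tokens.
inputs = tokenizer(premise, hypothesis, truncation="only_first",
                   max_length=512, return_tensors="pt")

# The sigmoid of the first logit is read as a consistency score in [0, 1].
with torch.no_grad():
    logits = model(inputs["input_ids"].to(device))["logits"][:, 0]
print(torch.sigmoid(logits).tolist())  # ~0.884 according to the README
```

Note that the README passes the legacy `truncation_strategy="only_first"` alongside `truncation=True`; in recent transformers versions the same behavior is expressed as `truncation="only_first"`, which is what the sketch uses.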