Update README.md #1
by Sharathhebbar24 - opened

README.md CHANGED
@@ -68,7 +68,7 @@ The model's performance is dependent on the nature and quality of the training d
 ### Transformers
 
 ```python
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 import torch
 
 tokenizer = AutoTokenizer.from_pretrained("laiyer/deberta-v3-base-prompt-injection")
@@ -80,7 +80,7 @@ classifier = pipeline(
   tokenizer=tokenizer,
   truncation=True,
   max_length=512,
-  device=torch.device("cuda" if torch.cuda.is_available() else "
+  device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
 )
 
 print(classifier("Your prompt injection is here"))
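The change adds the missing `pipeline` import and completes the truncated `device` argument so the snippet falls back to CPU when CUDA is unavailable. For reference, here is a minimal runnable sketch of the snippet as it reads after this change; the model-loading line and the pipeline task/`model=` arguments fall outside the hunks shown above, so they are filled in following the standard transformers text-classification pattern and should be treated as assumptions:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import torch

# Load the tokenizer and classifier weights from the Hub
tokenizer = AutoTokenizer.from_pretrained("laiyer/deberta-v3-base-prompt-injection")
# Assumed: the model-loading line is not visible in the diff hunks
model = AutoModelForSequenceClassification.from_pretrained("laiyer/deberta-v3-base-prompt-injection")

# Build a text-classification pipeline; the task name and model= argument are assumptions,
# the remaining keyword arguments match the diff above.
classifier = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    truncation=True,
    max_length=512,
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)

print(classifier("Your prompt injection is here"))
```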