Update README.md

README.md

---
library_name: peft
license: cc-by-sa-4.0
datasets:
- ctu-aic/csfever_v2
language:
- cs
metrics:
- accuracy
- f1
- recall
- precision
pipeline_tag: text-classification
---

# Model card for lora-xlm-roberta-large-squad2-csfever_v2-f1

## Model details

A LoRA (PEFT) adapter for Czech natural language inference: given a claim and a piece of evidence, the model classifies the relationship between them. It was trained on the ctu-aic/csfever_v2 dataset on top of an XLM-RoBERTa-large base fine-tuned on SQuAD 2.0.

## Training procedure

### Framework versions

- PEFT 0.4.0
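
To reproduce results, the installed PEFT version should match the one above; a minimal check:

```python
import peft

# The adapter on this card was saved with PEFT 0.4.0.
print(peft.__version__)
```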

## Uses

### PEFT (Transformers)

Load the base model, attach the LoRA adapter, and wrap both in a small custom `Pipeline` that scores a claim against a piece of evidence:

```python
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Pipeline

adapter_id = "ctu-aic/lora-xlm-roberta-large-squad2-csfever_v2-f1"

# Load the base model, then attach the LoRA adapter weights on top of it.
config = PeftConfig.from_pretrained(adapter_id)
model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, adapter_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Custom pipeline for NLI: encodes a (claim, evidence) pair and returns
# the raw logits together with the predicted label index.
class NliPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        # Route the `evidence` keyword argument to preprocess().
        preprocess_kwargs = {}
        if "evidence" in kwargs:
            preprocess_kwargs["evidence"] = kwargs["evidence"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, claim, evidence=""):
        # Encode claim and evidence as a single sequence pair.
        return self.tokenizer(claim, evidence, return_tensors=self.framework, truncation=True)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits
        predictions = torch.argmax(logits, dim=-1)
        return {"logits": logits, "label": int(predictions[0])}

nli_pipeline = NliPipeline(model=model, tokenizer=tokenizer)

# Evidence must be passed as a keyword argument; extra positional
# arguments are ignored by Pipeline.__call__.
nli_pipeline("claim", evidence="evidence")
```
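
For example, with a made-up Czech claim/evidence pair (the texts below are illustrative, not drawn from csfever_v2):

```python
claim = "Praha je hlavní město České republiky."  # "Prague is the capital of the Czech Republic."
evidence = "Praha je hlavní a zároveň největší město České republiky."  # "Prague is the capital and also the largest city of the Czech Republic."

result = nli_pipeline(claim, evidence=evidence)
print(result["label"])  # predicted class index
# Human-readable name for the index, if the base config defines one
# (otherwise this falls back to generic LABEL_i names):
print(model.config.id2label[result["label"]])
```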