MoritzLaurer (HF staff) and librarian-bot committed
Commit b3546ea
1 parent: 0de4830

Librarian Bot: Update Hugging Face dataset ID (#4)


- Librarian Bot: Update Hugging Face dataset ID (bf8a37ae515c0375d46672a412034147e99972e5)


Co-authored-by: Librarian Bot (Bot) <librarian-bot@users.noreply.huggingface.co>
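
The substantive change here is the switch from the bare dataset ID `anli` to the namespaced Hub ID `facebook/anli` in the card's `datasets` list; the rest of the YAML front matter is normalized in passing (keys reordered, inline comments dropped). As a minimal sanity check (a sketch, assuming the `datasets` library is installed and that the bare ID is now just a legacy alias), the updated ID resolves directly:

from datasets import load_dataset

# Load one ANLI split via the namespaced ID this commit introduces.
anli_test = load_dataset("facebook/anli", split="test_r3")
print(anli_test[0])  # premise / hypothesis / label fields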

Files changed (1): README.md (+54 -67)
README.md CHANGED
@@ -1,101 +1,88 @@
 ---
-language:
+language:
 - en
+license: mit
 tags:
 - text-classification
 - zero-shot-classification
-license: mit
-metrics:
-- accuracy
 datasets:
 - multi_nli
-- anli
+- facebook/anli
 - fever
 - lingnli
 - alisawuffles/WANLI
+metrics:
+- accuracy
 pipeline_tag: zero-shot-classification
-#- text-classification
-#widget:
-#- text: "I first thought that I really liked the movie, but upon second thought it was actually disappointing. [SEP] The movie was not good."
-
-model-index: # info: https://github.com/huggingface/hub-docs/blame/main/modelcard.md
+model-index:
 - name: DeBERTa-v3-large-mnli-fever-anli-ling-wanli
   results:
   - task:
-      type: text-classification # Required. Example: automatic-speech-recognition
-      name: Natural Language Inference # Optional. Example: Speech Recognition
+      type: text-classification
+      name: Natural Language Inference
     dataset:
-      type: multi_nli # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: MultiNLI-matched # Required. A pretty name for the dataset. Example: Common Voice (French)
-      split: validation_matched # Optional. Example: test
+      name: MultiNLI-matched
+      type: multi_nli
+      split: validation_matched
     metrics:
-    - type: accuracy # Required. Example: wer. Use metric id from https://hf.co/metrics
-      value: 0,912 # Required. Example: 20.90
-      #name: # Optional. Example: Test WER
-      verified: false # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+    - type: accuracy
+      value: 0,912
+      verified: false
   - task:
-      type: text-classification # Required. Example: automatic-speech-recognition
-      name: Natural Language Inference # Optional. Example: Speech Recognition
+      type: text-classification
+      name: Natural Language Inference
     dataset:
-      type: multi_nli # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: MultiNLI-mismatched # Required. A pretty name for the dataset. Example: Common Voice (French)
-      split: validation_mismatched # Optional. Example: test
+      name: MultiNLI-mismatched
+      type: multi_nli
+      split: validation_mismatched
     metrics:
-    - type: accuracy # Required. Example: wer. Use metric id from https://hf.co/metrics
-      value: 0,908 # Required. Example: 20.90
-      #name: # Optional. Example: Test WER
-      verified: false # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+    - type: accuracy
+      value: 0,908
+      verified: false
   - task:
-      type: text-classification # Required. Example: automatic-speech-recognition
-      name: Natural Language Inference # Optional. Example: Speech Recognition
+      type: text-classification
+      name: Natural Language Inference
     dataset:
-      type: anli # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: ANLI-all # Required. A pretty name for the dataset. Example: Common Voice (French)
-      split: test_r1+test_r2+test_r3 # Optional. Example: test
+      name: ANLI-all
+      type: anli
+      split: test_r1+test_r2+test_r3
     metrics:
-    - type: accuracy # Required. Example: wer. Use metric id from https://hf.co/metrics
-      value: 0,702 # Required. Example: 20.90
-      #name: # Optional. Example: Test WER
-      verified: false # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+    - type: accuracy
+      value: 0,702
+      verified: false
   - task:
-      type: text-classification # Required. Example: automatic-speech-recognition
-      name: Natural Language Inference # Optional. Example: Speech Recognition
+      type: text-classification
+      name: Natural Language Inference
     dataset:
-      type: anli # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: ANLI-r3 # Required. A pretty name for the dataset. Example: Common Voice (French)
-      split: test_r3 # Optional. Example: test
+      name: ANLI-r3
+      type: anli
+      split: test_r3
     metrics:
-    - type: accuracy # Required. Example: wer. Use metric id from https://hf.co/metrics
-      value: 0,64 # Required. Example: 20.90
-      #name: # Optional. Example: Test WER
-      verified: false # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+    - type: accuracy
+      value: 0,64
+      verified: false
   - task:
-      type: text-classification # Required. Example: automatic-speech-recognition
-      name: Natural Language Inference # Optional. Example: Speech Recognition
+      type: text-classification
+      name: Natural Language Inference
     dataset:
-      type: alisawuffles/WANLI # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: WANLI # Required. A pretty name for the dataset. Example: Common Voice (French)
-      split: test # Optional. Example: test
+      name: WANLI
+      type: alisawuffles/WANLI
+      split: test
    metrics:
-    - type: accuracy # Required. Example: wer. Use metric id from https://hf.co/metrics
-      value: 0,77 # Required. Example: 20.90
-      #name: # Optional. Example: Test WER
-      verified: false # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+    - type: accuracy
+      value: 0,77
+      verified: false
   - task:
-      type: text-classification # Required. Example: automatic-speech-recognition
-      name: Natural Language Inference # Optional. Example: Speech Recognition
+      type: text-classification
+      name: Natural Language Inference
     dataset:
-      type: lingnli # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: LingNLI # Required. A pretty name for the dataset. Example: Common Voice (French)
-      split: test # Optional. Example: test
+      name: LingNLI
+      type: lingnli
+      split: test
     metrics:
-    - type: accuracy # Required. Example: wer. Use metric id from https://hf.co/metrics
-      value: 0,87 # Required. Example: 20.90
-      #name: # Optional. Example: Test WER
-      verified: false # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
-
-
-
 ---
 
 # DeBERTa-v3-large-mnli-fever-anli-ling-wanli
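
Since everything between the `---` markers above is machine-readable card metadata, the restructured front matter can be read back programmatically once the commit lands. A minimal sketch with `huggingface_hub` (assuming the library is installed; the repo ID is inferred from the commit author and model name):

from huggingface_hub import ModelCard

# Fetch the model card and inspect the metadata this commit edits.
card = ModelCard.load("MoritzLaurer/DeBERTa-v3-large-mnli-fever-anli-ling-wanli")
print(card.data.datasets)      # ['multi_nli', 'facebook/anli', 'fever', ...]
print(card.data.pipeline_tag)  # 'zero-shot-classification'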