lrodrigues committed on
Commit
5694aea
1 Parent(s): c40af9d
.gitattributes CHANGED
@@ -20,7 +20,6 @@
20
  *.pt filter=lfs diff=lfs merge=lfs -text
21
  *.pth filter=lfs diff=lfs merge=lfs -text
22
  *.rar filter=lfs diff=lfs merge=lfs -text
23
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
24
  *.tar.* filter=lfs diff=lfs merge=lfs -text
25
  *.tflite filter=lfs diff=lfs merge=lfs -text
26
  *.tgz filter=lfs diff=lfs merge=lfs -text
@@ -29,3 +28,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
  *.zip filter=lfs diff=lfs merge=lfs -text
30
  *.zst filter=lfs diff=lfs merge=lfs -text
31
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
20
  *.pt filter=lfs diff=lfs merge=lfs -text
21
  *.pth filter=lfs diff=lfs merge=lfs -text
22
  *.rar filter=lfs diff=lfs merge=lfs -text
 
23
  *.tar.* filter=lfs diff=lfs merge=lfs -text
24
  *.tflite filter=lfs diff=lfs merge=lfs -text
25
  *.tgz filter=lfs diff=lfs merge=lfs -text
28
  *.zip filter=lfs diff=lfs merge=lfs -text
29
  *.zst filter=lfs diff=lfs merge=lfs -text
30
  *tfevents* filter=lfs diff=lfs merge=lfs -text
31
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ datasets:
3
+ - squad_v2
4
+ language: en
5
+ license: mit
6
+ pipeline_tag: question-answering
7
+ tags:
8
+ - deberta
9
+ - deberta-v3
10
+ model-index:
11
+ - name: navteca/deberta-v3-large-squad2
12
+ results:
13
+ - task:
14
+ type: question-answering
15
+ name: Question Answering
16
+ dataset:
17
+ name: squad_v2
18
+ type: squad_v2
19
+ config: squad_v2
20
+ split: validation
21
+ metrics:
22
+ - name: Exact Match
23
+ type: exact_match
24
+ value: 88.0876
25
+ verified: true
26
+ - name: F1
27
+ type: f1
28
+ value: 91.1623
29
+ verified: true
30
+ - task:
31
+ type: question-answering
32
+ name: Question Answering
33
+ dataset:
34
+ name: squad
35
+ type: squad
36
+ config: plain_text
37
+ split: validation
38
+ metrics:
39
+ - name: Exact Match
40
+ type: exact_match
41
+ value: 89.2366
42
+ verified: true
43
+ - name: F1
44
+ type: f1
45
+ value: 95.0569
46
+ verified: true
47
+ ---
48
+
49
+ # Deberta v3 large model for QA (SQuAD 2.0)
50
+
51
+ This is the [deberta-v3-large](https://huggingface.co/microsoft/deberta-v3-large) model, fine-tuned using the [SQuAD2.0](https://huggingface.co/datasets/squad_v2) dataset. It's been trained on question-answer pairs, including unanswerable questions, for the task of Question Answering.
52
+
53
+ ## Training Data
54
+ The models have been trained on the [SQuAD 2.0](https://rajpurkar.github.io/SQuAD-explorer/) dataset.
55
+
56
+ It can be used for the question answering task.
57
+
58
+ ## Usage and Performance
59
+ The trained model can be used like this:
60
+ ```python
61
+ from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
62
+
63
+ # Load model & tokenizer
64
+ deberta_model = AutoModelForQuestionAnswering.from_pretrained('navteca/deberta-v3-large-squad2')
65
+ deberta_tokenizer = AutoTokenizer.from_pretrained('navteca/deberta-v3-large-squad2')
66
+
67
+ # Get predictions
68
+ nlp = pipeline('question-answering', model=deberta_model, tokenizer=deberta_tokenizer)
69
+
70
+ result = nlp({
71
+ 'question': 'How many people live in Berlin?',
72
+ 'context': 'Berlin had a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.'
73
+ })
74
+
75
+ print(result)
76
+
77
+ #{
78
+ # "answer": "3,520,031"
79
+ # "end": 36,
80
+ # "score": 0.96186668,
81
+ # "start": 27,
82
+ #}
83
+ ```
84
+
85
+ ## Author
86
+ [deepset](http://deepset.ai/)
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ {
2
+ "[MASK]": 128000
3
+ }
config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "DebertaV2ForQuestionAnswering"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "hidden_act": "gelu",
7
+ "hidden_dropout_prob": 0.1,
8
+ "hidden_size": 1024,
9
+ "initializer_range": 0.02,
10
+ "intermediate_size": 4096,
11
+ "language": "english",
12
+ "layer_norm_eps": 1e-7,
13
+ "max_position_embeddings": 512,
14
+ "max_relative_positions": -1,
15
+ "model_type": "deberta-v2",
16
+ "name": "DebertaV2",
17
+ "norm_rel_ebd": "layer_norm",
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 24,
20
+ "pad_token_id": 0,
21
+ "pooler_dropout": 0,
22
+ "pooler_hidden_act": "gelu",
23
+ "pooler_hidden_size": 1024,
24
+ "pos_att_type": [
25
+ "p2c",
26
+ "c2p"
27
+ ],
28
+ "position_biased_input": false,
29
+ "position_buckets": 256,
30
+ "relative_attention": true,
31
+ "share_att_key": true,
32
+ "summary_activation": "tanh",
33
+ "summary_last_dropout": 0,
34
+ "summary_type": "first",
35
+ "summary_use_proj": false,
36
+ "torch_dtype": "float32",
37
+ "transformers_version": "4.19.0",
38
+ "type_vocab_size": 0,
39
+ "vocab_size": 128100
40
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc31220db2ad55672fea1f369664c17628c021b528b1ae65b4b3f2bc7c6910e4
3
+ size 1736194351
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "eos_token": "[SEP]",
5
+ "mask_token": "[MASK]",
6
+ "pad_token": "[PAD]",
7
+ "sep_token": "[SEP]",
8
+ "unk_token": "[UNK]"
9
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
+ size 2464616
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": false,
5
+ "eos_token": "[SEP]",
6
+ "mask_token": "[MASK]",
7
+ "pad_token": "[PAD]",
8
+ "sep_token": "[SEP]",
9
+ "sp_model_kwargs": {},
10
+ "special_tokens_map_file": null,
11
+ "split_by_punct": false,
12
+ "tokenizer_class": "DebertaV2Tokenizer",
13
+ "unk_token": "[UNK]",
14
+ "vocab_type": "spm"
15
+ }