chitanda committed on
Commit 30f7a38
1 Parent(s): e57e657

upload fp16 models

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"[MASK]": 128000}
config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "_name_or_path": "pretrained-models/deberta-v2-xxlarge",
+ "add_enhanced_decoder": true,
+ "architectures": [
+ "DebertaV2ForMultipleChoicePreTrain"
+ ],
+ "attention_head_size": 64,
+ "attention_probs_dropout_prob": 0.1,
+ "conv_act": "gelu",
+ "conv_kernel_size": 3,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 6144,
+ "layer_norm_eps": 1e-07,
+ "max_position_embeddings": 512,
+ "max_relative_positions": -1,
+ "mlp_hidden_size": 3072,
+ "model_type": "deberta-v2",
+ "norm_rel_ebd": "layer_norm",
+ "num_attention_heads": 24,
+ "num_hidden_layers": 48,
+ "pad_token_id": 0,
+ "pooler_dropout": 0,
+ "pooler_hidden_act": "gelu",
+ "pooler_hidden_size": 1536,
+ "pos_att_type": [
+ "p2c",
+ "c2p"
+ ],
+ "position_biased_input": false,
+ "position_buckets": 256,
+ "relative_attention": true,
+ "share_att_key": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.15.0",
+ "type_vocab_size": 0,
+ "use_stable_embedding": false,
+ "vocab_size": 128100
+ }
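config.json describes a DeBERTa-v2 xxlarge encoder (48 layers, hidden size 1536, vocab size 128100) whose head class, DebertaV2ForMultipleChoicePreTrain, comes from the authors' own code (models.deberta in the training config below) rather than stock transformers. A minimal sketch of inspecting the uploaded config with the standard DebertaV2Config class, assuming the repo files are downloaded to the working directory:

# Sketch: read the uploaded config with stock transformers.
# Loading the full model would additionally require the authors' custom
# DebertaV2ForMultipleChoicePreTrain class, which is not part of transformers.
from transformers import DebertaV2Config

config = DebertaV2Config.from_pretrained(".")
print(config.model_type)          # "deberta-v2"
print(config.num_hidden_layers)   # 48
print(config.hidden_size)         # 1536
print(config.architectures)       # ["DebertaV2ForMultipleChoicePreTrain"]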
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74d9f7ed01565f457923e0c26f1a86c21ffd30e7bfa694eded38971658b75639
+ size 3539235155
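pytorch_model.bin is stored as a Git LFS pointer; the actual ~3.5 GB weight file replaces it after git lfs pull or a Hub download. A small sketch, using only the Python standard library, for checking a downloaded copy against the size and sha256 recorded in the pointer above:

# Sketch: verify a downloaded LFS object against the oid/size in the pointer file.
import hashlib
import os

EXPECTED_SHA256 = "74d9f7ed01565f457923e0c26f1a86c21ffd30e7bfa694eded38971658b75639"
EXPECTED_SIZE = 3539235155

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert os.path.getsize("pytorch_model.bin") == EXPECTED_SIZE, "size mismatch (still an LFS pointer?)"
assert sha256_of("pytorch_model.bin") == EXPECTED_SHA256, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")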
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5598d5e96f339a8d980c15f9afd405a2e5e1be7db41de3ed13b0f03fac1e8c17
+ size 2447305
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "split_by_punct": false, "sp_model_kwargs": {}, "vocab_type": "spm", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "pretrained-models/deberta-v2-xxlarge", "tokenizer_class": "DebertaV2Tokenizer"}
training_config.yaml ADDED
@@ -0,0 +1,96 @@
+ train_file: wiki_erica_path/v7/union/train_distant.path_v7.train.0.pkl
+ dev_file: wiki_erica_path/v7/union/train_distant.path_v7.dev.pkl
+ test_file: null
+ model:
+ _target_: models.deberta.DebertaV2ForMultipleChoicePreTrain.from_pretrained
+ mlp_hidden_size: 3072
+ fs_checkpoint: false
+ fs_checkpoint_offload_to_cpu: false
+ read_tensor:
+ _target_: dataset.wiki_entity_path_v8_2.convert_examples_into_features
+ max_neg_num: 3
+ aug_num: 1
+ max_seq_length: 256
+ shuffle_context: true
+ min_rep_num: 5
+ geo_p: 0.4
+ deduct_ratio: 1.0
+ context_ratio: 1.0
+ num_workers: 32
+ extended_vocab: null
+ collator:
+ _target_: dataset.wiki_entity_path_v8.WikiPathDatasetCollatorWithContext
+ max_seq_length: 256
+ tokenizer: pretrained-models/deberta-v2-xxlarge
+ mlm_probability: 0.15
+ max_option_num: 4
+ swap: true
+ num_workers: 4
+ prefetch_factor: 4
+ model_name_or_path: pretrained-models/deberta-v2-xxlarge
+ pretrain: null
+ output_dir: experiments/deberta.v2.xxlarge.path.v7_v8.2.2.1aug.ctx.A100.v1.3.w4.s${seed}.fsdp.adamw
+ do_train: Train
+ evaluate_during_training: true
+ do_eval: false
+ eval_sub_path: null
+ do_preprocess: false
+ per_gpu_train_batch_size: 2
+ per_gpu_eval_batch_size: 2
+ learning_rate: 1.0e-05
+ gradient_accumulation_steps: 512
+ weight_decay: 0.01
+ adam_epsilon: 1.0e-06
+ adam_betas: (0.9, 0.999)
+ max_grad_norm: 1.0
+ num_train_epochs: 1
+ max_steps: 200
+ warmup_proportion: 0.2
+ warmup_steps: 0
+ optimizer: null
+ use_nvlamb: null
+ bit_training: null
+ multi_tensor: null
+ logging_steps: 1
+ save_steps: 50
+ eval_steps: 50
+ no_cuda: false
+ seed: 42
+ local_rank: 0
+ fp16: true
+ fp16_opt_level: O2
+ ds_cfg:
+ train_micro_batch_size_per_gpu: ${per_gpu_train_batch_size}
+ gradient_accumulation_steps: ${gradient_accumulation_steps}
+ optimizer:
+ type: AdamW
+ params:
+ lr: ${learning_rate}
+ betas:
+ - 0.9
+ - 0.999
+ eps: ${adam_epsilon}
+ weight_decay: ${weight_decay}
+ scheduler:
+ type: WarmupDecayLR
+ params:
+ total_num_steps: null
+ warmup_max_lr: ${learning_rate}
+ warmup_num_steps: null
+ warmup_type: linear
+ gradient_clipping: ${max_grad_norm}
+ fp16:
+ enabled: ${fp16}
+ initial_scale_power: 12
+ zero_optimization:
+ stage: 3
+ steps_per_print: 1024
+ reshard_after_forward: false
+ flatten_parameters: true
+ move_grads_to_cpu: false
+ move_params_to_cpu: false
+ n_gpu: 1
+ device: cuda:0
+ train_batch_size: 2
+ eval_batch_size: 2
+ note: null
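training_config.yaml is a Hydra/OmegaConf-style config: _target_ entries name the classes and functions to instantiate, and ${...} references such as ${seed} or ${learning_rate} are interpolations resolved against the top-level keys (note that the diff view above flattens the file's original YAML nesting). A small sketch of resolving the interpolations with OmegaConf, assuming the omegaconf package and a local copy of the file:

# Sketch: load the Hydra-style config and resolve ${...} interpolations.
from omegaconf import OmegaConf

cfg = OmegaConf.load("training_config.yaml")
print(cfg.learning_rate)   # 1e-05
print(cfg.seed)            # 42
print(cfg.output_dir)      # the "s${seed}" segment resolves to "s42" on access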