tasinhoque committed
Commit 90503a4
1 Parent(s): cb50ccc

Training in progress, step 500
config.json CHANGED
@@ -1,13 +1,15 @@
 {
-  "_name_or_path": "distilbert-base-uncased",
-  "activation": "gelu",
+  "_name_or_path": "roberta-large",
   "architectures": [
-    "DistilBertForSequenceClassification"
+    "RobertaForSequenceClassification"
   ],
-  "attention_dropout": 0.1,
-  "dim": 768,
-  "dropout": 0.1,
-  "hidden_dim": 3072,
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
@@ -39,6 +41,7 @@
     "27": "LABEL_27"
   },
   "initializer_range": 0.02,
+  "intermediate_size": 4096,
   "label2id": {
     "LABEL_0": 0,
     "LABEL_1": 1,
@@ -69,17 +72,17 @@
     "LABEL_8": 8,
     "LABEL_9": 9
   },
-  "max_position_embeddings": 512,
-  "model_type": "distilbert",
-  "n_heads": 12,
-  "n_layers": 6,
-  "pad_token_id": 0,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
   "problem_type": "multi_label_classification",
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
-  "tie_weights_": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.26.1",
-  "vocab_size": 30522
+  "transformers_version": "4.20.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
 }
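
This commit swaps the base checkpoint from distilbert-base-uncased to roberta-large: 24 hidden layers of size 1024 with 16 attention heads instead of DistilBERT's 6 layers of size 768, while the task setup (28 labels, multi-label classification) is unchanged. A minimal loading sketch, assuming the files above live in a Hub repo whose id this page does not show ("tasinhoque/<this-repo>" is a placeholder):

from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

repo_id = "tasinhoque/<this-repo>"  # placeholder; substitute the actual repo id

config = AutoConfig.from_pretrained(repo_id)
assert config.model_type == "roberta"                       # "distilbert" before this commit
assert config.num_labels == 28                              # LABEL_0 .. LABEL_27
assert config.problem_type == "multi_label_classification"

model = AutoModelForSequenceClassification.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)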
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19424ed24da12c023d56f789d211e4072e8898e193049073f2215eb1df0ab86d
-size 267935469
+oid sha256:75a3c0978a2d968979dcd40e9874ee65eb348d2a7cdeda93062ee42b81ae4667
+size 1421690925
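
The pointer's size field jumping from 267935469 bytes (~268 MB) to 1421690925 bytes (~1.42 GB) is consistent with the architecture change: roughly 66M float32 parameters for distilbert-base-uncased versus roughly 355M for roberta-large. A back-of-envelope check (parameter counts are approximate, not taken from this repo):

# ~4 bytes per float32 weight
for name, params in [("distilbert-base-uncased", 66_000_000), ("roberta-large", 355_000_000)]:
    print(name, f"~{params * 4 / 1e9:.2f} GB")  # ~0.26 GB and ~1.42 GB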
runs/Feb21_13-56-12_09b62d1b427a/1676987856.6785717/events.out.tfevents.1676987856.09b62d1b427a.23.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c38df964ba4ef3d77cdc680ae19e627350ed7a6b3eeccbf2eba6a925c2176789
+size 5371
runs/Feb21_13-56-12_09b62d1b427a/events.out.tfevents.1676987856.09b62d1b427a.23.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5005430982c758b0740f35e8b1a4da4dd5653b4db595814eedd78a153e71298
+size 5360
special_tokens_map.json CHANGED
@@ -1,7 +1,15 @@
 {
-  "cls_token": "[CLS]",
-  "mask_token": "[MASK]",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "unk_token": "[UNK]"
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
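
Note that mask_token is now a full token spec with "lstrip": true, the RoBERTa convention: since the byte-level BPE vocabulary bakes leading spaces into tokens, <mask> must absorb the space before it. A small sketch of the effect (requires the transformers package; it loads the upstream roberta-large tokenizer rather than this repo's files):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("roberta-large")
ids = tok("The capital of France is <mask>.")["input_ids"]
print(tok.convert_ids_to_tokens(ids))
# The space before <mask> is folded into the mask token instead of
# producing a stray "Ġ" piece, because lstrip is true for <mask>.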
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,14 +1,16 @@
 {
-  "cls_token": "[CLS]",
-  "do_lower_case": true,
-  "mask_token": "[MASK]",
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
   "model_max_length": 512,
-  "name_or_path": "distilbert-base-uncased",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
+  "name_or_path": "roberta-large",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
   "special_tokens_map_file": null,
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "DistilBertTokenizer",
-  "unk_token": "[UNK]"
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
 }
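
The tokenizer class changes from DistilBertTokenizer (lowercasing WordPiece) to RobertaTokenizer (case-preserving byte-level BPE), which is also why merges.txt and vocab.json are added in this commit. model_max_length stays 512 even though max_position_embeddings is 514: RoBERTa reserves two position slots for its padding-offset scheme. A quick comparison sketch against the upstream checkpoints (not this repo's files):

from transformers import AutoTokenizer

distil = AutoTokenizer.from_pretrained("distilbert-base-uncased")
roberta = AutoTokenizer.from_pretrained("roberta-large")

print(distil.tokenize("Hello world"))   # ['hello', 'world'] -- lowercased WordPiece
print(roberta.tokenize("Hello world"))  # ['Hello', 'Ġworld'] -- byte-level BPE; "Ġ" marks a leading space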
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:22a0da41b689868699c951e90991f305cadd15e009bef51b47f3abd8845815b0
-size 3515
+oid sha256:0fbde008084afe37bef40b2989c71a408dc4ddcfc7f285e0a249e6e675cd9f32
+size 3311
vocab.json ADDED
The diff for this file is too large to render. See raw diff