dianamcm committed
Commit 25b5ad5
1 Parent(s): 8fe1e82

Training in progress, step 500

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "dccuchile/bert-base-spanish-wwm-uncased",
+  "_name_or_path": "bert-base-cased",
   "architectures": [
     "BertForTokenClassification"
   ],
@@ -38,12 +38,11 @@
   "model_type": "bert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "output_past": true,
-  "pad_token_id": 1,
+  "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
-  "transformers_version": "4.41.0",
+  "transformers_version": "4.41.1",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 31002
+  "vocab_size": 28996
 }
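The updated config names bert-base-cased as the base checkpoint, with the matching 28,996-token vocabulary and pad id 0. A minimal sketch of loading a token-classification head on top of that base, assuming transformers is installed and the Hub id "bert-base-cased" is reachable; the num_labels value is illustrative, not taken from this repository:

# Minimal sketch: load the base checkpoint named in the updated config and
# confirm the fields changed in this commit. num_labels=9 is illustrative.
from transformers import AutoConfig, AutoModelForTokenClassification

config = AutoConfig.from_pretrained("bert-base-cased")
print(config.vocab_size, config.pad_token_id)  # 28996 0

model = AutoModelForTokenClassification.from_pretrained("bert-base-cased", num_labels=9)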
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76d45b885b11d2aa75d9fbddde0d2942f6f501e8e76ed6a49fe4115cdd6e34de
-size 437092180
+oid sha256:48e6fa9415f33f5dbd8b7468fccc6160c5a245e0453897426aa4c86083e84e0b
+size 430929740
runs/May29_18-53-19_c2d08ad1f5a4/events.out.tfevents.1717008810.c2d08ad1f5a4.477.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:882958865f23b510c6f86aee1e14816cf30f25af9b6a97ddf0134d970e716da9
+size 5185
runs/May29_18-57-45_c2d08ad1f5a4/events.out.tfevents.1717009112.c2d08ad1f5a4.477.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2380126cba77b7354c78f3a23d24ff4b70acace873f380bf3145668beff33b1e
+size 5873
special_tokens_map.json CHANGED
@@ -1,37 +1,7 @@
 {
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "[UNK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
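The map now stores each special token as a bare string rather than a full token record; both forms resolve to the same tokens when read back. A minimal sketch, assuming transformers is installed; "bert-base-cased" stands in for this repository's own files:

# Minimal sketch: the special tokens resolve to plain strings, matching the
# simplified special_tokens_map.json above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
print(tokenizer.special_tokens_map)
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#  'cls_token': '[CLS]', 'mask_token': '[MASK]'}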
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,39 +1,39 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "[MASK]",
+      "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "1": {
-      "content": "[PAD]",
+    "100": {
+      "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "3": {
-      "content": "[UNK]",
+    "101": {
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "4": {
-      "content": "[CLS]",
+    "102": {
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "5": {
-      "content": "[SEP]",
+    "103": {
+      "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -43,14 +43,12 @@
   },
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
-  "do_lower_case": true,
+  "do_lower_case": false,
   "mask_token": "[MASK]",
   "model_max_length": 512,
-  "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "strip_accents": false,
+  "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
   "unk_token": "[UNK]"
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:08eda0bba9cac7979fd6e479bdef17c26c79f27e08c9f6d6a5d8ebd92c4f276a
-size 5176
+oid sha256:77de9be0acea5b8a8be6e7bc0190d58de426261dec9de2161dee022bac3a0a89
+size 5112
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff
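The replaced vocab.txt is the standard bert-base-cased vocabulary, which is what gives the special tokens the ids seen in tokenizer_config.json above ([PAD] 0, [UNK] 100, [CLS] 101, [SEP] 102, [MASK] 103). A minimal sketch to check those ids, assuming transformers is installed; "bert-base-cased" again stands in for this repository's own tokenizer files:

# Minimal sketch: the remapped special-token ids come straight from the
# bert-base-cased vocab.txt.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
for token in ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# [PAD] 0, [UNK] 100, [CLS] 101, [SEP] 102, [MASK] 103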