CarlosDataAnalysis committed
Commit: 3521546
Parent: a309f93

Training in progress, epoch 1

adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "bert-base-cased",
+  "base_model_name_or_path": "mrm8488/TinyBERT-spanish-uncased-finetuned-ner",
   "bias": "all",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c08232c42610bb1f2cbaeb51e3207c928aa138d33e65ac6a9b930430ba047abe
-size 2848477
+oid sha256:d6413772851e993811224973a7b9ee1c6a67679fc398a9f41891408ca2dc60f8
+size 403533
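
adapter_model.bin is tracked with Git LFS, so the repo stores only a pointer file (a sha256 and a byte size); the new pointer reflects the retrained, much smaller adapter (~394 KB vs. ~2.8 MB). A quick integrity check against the pointer, assuming a local download of the binary:

```python
# Sketch: verify a downloaded adapter_model.bin against the LFS pointer above.
import hashlib
import os

path = "adapter_model.bin"  # placeholder: local path to the downloaded binary
expected_oid = "d6413772851e993811224973a7b9ee1c6a67679fc398a9f41891408ca2dc60f8"
expected_size = 403533

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("binary matches its LFS pointer")
```
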
added_tokens.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "[CLS]": 101,
-  "[MASK]": 103,
-  "[PAD]": 0,
-  "[SEP]": 102,
-  "[UNK]": 100
+  "[CLS]": 4,
+  "[MASK]": 0,
+  "[PAD]": 1,
+  "[SEP]": 5,
+  "[UNK]": 3
 }
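
The special-token IDs change because they come from the new base tokenizer's vocabulary: bert-base-cased reserves [PAD]=0 and [UNK]/[CLS]/[SEP]/[MASK] at 100-103, while the Spanish TinyBERT vocabulary places them in the 0-5 range. A quick check, assuming the new mapping simply mirrors the base model's tokenizer on the Hub:

```python
# Sketch: confirm the remapped special-token IDs against the new base tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mrm8488/TinyBERT-spanish-uncased-finetuned-ner")
for token in ["[CLS]", "[MASK]", "[PAD]", "[SEP]", "[UNK]"]:
    print(token, tok.convert_tokens_to_ids(token))
# Expected per this commit: [CLS]=4, [MASK]=0, [PAD]=1, [SEP]=5, [UNK]=3
```
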
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,39 +1,39 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "[PAD]",
+      "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "100": {
-      "content": "[UNK]",
+    "1": {
+      "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "101": {
-      "content": "[CLS]",
+    "3": {
+      "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "102": {
-      "content": "[SEP]",
+    "4": {
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "103": {
-      "content": "[MASK]",
+    "5": {
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -44,9 +44,11 @@
   "additional_special_tokens": [],
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
   "do_lower_case": false,
   "mask_token": "[MASK]",
-  "model_max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
   "pad_token": "[PAD]",
   "padding": true,
   "sep_token": "[SEP]",
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff