afbudiman committed
Commit ef2865f
1 Parent(s): c75f2ac

Training in progress, step 500

config.json CHANGED
@@ -37,7 +37,7 @@
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
- "transformers_version": "4.17.0",
+ "transformers_version": "4.18.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 50000
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e8a06634db120c501d51b5e9bd3121fa621f74cd40b0a8e8f26377fb61e74505
- size 497859629
+ oid sha256:2cded1a66db12dd9e4802a879bec1dcd16e44539a6239938fd28f224688dca92
+ size 497845613
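pytorch_model.bin is tracked with Git LFS, so the diff above rewrites only the pointer file (a new sha256 oid and a slightly smaller byte size), not the weights themselves. A minimal sketch (verify_lfs_pointer is a hypothetical helper, and the real file is assumed to have been pulled locally) of checking a downloaded checkpoint against the new pointer:

import hashlib
import os

def verify_lfs_pointer(path, expected_oid, expected_size):
    # Stream the file in 1 MiB chunks so the ~500 MB checkpoint is never
    # held in memory all at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (os.path.getsize(path) == expected_size
            and digest.hexdigest() == expected_oid)

ok = verify_lfs_pointer(
    "pytorch_model.bin",
    expected_oid="2cded1a66db12dd9e4802a879bec1dcd16e44539a6239938fd28f224688dca92",
    expected_size=497845613,
)
print("pointer matches:", ok)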
tokenizer.json ADDED
(diff too large to render)
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "/root/.cache/huggingface/transformers/b515a756d9ddf12a7a391ea596c488ac805f0576790934e590ce250a3e4ff056.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "indobenchmark/indobert-base-p1", "tokenizer_class": "BertTokenizer"}
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "/root/.cache/huggingface/transformers/b515a756d9ddf12a7a391ea596c488ac805f0576790934e590ce250a3e4ff056.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "indobenchmark/indobert-base-p1", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:11741e32e83b49984fd43267a9af6fa256c4e54cee31dbe41df35fad5ee12c54
+ oid sha256:dbe8564743553ea6ffb6c952d46862d47d703dd28409c5cb24ffd8f6d9d6dec1
  size 2991
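training_args.bin is the pickled TrainingArguments object the Hugging Face Trainer saves alongside each checkpoint; here the byte size is unchanged (2991) but the oid differs, suggesting some field values changed between saves. A minimal sketch, assuming torch and transformers are installed and the file has been pulled, of inspecting it:

import torch

# TrainingArguments is a pickled Python object, not a tensor file, so
# newer torch versions (which default to weights_only=True) need
# weights_only=False to deserialize it.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.save_steps)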