HyungYoun committed on
Commit 579885f
1 Parent(s): ff8c21f

Training in progress epoch 0

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
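The added attribute tells Git LFS to store tokenizer.json (about 17 MB, see its pointer file further down) outside the regular Git objects. A minimal sketch of fetching the resolved file via huggingface_hub, assuming this commit lives in the HyungYoun/xlm-no-I repo named by hub_model_id in hparams.yml; the LFS pointer is resolved to the real file transparently:

    from huggingface_hub import hf_hub_download

    # Downloads the actual ~17 MB tokenizer.json, not the 3-line LFS pointer.
    path = hf_hub_download(repo_id="HyungYoun/xlm-no-I", filename="tokenizer.json")
    print(path)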
.gitignore ADDED
@@ -0,0 +1,2 @@
+ step_*
+ epoch_*
config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-GANPAN",
+     "1": "B-NOISE",
+     "2": "B-TEL"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-GANPAN": 0,
+     "B-NOISE": 1,
+     "B-TEL": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
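The config defines an XLM-RoBERTa base encoder with a 3-label token-classification head (B-GANPAN, B-NOISE, B-TEL). A minimal sketch of loading the checkpoint with transformers, assuming it is published under the HyungYoun/xlm-no-I repo id recorded in hparams.yml below:

    from transformers import AutoConfig, AutoModelForTokenClassification, AutoTokenizer

    repo = "HyungYoun/xlm-no-I"          # hub_model_id from hparams.yml
    config = AutoConfig.from_pretrained(repo)
    print(config.id2label)               # 3-label head: B-GANPAN, B-NOISE, B-TEL

    tokenizer = AutoTokenizer.from_pretrained(repo)
    model = AutoModelForTokenClassification.from_pretrained(repo)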
ner_no_trainer/1669560693.7771602/events.out.tfevents.1669560693.4d9dc08cf8e8.13169.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1374ea2b9fa03aec5680c147db98bc70e54081eecae3d5f3d52ff3d53417a611
+ size 1675
ner_no_trainer/1669560693.7797265/hparams.yml ADDED
@@ -0,0 +1,35 @@
+ checkpointing_steps: null
+ config_name: null
+ dataset_config_name: null
+ dataset_name: null
+ debug: false
+ gradient_accumulation_steps: 1
+ hub_model_id: HyungYoun/xlm-no-I
+ hub_token: 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
+ ignore_mismatched_sizes: false
+ label_all_tokens: false
+ label_column_name: null
+ learning_rate: 2.0e-05
+ lr_scheduler_type: linear
+ max_length: 128
+ max_train_steps: 6474
+ model_name_or_path: xlm-roberta-base
+ model_type: null
+ num_train_epochs: 3
+ num_warmup_steps: 0
+ output_dir: /xlm-no-I/
+ pad_to_max_length: false
+ per_device_eval_batch_size: 8
+ per_device_train_batch_size: 256
+ push_to_hub: true
+ report_to: all
+ resume_from_checkpoint: null
+ return_entity_level_metrics: false
+ seed: null
+ task_name: ner
+ text_column_name: null
+ tokenizer_name: null
+ train_file: /hdd/RoadView/ner/ner_word_train_no_i.json
+ validation_file: /hdd/RoadView/ner/ner_word_val_no_i.json
+ weight_decay: 0.0
+ with_tracking: true
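These are the run arguments logged by the tracking-enabled NER training script (xlm-roberta-base fine-tuned for 3 epochs, batch size 256 per device, learning rate 2e-5, 6474 max steps). A minimal sketch of reading the logged hyperparameters back, assuming PyYAML is installed and using the path as committed here:

    import yaml

    # Read back the run arguments recorded for this training job.
    with open("ner_no_trainer/1669560693.7797265/hparams.yml") as f:
        hparams = yaml.safe_load(f)

    print(hparams["model_name_or_path"])           # xlm-roberta-base
    print(hparams["per_device_train_batch_size"])  # 256
    print(hparams["num_train_epochs"], hparams["max_train_steps"])  # 3 6474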
ner_no_trainer/events.out.tfevents.1669560693.4d9dc08cf8e8.13169.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:700626d23f7dcd68d2c046310484b7c57f94718d76e49c7c6c0ccf74d2e12645
+ size 176
ner_no_trainer/seqeval_accuracy/events.out.tfevents.1669562101.4d9dc08cf8e8.13169.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2a342844621bfa3cdbf323008e331f6caf8dc0903be02816b1859a0057cae17
+ size 86
ner_no_trainer/seqeval_f1/events.out.tfevents.1669562101.4d9dc08cf8e8.13169.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b268e307d2a580c6e4ae384bf0a596c90970590911eabf072f463bcfc64ed388
+ size 86
ner_no_trainer/seqeval_precision/events.out.tfevents.1669562101.4d9dc08cf8e8.13169.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:631fc62fa739fb6954fb7efdf7d0f184e0456feec26cce07ad7b51e8afd20b53
+ size 86
ner_no_trainer/seqeval_recall/events.out.tfevents.1669562101.4d9dc08cf8e8.13169.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:292d0d7a684790a18056885dd24220d72a1b9c53fb485e0af29c5e790c8c2f2b
+ size 86
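The four seqeval_* directories hold per-metric TensorBoard event files written because with_tracking is enabled. A minimal sketch of inspecting one of them with TensorBoard's event reader; the tag names are not visible from the LFS pointers above, so they are printed rather than assumed:

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    # List whatever was logged into the seqeval_f1 event file for this run.
    acc = EventAccumulator("ner_no_trainer/seqeval_f1")
    acc.Reload()
    print(acc.Tags())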
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de92988dfb34d1c1215a27f87f03acda532ce249324297976ba68957edd6b1b1
+ size 1109892529
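The model weights (~1.1 GB) are committed as a Git LFS pointer whose oid is the SHA-256 of the real file's bytes. A minimal sketch of checking a downloaded copy against the recorded oid:

    import hashlib

    # The LFS oid above is the SHA-256 digest of the actual file content.
    def lfs_oid(path: str) -> str:
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    # Should print the oid recorded in the pointer (de92988d...) for an intact download.
    print(lfs_oid("pytorch_model.bin"))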
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1edb0658cb47689db5cf78194ebe041bba3b6b775d1f1069fc9501b372d4acb0
+ size 17082758
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "name_or_path": "xlm-roberta-base",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
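Together, special_tokens_map.json and tokenizer_config.json restore the XLM-R special tokens and the 512-token limit when the tokenizer is loaded. A minimal sketch, again assuming the HyungYoun/xlm-no-I repo id from hparams.yml:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("HyungYoun/xlm-no-I")
    print(tok.cls_token, tok.sep_token, tok.mask_token, tok.pad_token)  # <s> </s> <mask> <pad>
    print(tok.model_max_length)  # 512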