MuneK committed
Commit 49163ec
1 Parent(s): 085dd82

Training in progress, epoch 1

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "_name_or_path": "cl-tohoku/bert-base-japanese-v3",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "regression",
+ "torch_dtype": "float32",
+ "transformers_version": "4.31.0",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 32768
+ }
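For context on the config above: it sets problem_type to "regression" on a BertForSequenceClassification head with 8 output dimensions, so the model emits 8 continuous scores per input rather than class probabilities. A minimal loading sketch follows; the Hub repo id is a placeholder, since this commit does not show the repository name, and it assumes transformers >= 4.31 plus fugashi and unidic-lite for the Japanese tokenizer.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "MuneK/your-repo-name"  # placeholder, not the real repository id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("これはテスト文です。", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# problem_type "regression": 8 continuous outputs, one per LABEL_* dimension
print(logits.shape)  # torch.Size([1, 8])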
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b829b557b193f264af917c3aa0c688ab78ecc3e34db9a70832e6ff1b7ce83aa0
+ size 444921649
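pytorch_model.bin is stored as a Git LFS pointer: the three lines above record only the spec version, the sha256 of the actual weight file, and its size in bytes (about 445 MB). A small sketch, not part of this commit, for checking a downloaded copy against that oid:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the ~445 MB payload is never held in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "b829b557b193f264af917c3aa0c688ab78ecc3e34db9a70832e6ff1b7ce83aa0"
print(sha256_of("pytorch_model.bin") == expected)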
runs/Aug19_06-36-52_d85708438522/events.out.tfevents.1692427082.d85708438522.9090.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d67f1415d91a39592f9fe774c16acf210093f54c9f43445bd1bdcf50b465ced6
+ size 5175
runs/Aug19_06-46-59_d85708438522/events.out.tfevents.1692427624.d85708438522.9090.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbc3d778381b189594db66735001ea65a3783ba85343c8505f7f9d1dd0747d13
+ size 5175
runs/Aug19_06-56-42_d85708438522/events.out.tfevents.1692428208.d85708438522.9090.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcdb601af891424dcfeb3468e80209b75e36a885f8bd3bbbe87e889e533e1338
+ size 5173
runs/Aug19_07-27-07_d85708438522/events.out.tfevents.1692430035.d85708438522.9090.8 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0fd29576a06348ee5bd6570cbef0ae76169a3f4d43a173667a87e1fcbf27c6d
+ size 4816
runs/Aug19_07-28-13_d85708438522/events.out.tfevents.1692430098.d85708438522.9090.9 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2545d3c4c971bf78818863db3d4875aaa93355746d389c38f9aaaf8b4077ebf
+ size 4488
runs/Aug19_07-29-01_d85708438522/events.out.tfevents.1692430145.d85708438522.9090.10 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22c959b2a649b863547a22e1de34b3ccf5c0a00133afaef56158829ec91f6812
+ size 4488
runs/Aug19_07-30-45_d85708438522/events.out.tfevents.1692430251.d85708438522.9090.11 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f91df315ef3897fcbd2485e8d4deea42d0edd043517878bb87402796733ce134
+ size 4488
runs/Aug19_07-33-35_d85708438522/events.out.tfevents.1692430420.d85708438522.9090.12 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:687f710b56c3ea16adb6ea2324751c4c6ac5805c882a5e3987724ea052b48288
+ size 4964
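The events.out.tfevents.* files under runs/ are TensorBoard logs written during training, one per run, also tracked through Git LFS. A rough sketch for inspecting one of them offline with the tensorboard package; the tag name "train/loss" is an assumption, since the actual tags depend on what the Trainer logged:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point at one run directory from this commit and load its scalar events.
acc = EventAccumulator("runs/Aug19_07-33-35_d85708438522")
acc.Reload()

print(acc.Tags()["scalars"])             # list the scalar tags actually recorded
for event in acc.Scalars("train/loss"):  # assumed tag name
    print(event.step, event.value)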
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_lower_case": false,
+ "do_subword_tokenize": true,
+ "do_word_tokenize": true,
+ "jumanpp_kwargs": null,
+ "mask_token": "[MASK]",
+ "mecab_kwargs": {
+ "mecab_dic": "unidic_lite"
+ },
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "subword_tokenizer_type": "wordpiece",
+ "sudachi_kwargs": null,
+ "tokenizer_class": "BertJapaneseTokenizer",
+ "unk_token": "[UNK]",
+ "word_tokenizer_type": "mecab"
+ }
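tokenizer_config.json selects BertJapaneseTokenizer with MeCab word segmentation (unidic_lite dictionary) followed by WordPiece, so the fugashi and unidic-lite packages must be installed before the tokenizer will load. A minimal sketch using the base model named in config.json; the tokenizer files added in this commit are configured the same way:

from transformers import AutoTokenizer

# Requires: pip install fugashi unidic-lite
tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-v3")
print(tokenizer.tokenize("これはテスト文です。"))  # MeCab word split, then WordPiece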
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e9957a472e0a901c7d553b5d8c4986a28670c57354e58fbf86d1aaae5b7ee9d
+ size 4027
vocab.txt ADDED
The diff for this file is too large to render.