againeureka committed
Commit ad766ac
1 Parent(s): d9b2bf7

Training in progress, epoch 1

all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 5.0,
+     "eval_accuracy": 0.6930232558139535,
+     "eval_f1": 0.7011293231597724,
+     "eval_loss": 0.866123378276825,
+     "eval_precision": 0.6911200599392715,
+     "eval_recall": 0.7123187113906709,
+     "eval_runtime": 1.5006,
+     "eval_samples_per_second": 1289.482,
+     "eval_steps_per_second": 80.634,
+     "total_flos": 919368802606944.0,
+     "train_loss": 1.1051753743489583,
+     "train_runtime": 87.8341,
+     "train_samples_per_second": 232.142,
+     "train_steps_per_second": 2.562
+ }
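The eval_accuracy / eval_f1 / eval_precision / eval_recall fields above are the kind of values a transformers `Trainer` gets back from a `compute_metrics` callback before prefixing them with `eval_`. A minimal sketch of such a callback, assuming a multi-class sequence-classification task with macro-averaged scores (the actual averaging mode is not recorded in this commit):

```python
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    """Turn raw logits into metric names matching eval_results.json (assumed setup)."""
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="macro", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1,
        "precision": precision,
        "recall": recall,
    }
```

The Trainer itself adds the `eval_` prefix and the runtime/throughput fields before writing the JSON shown above.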
config.json CHANGED
@@ -46,5 +46,5 @@
  "transformers_version": "4.28.1",
  "type_vocab_size": 1,
  "use_cache": true,
- "vocab_size": 42000
+ "vocab_size": 64000
  }
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "epoch": 5.0,
+     "eval_accuracy": 0.6930232558139535,
+     "eval_f1": 0.7011293231597724,
+     "eval_loss": 0.866123378276825,
+     "eval_precision": 0.6911200599392715,
+     "eval_recall": 0.7123187113906709,
+     "eval_runtime": 1.5006,
+     "eval_samples_per_second": 1289.482,
+     "eval_steps_per_second": 80.634
+ }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:517b446fcba0af41ad6840f71401b751c47f50785d9cc8caad8115054e434249
- size 473290421
+ oid sha256:c74f06eeaa3925d7152bb7005e600226c0ee78d994ba0782307d8fbf2ce055ec
+ size 540874421
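The checkpoint grows from 473,290,421 to 540,874,421 bytes in the same commit that raises `vocab_size` from 42000 to 64000; the roughly 67.6 MB difference is what 22,000 extra rows of a 768-dimensional fp32 embedding table would add, assuming a base-sized encoder (the hidden size is not shown in this diff). A hedged sketch of how such a resize is usually done, with placeholder paths since the base checkpoint is not named here:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Placeholder paths: the actual base model and tokenizer are not named in this commit.
tokenizer = AutoTokenizer.from_pretrained("path/to/new-64k-tokenizer")
model = AutoModelForSequenceClassification.from_pretrained("path/to/base-model")

# Grow (or shrink) the embedding matrix to match the new vocabulary;
# this also updates config.vocab_size, which is the field changed above.
model.resize_token_embeddings(len(tokenizer))
print(model.config.vocab_size)  # 64000 for a 64k-entry tokenizer
```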
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
- "strip_accents": false,
+ "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 5.0,
+     "total_flos": 919368802606944.0,
+     "train_loss": 1.1051753743489583,
+     "train_runtime": 87.8341,
+     "train_samples_per_second": 232.142,
+     "train_steps_per_second": 2.562
+ }
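train_results.json, eval_results.json, and all_results.json look like the per-split and combined files that `Trainer.save_metrics` writes; under that assumption, all_results.json is simply the union of the other two files. A quick way to verify this against a local clone (the path is a placeholder):

```python
import json
from pathlib import Path

# Placeholder path to a local clone of this repository.
repo = Path("path/to/checkpoint")

train = json.loads((repo / "train_results.json").read_text())
eval_ = json.loads((repo / "eval_results.json").read_text())
combined = json.loads((repo / "all_results.json").read_text())

# For this commit the combined file should equal the two per-split files merged;
# "epoch" appears in both with the same value, so the merge is lossless.
assert combined == {**train, **eval_}
```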
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff