cammy committed on
Commit
7a5a613
1 Parent(s): 5e6a9ba

Training in progress, epoch 1

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endofpiece|>": 50266,
+   "<|startofpiece|>": 50265,
+   "[MASK]": 50267
+ }
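These three markers are appended directly after RoBERTa's base vocabulary (IDs 0–50264). A minimal sketch of reading the table from a local checkout of this repo (the relative path is an assumption):

```python
import json

# Load the added-token table from a local checkout (path is assumed).
with open("added_tokens.json") as f:
    added = json.load(f)

# roberta-large's vocabulary covers IDs 0-50264, so the GLM-specific
# markers occupy the next three slots.
for token, token_id in sorted(added.items(), key=lambda kv: kv[1]):
    print(token_id, token)
# 50265 <|startofpiece|>
# 50266 <|endofpiece|>
# 50267 [MASK]
```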
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "BAAI/glm-roberta-large",
+   "architectures": [
+     "GLMForConditionalGeneration"
+   ],
+   "attention_dropout_prob": 0.1,
+   "attention_scale": 1.0,
+   "auto_map": {
+     "AutoConfig": "configuration_glm.GLMConfig",
+     "AutoModel": "modeling_glm.GLMModel",
+     "AutoModelForMultipleChoice": "modeling_glm.GLMForMultipleChoice",
+     "AutoModelForSeq2SeqLM": "modeling_glm.GLMForConditionalGeneration"
+   },
+   "block_position_encoding": true,
+   "checkpoint_activations": false,
+   "checkpoint_num_layers": 1,
+   "embedding_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "max_sequence_length": 512,
+   "model_type": "glm",
+   "num_attention_heads": 16,
+   "num_layers": 24,
+   "output_dropout_prob": 0.1,
+   "output_predict": true,
+   "parallel_output": true,
+   "pool_token": "cls",
+   "relative_encoding": false,
+   "spell_func": "lstm",
+   "spell_length": null,
+   "torch_dtype": "float16",
+   "transformers_version": "4.24.0",
+   "vocab_size": 50304
+ }
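The `auto_map` block routes the `Auto*` classes to custom code shipped with the base repo, so loading requires `trust_remote_code=True`. A hedged loading sketch (the repo id is a placeholder for wherever this checkpoint lives):

```python
from transformers import AutoConfig, AutoModelForSeq2SeqLM

REPO_ID = "cammy/glm-roberta-large-finetuned"  # placeholder repo id

# auto_map resolves to configuration_glm.GLMConfig and
# modeling_glm.GLMForConditionalGeneration, which exist only as remote
# code in the base repo, hence trust_remote_code=True.
config = AutoConfig.from_pretrained(REPO_ID, trust_remote_code=True)
model = AutoModelForSeq2SeqLM.from_pretrained(REPO_ID, trust_remote_code=True)

print(config.hidden_size, config.num_layers, config.num_attention_heads)
# 1024 24 16
```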
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8f92746e5cb090ffef4d65d831110764d3a37e7f4ee31c1eb0c97dbe4081725
+ size 709847841
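What Git stores here is not the weights but a three-line git-lfs pointer (spec version, sha256 oid, byte size); `git lfs pull` swaps it for the real blob. The 709,847,841-byte size is consistent with roughly 355 M parameters in float16, matching the config's `torch_dtype`. A minimal parser for the pointer format, assuming a checkout where LFS smudging is disabled so the pointer file is what's on disk:

```python
def parse_lfs_pointer(path: str) -> dict[str, str]:
    """Parse a git-lfs spec-v1 pointer: one 'key value' pair per line."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("pytorch_model.bin")  # pointer file, not the weights
print(ptr["oid"], int(ptr["size"]))
# sha256:d8f92746... 709847841
```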
runs/Jan07_13-20-54_821e18845ca1/1673097661.8862956/events.out.tfevents.1673097661.821e18845ca1.1305.19 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0b9820e97476776974c94192d56cf40ce9ed07b485217c99c510633dd3a2430
+ size 5745
runs/Jan07_13-20-54_821e18845ca1/events.out.tfevents.1673097661.821e18845ca1.1305.18 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:405bc73aa759c21ea54037941dac12f6251556ae3a5ccb591b758261d901e40b
+ size 5082
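The two `events.out.tfevents.*` files under `runs/` are TensorBoard logs written by the Trainer during this epoch. Once fetched from LFS, they can be inspected with TensorBoard's event reader; the scalar tag names below are assumptions:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jan07_13-20-54_821e18845ca1")
ea.Reload()

print(ea.Tags()["scalars"])         # list the available scalar tags
for e in ea.Scalars("train/loss"):  # tag name is an assumption
    print(e.step, e.value)
```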
special_tokens_map.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "additional_special_tokens": [
+     "<|startofpiece|>",
+     "<|endofpiece|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
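The per-token flags matter mostly for `[MASK]`: `lstrip: true` lets it absorb the whitespace in front of it, RoBERTa-style, so `"Hello [MASK]"` and `"Hello[MASK]"` encode the mask identically. The other specials reuse RoBERTa's conventions, with `</s>` doing triple duty as eos, pad, and sep. A sketch of the equivalent `AddedToken` construction from the `tokenizers` package:

```python
from tokenizers import AddedToken

# lstrip=True: the mask token swallows the space before it, matching the
# flags recorded above; all other specials keep lstrip/rstrip False.
mask = AddedToken("[MASK]", lstrip=True, rstrip=False,
                  normalized=True, single_word=False)
```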
tokenizer_config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "add_prefix_space": false,
+   "additional_special_tokens": [
+     "<|startofpiece|>",
+     "<|endofpiece|>"
+   ],
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "name_or_path": "BAAI/glm-roberta-large",
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GLMRobertaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_fast": false
+ }
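`tokenizer_class` names the custom `GLMRobertaTokenizer`, which also lives in the base repo's remote code, and `use_fast: false` means there is no Rust-backed variant. A hedged loading sketch, using the same placeholder repo id as above:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "cammy/glm-roberta-large-finetuned",  # placeholder repo id
    trust_remote_code=True,               # GLMRobertaTokenizer is remote code
    use_fast=False,                       # matches "use_fast": false above
)
print(tok.convert_tokens_to_ids(["<|startofpiece|>", "<|endofpiece|>", "[MASK]"]))
# [50265, 50266, 50267]
```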
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0347ceaa14dcdb7e747ce6d9857d892ff3330f5b74404a830cbe1dbbb3bb133
+ size 3579
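`training_args.bin` is the pickled `TrainingArguments` object the Trainer saves alongside each run (again an LFS pointer here). Once fetched, it can be unpickled; a sketch, noting that recent PyTorch defaults `torch.load` to `weights_only=True`, which rejects arbitrary pickles:

```python
import torch  # unpickling also needs transformers installed,
              # since the pickle references TrainingArguments

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate)
```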
vocab.json ADDED
The diff for this file is too large to render. See raw diff