liangyuxin committed
Commit a910db0
1 Parent(s): 85c40aa
cog-pretrain.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ea6f4164152bc58d23e24e48f7bf4187aad72a32e97ec4b3acc832fe183cbc2
+ size 1021864
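
Note: the added file is only a Git LFS pointer; the ~1 MB SentencePiece model itself lives in LFS storage. A minimal sketch of loading it with the sentencepiece package follows. The file name comes from this commit; everything else is illustrative, and it assumes the checkout was fetched with "git lfs pull".

# Minimal sketch: load the SentencePiece model added in this commit.
# Requires "pip install sentencepiece" and a checkout where "git lfs pull"
# has replaced this pointer file with the real binary.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("cog-pretrain.model")  # path relative to the repo root after this commit

print(sp.GetPieceSize())                 # tokenizer vocabulary size
print(sp.EncodeAsPieces("hello world"))  # example segmentation into pieces
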
config.json CHANGED
@@ -1,7 +1,7 @@
 {
     "encoder_model_path": "/encoder/",
     "decoder_model_path": "/decoder/",
-    "sentencepiece_model_path":"/decoder/cog-pretrain.model",
+    "sentencepiece_model_path":"cog-pretrain.model",
     "latent_size":256,
     "seed": 42,
     "ratio_increase": 0.5,
@@ -31,5 +31,46 @@
     "logger": true,
     "checkpoint_callback": null,
     "enable_checkpointing": true,
-    "process_position": 0
+    "process_position": 0,
+    "encoder":{
+        "architectures": [
+            "BertForMaskedLM"
+        ],
+        "attention_probs_dropout_prob": 0.1,
+        "directionality": "bidi",
+        "hidden_act": "gelu",
+        "hidden_dropout_prob": 0.1,
+        "hidden_size": 768,
+        "initializer_range": 0.02,
+        "intermediate_size": 3072,
+        "layer_norm_eps": 1e-12,
+        "max_position_embeddings": 512,
+        "model_type": "bert",
+        "num_attention_heads": 12,
+        "num_hidden_layers": 12,
+        "pad_token_id": 0,
+        "pooler_fc_size": 768,
+        "pooler_num_attention_heads": 12,
+        "pooler_num_fc_layers": 3,
+        "pooler_size_per_head": 128,
+        "pooler_type": "first_token_transform",
+        "type_vocab_size": 2,
+        "vocab_size": 21128
+    },
+    "decoder":{
+        "num_layers":32,
+        "vocab_size":50048,
+        "hidden_size":1600,
+        "num_attention_heads":25,
+        "embedding_dropout_prob":0.1,
+        "attention_dropout_prob":0.1,
+        "output_dropout_prob":0.1,
+        "max_sequence_length":512,
+        "max_memory_length":512,
+        "latent_size":256,
+        "checkpoint_activations":false,
+        "checkpoint_num_layers":1,
+        "parallel_output":true,
+        "relative_encoding":true
+    }
 }
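
Note: besides repointing sentencepiece_model_path from /decoder/ to the repo root, this commit inlines the full encoder and decoder hyperparameters into config.json. A sketch of how the nested "encoder" block could be turned into a transformers model is shown below; this loading code is an assumption for illustration, not necessarily how the repo's own scripts consume the config.

# Sketch: instantiate the encoder from the nested "encoder" block that this
# commit adds to config.json. Hypothetical usage; no weights are loaded here.
import json
from transformers import BertConfig, BertForMaskedLM

with open("config.json") as f:
    cfg = json.load(f)

enc_cfg = BertConfig(**cfg["encoder"])  # extra keys (pooler_*, directionality) are kept as attributes
encoder = BertForMaskedLM(enc_cfg)      # randomly initialized model with these dimensions

print(enc_cfg.vocab_size)   # 21128, the bert-base-chinese vocabulary size
print(enc_cfg.hidden_size)  # 768

# The "decoder" block (32 layers, hidden_size 1600, relative_encoding) does not
# map onto a stock transformers class, so it would be consumed as a plain dict
# by whatever GLM-style decoder implementation the repo pairs with it.
dec_cfg = cfg["decoder"]
print(dec_cfg["num_layers"], dec_cfg["hidden_size"])  # 32 1600
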
encoder/vocab.txt → vocab.txt RENAMED
File without changes