renede committed on
Commit 68c5328
1 Parent(s): 2e1b299

Update config.json

Files changed (1)
  1. config.json +13 -17
config.json CHANGED
@@ -2,17 +2,15 @@
   "_name_or_path": "tiiuae/falcon-7b",
   "alibi": false,
   "apply_residual_connection_post_layernorm": false,
-  "architectures": [
-    "FalconForCausalLM"
-  ],
+  "architectures": ["RWForCausalLM"],
   "attention_dropout": 0.0,
   "auto_map": {
-    "AutoConfig": "tiiuae/falcon-7b--configuration_falcon.FalconConfig",
-    "AutoModel": "tiiuae/falcon-7b--modeling_falcon.FalconModel",
-    "AutoModelForCausalLM": "tiiuae/falcon-7b--modeling_falcon.FalconForCausalLM",
-    "AutoModelForQuestionAnswering": "tiiuae/falcon-7b--modeling_falcon.FalconForQuestionAnswering",
-    "AutoModelForSequenceClassification": "tiiuae/falcon-7b--modeling_falcon.FalconForSequenceClassification",
-    "AutoModelForTokenClassification": "tiiuae/falcon-7b--modeling_falcon.FalconForTokenClassification"
+    "AutoConfig": "configuration_RW.RWConfig",
+    "AutoModel": "modelling_RW.RWModel",
+    "AutoModelForSequenceClassification": "modelling_RW.RWForSequenceClassification",
+    "AutoModelForTokenClassification": "modelling_RW.RWForTokenClassification",
+    "AutoModelForQuestionAnswering": "modelling_RW.RWForQuestionAnswering",
+    "AutoModelForCausalLM": "modelling_RW.RWForCausalLM"
   },
   "bias": false,
   "bos_token_id": 11,
@@ -20,16 +18,14 @@
   "hidden_dropout": 0.0,
   "hidden_size": 4544,
   "initializer_range": 0.02,
-  "layer_norm_epsilon": 1e-05,
-  "model_type": "falcon",
+  "layer_norm_epsilon": 1e-5,
+  "model_type": "RefinedWebModel",
   "multi_query": true,
-  "new_decoder_architecture": false,
-  "num_attention_heads": 71,
-  "num_hidden_layers": 32,
-  "num_kv_heads": 71,
+  "n_head": 71,
+  "n_layer": 32,
   "parallel_attn": true,
   "torch_dtype": "float16",
-  "transformers_version": "4.30.0.dev0",
+  "transformers_version": "4.30.2",
   "use_cache": true,
   "vocab_size": 65024
-}
+}
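
For context, the "auto_map" entries in this config point the transformers Auto classes at custom configuration_RW.py / modelling_RW.py modules shipped alongside the config, so loading the checkpoint requires trust_remote_code=True. A minimal sketch of loading such a checkpoint, assuming a placeholder repository id (substitute the repo this config actually belongs to):

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

# "your-namespace/falcon-7b-rw" is a hypothetical repo id used only for illustration.
repo_id = "your-namespace/falcon-7b-rw"

# trust_remote_code=True lets transformers import the configuration_RW / modelling_RW
# modules referenced by "auto_map" instead of its built-in implementation.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    config=config,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" in the config
    trust_remote_code=True,
)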