mrzlab630 committed on
Commit
b9e381a
1 Parent(s): c32fa21

Update config.json

Files changed (1):
  1. config.json +25 -14
config.json CHANGED
@@ -1,22 +1,33 @@
 {
-  "_name_or_path": "mrzlab630/lora-alpaca-trading-candles",
+  "_name_or_path": "mrzlab630/lora-alpaca-trading-candles",
+  "model_type": "Llama",
+  "finetuning_task": "lora-alpaca-trading-candles",
   "architectures": [
     "mrzlabForQuestionAnswering"
   ],
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "hidden_act": "silu",
+  "pretrained_model_name_or_path": "mrzlab630/weights_Llama_7b",
+  "num_labels": 1,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "vocab_size": 32000,
   "hidden_size": 4096,
-  "initializer_range": 0.02,
-  "intermediate_size": 11008,
-  "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "pad_token_id": 0,
-  "rms_norm_eps": 1e-06,
-  "tie_word_embeddings": false,
-  "torch_dtype": "float16",
+  "initializer_range": 0.02,
+  "layer_norm_eps": 1e-12,
+  "gradient_checkpointing": false,
   "transformers_version": "4.28.0.dev0",
-  "use_cache": true,
-  "vocab_size": 32000
-}
+  "use_cache": true
+}
+
+
+
+
+
+
+
+
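For reference, a minimal sketch of how the updated config.json could be fetched and inspected, assuming the huggingface_hub package is available; the repo id is taken from the "_name_or_path" field above, and the printed keys are the ones touched by this commit:

import json

from huggingface_hub import hf_hub_download

# Download config.json from the Hub repo named in "_name_or_path".
config_path = hf_hub_download(
    repo_id="mrzlab630/lora-alpaca-trading-candles",
    filename="config.json",
)

with open(config_path) as f:
    config = json.load(f)

# Print the fields added or changed in this commit.
for key in (
    "model_type",
    "finetuning_task",
    "pretrained_model_name_or_path",
    "num_labels",
    "layer_norm_eps",
    "gradient_checkpointing",
):
    print(key, "=", config.get(key))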