slackingfred
committed on
Commit
•
4d8171b
1
Parent(s):
c3e0053
Add required configs from math-deepseek-lora-arith-simple-hard-5-step
Browse files- command.txt +1 -0
- config.json +28 -0
- run_config.json +74 -0
- tokenizer.json +0 -0
- tokenizer_config.json +35 -0
command.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
main.py --peft_type lora --model_name deepseek --peft_ckpt runs/deepseek-curri-4/checkpoint-30000
|
config.json
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"LlamaForCausalLM"
|
4 |
+
],
|
5 |
+
"bos_token_id": 32013,
|
6 |
+
"eos_token_id": 32021,
|
7 |
+
"hidden_act": "silu",
|
8 |
+
"hidden_size": 2048,
|
9 |
+
"initializer_range": 0.02,
|
10 |
+
"intermediate_size": 5504,
|
11 |
+
"max_position_embeddings": 16384,
|
12 |
+
"model_type": "llama",
|
13 |
+
"num_attention_heads": 16,
|
14 |
+
"num_hidden_layers": 24,
|
15 |
+
"num_key_value_heads": 16,
|
16 |
+
"pretraining_tp": 1,
|
17 |
+
"rms_norm_eps": 1e-06,
|
18 |
+
"rope_scaling": {
|
19 |
+
"factor": 4.0,
|
20 |
+
"type": "linear"
|
21 |
+
},
|
22 |
+
"rope_theta": 100000,
|
23 |
+
"tie_word_embeddings": false,
|
24 |
+
"torch_dtype": "bfloat16",
|
25 |
+
"transformers_version": "4.33.1",
|
26 |
+
"use_cache": true,
|
27 |
+
"vocab_size": 32256
|
28 |
+
}
|
run_config.json
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"base_model_id": "deepseek-ai/deepseek-coder-1.3b-instruct",
|
3 |
+
"quantitize": "fp16",
|
4 |
+
"dataset": "Arithmetic_Hard",
|
5 |
+
"data_collator": "DataCollatorForLanguageModeling",
|
6 |
+
"peft_config": {
|
7 |
+
"lora": {
|
8 |
+
"r": 32,
|
9 |
+
"lora_alpha": 64,
|
10 |
+
"target_modules": [
|
11 |
+
"q_proj",
|
12 |
+
"k_proj",
|
13 |
+
"v_proj",
|
14 |
+
"o_proj",
|
15 |
+
"gate_proj",
|
16 |
+
"up_proj",
|
17 |
+
"down_proj"
|
18 |
+
],
|
19 |
+
"bias": "none",
|
20 |
+
"lora_dropout": 0.05,
|
21 |
+
"task_type": "CAUSAL_LM"
|
22 |
+
},
|
23 |
+
"lora_large": {
|
24 |
+
"r": 128,
|
25 |
+
"lora_alpha": 256,
|
26 |
+
"target_modules": [
|
27 |
+
"q_proj",
|
28 |
+
"k_proj",
|
29 |
+
"v_proj",
|
30 |
+
"o_proj",
|
31 |
+
"gate_proj",
|
32 |
+
"up_proj",
|
33 |
+
"down_proj"
|
34 |
+
],
|
35 |
+
"bias": "none",
|
36 |
+
"lora_dropout": 0.05,
|
37 |
+
"task_type": "CAUSAL_LM"
|
38 |
+
},
|
39 |
+
"p_tuning": {
|
40 |
+
"num_virtual_tokens": 16,
|
41 |
+
"num_transformer_submodules": 1,
|
42 |
+
"token_dim": 2048,
|
43 |
+
"encoder_hidden_size": 2048,
|
44 |
+
"task_type": "CAUSAL_LM"
|
45 |
+
}
|
46 |
+
},
|
47 |
+
"training_args": {
|
48 |
+
"warmup_steps": 500,
|
49 |
+
"per_device_train_batch_size": 4,
|
50 |
+
"per_device_eval_batch_size": 4,
|
51 |
+
"gradient_accumulation_steps": 1,
|
52 |
+
"max_steps": 30000,
|
53 |
+
"learning_rate": 0.0001,
|
54 |
+
"optim": "adamw_torch",
|
55 |
+
"logging_steps": 100,
|
56 |
+
"save_strategy": "steps",
|
57 |
+
"save_steps": 2000,
|
58 |
+
"evaluation_strategy": "steps",
|
59 |
+
"eval_steps": 1000,
|
60 |
+
"weight_decay": 0.01,
|
61 |
+
"report_to": "wandb",
|
62 |
+
"fp16": true,
|
63 |
+
"output_dir": "runs/deepseek-curri-5",
|
64 |
+
"logging_dir": "runs/deepseek-curri-5/logs"
|
65 |
+
},
|
66 |
+
"tokenizer": {
|
67 |
+
"tokenize_config": {
|
68 |
+
"truncation": true,
|
69 |
+
"max_length": 512,
|
70 |
+
"padding": "max_length"
|
71 |
+
},
|
72 |
+
"prompt_template": "config/qa_template.txt"
|
73 |
+
}
|
74 |
+
}
|
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
tokenizer_config.json
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"add_bos_token": true,
|
3 |
+
"add_eos_token": false,
|
4 |
+
"bos_token": {
|
5 |
+
"__type": "AddedToken",
|
6 |
+
"content": "<|begin▁of▁sentence|>",
|
7 |
+
"lstrip": false,
|
8 |
+
"normalized": true,
|
9 |
+
"rstrip": false,
|
10 |
+
"single_word": false
|
11 |
+
},
|
12 |
+
"clean_up_tokenization_spaces": false,
|
13 |
+
"eos_token": {
|
14 |
+
"__type": "AddedToken",
|
15 |
+
"content": "<|EOT|>",
|
16 |
+
"lstrip": false,
|
17 |
+
"normalized": true,
|
18 |
+
"rstrip": false,
|
19 |
+
"single_word": false
|
20 |
+
},
|
21 |
+
"legacy": true,
|
22 |
+
"model_max_length": 16384,
|
23 |
+
"pad_token": {
|
24 |
+
"__type": "AddedToken",
|
25 |
+
"content": "<|end▁of▁sentence|>",
|
26 |
+
"lstrip": false,
|
27 |
+
"normalized": true,
|
28 |
+
"rstrip": false,
|
29 |
+
"single_word": false
|
30 |
+
},
|
31 |
+
"sp_model_kwargs": {},
|
32 |
+
"unk_token": null,
|
33 |
+
"tokenizer_class": "LlamaTokenizerFast",
|
34 |
+
"chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"
|
35 |
+
}
|