slackingfred committed
Commit de13652
Parent(s): e325dc6

Save last model
Browse files:
- config.json +31 -0
- generation_config.json +6 -0
- model.safetensors +3 -0
- optimizer.pt +3 -0
- rng_state.pth +3 -0
- run_config.json +77 -0
- scheduler.pt +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +35 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "deepseek-ai/deepseek-coder-1.3b-instruct",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 32013,
+  "eos_token_id": 32021,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 5504,
+  "max_position_embeddings": 16384,
+  "model_type": "llama",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 16,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "factor": 4.0,
+    "type": "linear"
+  },
+  "rope_theta": 100000,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.39.0.dev0",
+  "use_cache": true,
+  "vocab_size": 32256
+}
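This config is inherited from the deepseek-coder-1.3b-instruct base; note the linear RoPE scaling block, which is what lets a 4096-position rotary table cover the 16384-token window (16384 / 4.0 = 4096). A minimal loading sketch, assuming the checkpoint sits in the run's output directory (`runs/deepseek-full-hard` per run_config.json below):

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

# Reads the config.json shown above; the local path is an assumption.
cfg = AutoConfig.from_pretrained("runs/deepseek-full-hard")
assert cfg.rope_scaling == {"factor": 4.0, "type": "linear"}

model = AutoModelForCausalLM.from_pretrained(
    "runs/deepseek-full-hard",
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16"
)
```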
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 32013,
+  "eos_token_id": 32021,
+  "transformers_version": "4.39.0.dev0"
+}
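generation_config.json only pins the special-token ids that `generate()` falls back to; `"_from_model_config": true` records that they were copied from config.json rather than set by hand. A small sketch, with the path assumed as above:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("runs/deepseek-full-hard")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 32013 32021
```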
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fef71ce0ddb4f14be9ebf0332bf1147d0f88c19a280a3cb8e16e736175ddfa8
+size 2692968904
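model.safetensors and the other binary files below are checked in as git-lfs pointer files: a spec version, the sha256 of the real blob, and its size in bytes. The size here, about 2.69 GB, is consistent with 1.3B parameters at 2 bytes each in fp16. A minimal parsing sketch for the pointer format:

```python
# Parse a git-lfs pointer file like the one above (spec: https://git-lfs.github.com/spec/v1).
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:5fef71ce0ddb4f14be9ebf0332bf1147d0f88c19a280a3cb8e16e736175ddfa8
size 2692968904"""
print(parse_lfs_pointer(pointer)["size_bytes"] / 1e9)  # ~2.69 GB
```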
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb7212d1a5346112ff3ca6be3074c69fe24ca33344eddf25190e0ea85921bb54
+size 2699040122
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecbe4210da6226b088a12a1c5bbee9831d038ebcd718dd20565a6867997b182e
+size 14244
run_config.json
ADDED
@@ -0,0 +1,77 @@
+{
+  "base_model_id": "deepseek-ai/deepseek-coder-1.3b-instruct",
+  "quantitize": "fp16",
+  "dataset": "Arithmetic_Hard",
+  "data_collator": "DataCollatorForLanguageModeling",
+  "peft_config": {
+    "lora": {
+      "r": 32,
+      "lora_alpha": 64,
+      "target_modules": [
+        "q_proj",
+        "k_proj",
+        "v_proj",
+        "o_proj",
+        "gate_proj",
+        "up_proj",
+        "down_proj"
+      ],
+      "bias": "none",
+      "lora_dropout": 0.05,
+      "task_type": "CAUSAL_LM"
+    },
+    "lora_large": {
+      "r": 128,
+      "lora_alpha": 256,
+      "target_modules": [
+        "q_proj",
+        "k_proj",
+        "v_proj",
+        "o_proj",
+        "gate_proj",
+        "up_proj",
+        "down_proj"
+      ],
+      "bias": "none",
+      "lora_dropout": 0.05,
+      "task_type": "CAUSAL_LM"
+    },
+    "p_tuning": {
+      "num_virtual_tokens": 16,
+      "num_transformer_submodules": 1,
+      "token_dim": 2048,
+      "encoder_hidden_size": 2048,
+      "task_type": "CAUSAL_LM"
+    }
+  },
+  "training_args": {
+    "warmup_steps": 500,
+    "per_device_train_batch_size": 4,
+    "per_device_eval_batch_size": 4,
+    "gradient_accumulation_steps": 1,
+    "max_steps": 100000,
+    "learning_rate": 0.0001,
+    "optim": "paged_adamw_8bit",
+    "logging_steps": 100,
+    "save_strategy": "steps",
+    "save_total_limit": 5,
+    "save_steps": 2500,
+    "evaluation_strategy": "steps",
+    "eval_steps": 2500,
+    "weight_decay": 0.01,
+    "report_to": "wandb",
+    "dataloader_num_workers": 4,
+    "load_best_model_at_end": true,
+    "fp16": false,
+    "output_dir": "runs/deepseek-full-hard",
+    "logging_dir": "runs/deepseek-full-hard/logs"
+  },
+  "tokenizer": {
+    "tokenize_config": {
+      "truncation": true,
+      "max_length": 512,
+      "padding": "max_length"
+    },
+    "prompt_template": "config/qa_template.txt"
+  }
+}
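run_config.json is this project's own training manifest: the `peft_config` entries mirror peft's `LoraConfig` and `PromptEncoderConfig` fields, and `training_args` maps one-to-one onto `transformers.TrainingArguments`. A hedged sketch of how such a file could be consumed (the wiring is an assumption; only the JSON keys come from the file):

```python
import json
from peft import LoraConfig, get_peft_model
from transformers import TrainingArguments

with open("run_config.json") as f:
    run_cfg = json.load(f)

# "lora" is the r=32 adapter used here; "lora_large" (r=128) and
# "p_tuning" (16 virtual tokens) are alternative presets in the same file.
lora_cfg = LoraConfig(**run_cfg["peft_config"]["lora"])
args = TrainingArguments(**run_cfg["training_args"])
# model = get_peft_model(base_model, lora_cfg)  # then train with Trainer(args=args, ...)
```

With rank 32 adapters on all seven attention and MLP projections, only a small fraction of the 1.3B weights receives gradients, which is what makes batch size 4 with paged 8-bit AdamW feasible on a single device.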
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d6c5b93324fa72d02f994789c5634c26492fde161591c5c12fb60aea61a68f2
+size 1064
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|begin▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|EOT|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": true,
+  "model_max_length": 16384,
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sp_model_kwargs": {},
+  "unk_token": null,
+  "tokenizer_class": "LlamaTokenizerFast",
+  "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n    {%- if message['role'] == 'system' -%}\n        {%- set ns.found = true -%}\n    {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n    {%- else %}\n        {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n        {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"
+}
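The tokenizer config pins BOS to <|begin▁of▁sentence|> and EOS to <|EOT|>, matching the bos_token_id/eos_token_id in config.json, and ships DeepSeek Coder's Instruction/Response chat template. A usage sketch, with the checkpoint path again assumed:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("runs/deepseek-full-hard")
messages = [{"role": "user", "content": "What is 17 * 24?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# The template frames each turn as "### Instruction:\n...\n### Response:"
# and closes assistant replies with <|EOT|>.
print(prompt)
```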
trainer_state.json
ADDED
The diff for this file is too large to render.
See raw diff
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:841cb7e6f9fc4f19db87ce9d0bb0834d6ed9f6df3d14ff1ec46778f5841bf8d1
+size 4856