Godwinlyamba committed on
Commit 3ea2ed5 · verified · 1 Parent(s): a71bd76

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,112 @@
+ {%- macro render_content(msg) -%}
+ {%- set c = msg.get('content') -%}
+ {%- if c is string -%}
+ {{ c }}
+ {%- elif c is not none -%}
+ {% for content in c -%}
+ {% if content['type'] == 'image' or content['type'] == 'image_url' -%}
+ <|media_begin|>image<|media_content|><|media_pad|><|media_end|>
+ {% elif content['type'] == 'video' or content['type']== 'video_url'-%}
+ <|kimi_k25_video_placeholder|>
+ {% else -%}
+ {{ content['text'] }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endmacro -%}
+
+ {% macro set_roles(message) -%}
+ {%- set role_name = message.get('name') or message['role'] -%}
+ {%- if message['role'] == 'user' -%}
+ <|im_user|>{{role_name}}<|im_middle|>
+ {%- elif message['role'] == 'assistant' -%}
+ <|im_assistant|>{{role_name}}<|im_middle|>
+ {%- else -%}
+ <|im_system|>{{role_name}}<|im_middle|>
+ {%- endif -%}
+ {%- endmacro -%}
+
+
+ {%- macro render_toolcalls(message) -%}
+ <|tool_calls_section_begin|>
+ {%- for tool_call in message['tool_calls'] -%}
+ {%- set formatted_id = tool_call['id'] -%}
+ <|tool_call_begin|>{{ formatted_id }}<|tool_call_argument_begin|>{% if tool_call['function']['arguments'] is string %}{{ tool_call['function']['arguments'] }}{% else %}{{ tool_call['function']['arguments'] | tojson }}{% endif %}<|tool_call_end|>
+ {%- endfor -%}
+ <|tool_calls_section_end|>
+ {%- endmacro -%}
+
+
+ {%- set preserve_thinking = preserve_thinking | default(false) -%}
+ {# Find last non-tool-call assistant message. If preserve_thinking, keep -1 so hist is empty and all msgs use suffix (retain reasoning). #}
+ {%- set ns = namespace(last_non_tool_call_assistant_msg=-1) -%}
+ {%- if not preserve_thinking -%}
+ {%- for idx in range(messages|length-1, -1, -1) -%}
+ {%- if messages[idx]['role'] == 'assistant' and not messages[idx].get('tool_calls') -%}
+ {%- set ns.last_non_tool_call_assistant_msg = idx -%}
+ {%- break -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+
+ {# split all messages into history & suffix, reasoning_content in suffix should be reserved.#}
+ {%- set hist_msgs = messages[:ns.last_non_tool_call_assistant_msg+1] -%}
+ {%- set suffix_msgs = messages[ns.last_non_tool_call_assistant_msg+1:] -%}
+
+ {%- if tools -%}
+ {%- if tools_ts_str -%}
+ <|im_system|>tool_declare<|im_middle|>{{ tools_ts_str }}<|im_end|>
+ {%- else -%}
+ <|im_system|>tool_declare<|im_middle|>{{ tools | tojson(separators=(',', ':')) }}<|im_end|>
+ {%- endif -%}
+ {%- endif -%}
+
+
+ {%- for message in hist_msgs -%}
+ {{set_roles(message)}}
+ {%- if message['role'] == 'assistant' -%}
+ <think></think>{{render_content(message)}}
+ {%- if message.get('tool_calls') -%}
+ {{render_toolcalls(message)}}
+ {%- endif -%}
+ {%- elif message['role'] == 'tool' -%}
+ {%- set tool_call_id = message.tool_call_id -%}
+ ## Return of {{ tool_call_id }}
+ {{render_content(message)}}
+ {%- elif message['content'] is not none -%}
+ {{render_content(message)}}
+ {%- endif -%}
+ <|im_end|>
+ {%- endfor -%}
+
+ {%- for message in suffix_msgs -%}
+ {{set_roles(message)}}
+ {%- if message['role'] == 'assistant' -%}
+ {%- if thinking is defined and thinking is false and preserve_thinking is false -%}
+ <think></think>{{render_content(message)}}
+ {%- else -%}
+ {%- set rc = message.get('reasoning', message.get('reasoning_content', '')) -%}
+ <think>{{rc}}</think>{{render_content(message)}}
+ {%- endif -%}
+ {%- if message.get('tool_calls') -%}
+ {{render_toolcalls(message)}}
+ {%- endif -%}
+ {%- elif message['role'] == 'tool' -%}
+ {%- set tool_call_id = message.tool_call_id -%}
+ ## Return of {{ tool_call_id }}
+ {{render_content(message)}}
+ {%- elif message['content'] is not none -%}
+ {{render_content(message)}}
+ {%- endif -%}
+ <|im_end|>
+ {%- endfor -%}
+
+
+ {%- if add_generation_prompt -%}
+ <|im_assistant|>assistant<|im_middle|>
+ {%- if thinking is defined and thinking is false -%}
+ <think></think>
+ {%- else -%}
+ <think>
+ {%- endif -%}
+ {%- endif -%}
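
For reference, a minimal sketch of driving this template through transformers (the local checkpoint path and the sample message are assumptions, not part of this commit):

```python
from transformers import AutoTokenizer

# Hypothetical path: point at a local checkout of this repo or its hub id.
tokenizer = AutoTokenizer.from_pretrained("./", trust_remote_code=True)

messages = [{"role": "user", "content": "Hello!"}]

# Extra kwargs (e.g. thinking=False, preserve_thinking=True) are forwarded
# to the template as variables by apply_chat_template.
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# Per the template above, this should render roughly as:
# <|im_user|>user<|im_middle|>Hello!<|im_end|><|im_assistant|>assistant<|im_middle|><think>
```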
config.json ADDED
@@ -0,0 +1,70 @@
+ {
+   "transformers_version": "5.7.0",
+   "architectures": [
+     "DeepseekV3ForCausalLM"
+   ],
+   "output_hidden_states": false,
+   "return_dict": true,
+   "dtype": "bfloat16",
+   "chunk_size_feed_forward": 0,
+   "is_encoder_decoder": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1"
+   },
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1
+   },
+   "problem_type": null,
+   "vocab_size": 163840,
+   "hidden_size": 2048,
+   "intermediate_size": 11264,
+   "moe_intermediate_size": 1408,
+   "num_hidden_layers": 27,
+   "num_attention_heads": 16,
+   "num_key_value_heads": 16,
+   "n_shared_experts": 2,
+   "n_routed_experts": 64,
+   "routed_scaling_factor": 2.446,
+   "kv_lora_rank": 512,
+   "q_lora_rank": null,
+   "qk_rope_head_dim": 64,
+   "v_head_dim": 128,
+   "qk_nope_head_dim": 128,
+   "n_group": 1,
+   "topk_group": 1,
+   "num_experts_per_tok": 6,
+   "first_k_dense_replace": 1,
+   "norm_topk_prob": true,
+   "hidden_act": "silu",
+   "max_position_embeddings": 131072,
+   "initializer_range": 0.02,
+   "rms_norm_eps": 1e-05,
+   "use_cache": false,
+   "pad_token_id": 163839,
+   "bos_token_id": 163584,
+   "eos_token_id": 163585,
+   "pretraining_tp": 1,
+   "tie_word_embeddings": false,
+   "rope_parameters": {
+     "rope_theta": 800000.0,
+     "rope_type": "default"
+   },
+   "rope_interleave": true,
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "qk_head_dim": 192,
+   "head_dim": 64,
+   "_name_or_path": "/ephemeral/ubuntu/distil-metadata/model_b",
+   "aux_loss_alpha": 0.001,
+   "ep_size": 1,
+   "model_type": "deepseek_v3",
+   "moe_layer_freq": 1,
+   "num_nextn_predict_layers": 1,
+   "scoring_func": "sigmoid",
+   "seq_aux": true,
+   "topk_method": "noaux_tc",
+   "output_attentions": false,
+   "torch_dtype": "bfloat16"
+ }
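
A small consistency check against this config (assuming a local checkout; the relations below only restate values already in the diff):

```python
import json

# Read the config.json added in this commit from a local checkout.
with open("config.json") as f:
    cfg = json.load(f)

# MoE geometry: 64 routed experts, 6 routed per token, plus 2 shared experts.
print(cfg["n_routed_experts"], cfg["num_experts_per_tok"], cfg["n_shared_experts"])

# MLA head split: the 192-dim qk head = 128 no-RoPE dims + 64 RoPE dims.
assert cfg["qk_head_dim"] == cfg["qk_nope_head_dim"] + cfg["qk_rope_head_dim"]
```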
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 163584,
+   "eos_token_id": 163585,
+   "pad_token_id": 163839,
+   "transformers_version": "5.4.0"
+ }
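
These ids match the ones in config.json; a minimal sketch of loading them (local path assumed):

```python
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("./")  # reads generation_config.json
assert (gen.bos_token_id, gen.eos_token_id, gen.pad_token_id) == (163584, 163585, 163839)
```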
model-00001-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f7ba1cacbb702929f48aaa85ee6cddd5ff5d8432804ee4c15aa185c88fca2f4
+ size 5363330696
model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc990d8e89d335b25711b46ccc3ab4a4f9f35e70b52d2ab7fd6a7eece11c6493
+ size 5365192952
model-00003-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:057da790cfbd130f69d12e3e28414cf2f9d40fc23c7e6121c22dca8d2996b05f
+ size 5364153584
model-00004-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:140426385bc1e00bbbd3c04549a6fc5bd742c88859ee017a236f5c18f3280e74
+ size 5364153784
model-00005-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05169f6c14f4d6499622475aac5df53b2d7b2f89cec495b28f82e98b6090a81e
+ size 5365192416
model-00006-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f94478ca6aff2086b3bc2881a7438beb2047f2c1b67913a3bb596aab15837c
+ size 5098857392
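
The six shard entries above are Git LFS pointers, not the weights themselves. A minimal sketch for verifying a downloaded shard against its pointer (the oid and size are copied from the model-00001 entry; the local path is an assumption):

```python
import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading a ~5 GB shard into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

print(verify_lfs_pointer(
    "model-00001-of-00006.safetensors",
    "7f7ba1cacbb702929f48aaa85ee6cddd5ff5d8432804ee4c15aa185c88fca2f4",
    5363330696,
))
```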
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
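
Although the index diff is not rendered here, sharded transformers checkpoints conventionally carry a `weight_map` from tensor names to shard files plus a `metadata.total_size`; a sketch under that assumption:

```python
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # total bytes across the six shards
name, shard = next(iter(index["weight_map"].items()))
print(name, "->", shard)  # which shard file holds a given tensor
```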
 
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b39c9bd00faa509721177f36bdd132f52924f67ed12394bf38b222de87aeee91
+ size 19545546
tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_kimi.TikTokenTokenizer",
+       null
+     ]
+   },
+   "backend": "tokenizers",
+   "bos_token": "[BOS]",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[EOS]",
+   "extra_special_tokens": [
+     "<|im_end|>",
+     "<|im_user|>",
+     "<|im_assistant|>",
+     "<|im_system|>",
+     "<|im_middle|>",
+     "<|media_start|>",
+     "<|media_content|>",
+     "<|media_end|>",
+     "<|media_pad|>"
+   ],
+   "is_local": false,
+   "local_files_only": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": "[UNK]",
+   "chat_template": "{%- macro render_content(msg) -%}\n {%- set c = msg.get('content') -%}\n {%- if c is string -%}\n {{ c }}\n {%- elif c is not none -%}\n {% for content in c -%}\n {% if content['type'] == 'image' or content['type'] == 'image_url' -%}\n <|media_begin|>image<|media_content|><|media_pad|><|media_end|>\n {% elif content['type'] == 'video' or content['type']== 'video_url'-%}\n <|kimi_k25_video_placeholder|>\n {% else -%}\n {{ content['text'] }}\n {%- endif -%}\n {%- endfor -%}\n {%- endif -%}\n{%- endmacro -%}\n\n{% macro set_roles(message) -%}\n {%- set role_name = message.get('name') or message['role'] -%}\n {%- if message['role'] == 'user' -%}\n <|im_user|>{{role_name}}<|im_middle|>\n {%- elif message['role'] == 'assistant' -%}\n <|im_assistant|>{{role_name}}<|im_middle|>\n {%- else -%}\n <|im_system|>{{role_name}}<|im_middle|>\n {%- endif -%}\n{%- endmacro -%}\n\n\n{%- macro render_toolcalls(message) -%}\n <|tool_calls_section_begin|>\n {%- for tool_call in message['tool_calls'] -%}\n {%- set formatted_id = tool_call['id'] -%}\n <|tool_call_begin|>{{ formatted_id }}<|tool_call_argument_begin|>{% if tool_call['function']['arguments'] is string %}{{ tool_call['function']['arguments'] }}{% else %}{{ tool_call['function']['arguments'] | tojson }}{% endif %}<|tool_call_end|>\n {%- endfor -%}\n <|tool_calls_section_end|>\n{%- endmacro -%}\n\n\n{%- set preserve_thinking = preserve_thinking | default(false) -%}\n{# Find last non-tool-call assistant message. If preserve_thinking, keep -1 so hist is empty and all msgs use suffix (retain reasoning). #}\n{%- set ns = namespace(last_non_tool_call_assistant_msg=-1) -%}\n{%- if not preserve_thinking -%}\n{%- for idx in range(messages|length-1, -1, -1) -%}\n {%- if messages[idx]['role'] == 'assistant' and not messages[idx].get('tool_calls') -%}\n {%- set ns.last_non_tool_call_assistant_msg = idx -%}\n {%- break -%}\n {%- endif -%}\n{%- endfor -%}\n{%- endif -%}\n\n{# split all messages into history & suffix, reasoning_content in suffix should be reserved.#}\n{%- set hist_msgs = messages[:ns.last_non_tool_call_assistant_msg+1] -%}\n{%- set suffix_msgs = messages[ns.last_non_tool_call_assistant_msg+1:] -%}\n\n{%- if tools -%}\n {%- if tools_ts_str -%}\n <|im_system|>tool_declare<|im_middle|>{{ tools_ts_str }}<|im_end|>\n {%- else -%}\n <|im_system|>tool_declare<|im_middle|>{{ tools | tojson(separators=(',', ':')) }}<|im_end|>\n {%- endif -%}\n{%- endif -%}\n\n \n{%- for message in hist_msgs -%}\n {{set_roles(message)}}\n {%- if message['role'] == 'assistant' -%}\n <think></think>{{render_content(message)}}\n {%- if message.get('tool_calls') -%}\n {{render_toolcalls(message)}}\n {%- endif -%}\n {%- elif message['role'] == 'tool' -%}\n {%- set tool_call_id = message.tool_call_id -%}\n ## Return of {{ tool_call_id }}\n{{render_content(message)}}\n {%- elif message['content'] is not none -%}\n {{render_content(message)}}\n {%- endif -%}\n <|im_end|>\n{%- endfor -%}\n\n{%- for message in suffix_msgs -%}\n {{set_roles(message)}}\n {%- if message['role'] == 'assistant' -%}\n {%- if thinking is defined and thinking is false and preserve_thinking is false -%}\n <think></think>{{render_content(message)}}\n {%- else -%}\n {%- set rc = message.get('reasoning', message.get('reasoning_content', '')) -%}\n <think>{{rc}}</think>{{render_content(message)}}\n {%- endif -%}\n {%- if message.get('tool_calls') -%}\n {{render_toolcalls(message)}}\n {%- endif -%}\n {%- elif message['role'] == 'tool' -%}\n {%- set tool_call_id = message.tool_call_id -%}\n ## Return of {{ tool_call_id }}\n{{render_content(message)}}\n {%- elif message['content'] is not none -%}\n {{render_content(message)}}\n {%- endif -%}\n <|im_end|>\n{%- endfor -%}\n\n\n{%- if add_generation_prompt -%}\n <|im_assistant|>assistant<|im_middle|>\n {%- if thinking is defined and thinking is false -%}\n <think></think>\n {%- else -%}\n <think>\n {%- endif -%}\n{%- endif -%}"
+ }
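
A final sketch, assuming a local checkout: check that the ChatML-style markers declared in extra_special_tokens each encode to a single token id (trust_remote_code only matters if the auto_map tokenizer class is actually used):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./", trust_remote_code=True)
for marker in ("<|im_user|>", "<|im_assistant|>", "<|im_middle|>", "<|im_end|>"):
    # Each marker should come back as a single token id if registered as special.
    print(marker, tok.encode(marker, add_special_tokens=False))
```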