dacorvo (HF Staff) committed
Commit 5d907a4 · verified · 1 Parent(s): c564241

Synchronizing local compiler cache.

This view is limited to 50 files because it contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +16 -0
  2. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/60feecaa0c4c075e2f3e46a3f55d9a273f0ddd75a0ecf64e4ae27352e0819506/1c4ee5d7dc71b8843fca.json +87 -0
  3. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/026fe44014d3f650a32e.json +95 -0
  4. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/2ad8fd5368e5c42f132c.json +95 -0
  5. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/66877d91fb0121840163.json +95 -0
  6. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/ae5d00317f4b117f94a8.json +95 -0
  7. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/d9b9f628f0dee7a926d6.json +95 -0
  8. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/07552dc6c695df3ea557.json +95 -0
  9. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/25770924bad8fff9ec23.json +95 -0
  10. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/4f788775782f89b676c9.json +95 -0
  11. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/7190036e8ed3be94399f.json +95 -0
  12. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/9054b94f39e5c374b6b8.json +95 -0
  13. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/bed88d82f075f516941d.json +95 -0
  14. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/cf5f2cf31cc338bcdce9.json +95 -0
  15. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/d06a9ca97bbded610b72.json +95 -0
  16. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/d7e075cbb2c6bd78d6b4.json +95 -0
  17. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/e189289909e4808416f7.json +95 -0
  18. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/e79a64794f75d8045060.json +95 -0
  19. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/qwen3/Qwen/Qwen3-Embedding-0.6B/1c4ee5d7dc71b8843fca.json +87 -0
  20. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/qwen3/Qwen/Qwen3-Embedding-4B/07552dc6c695df3ea557.json +95 -0
  21. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/qwen3/Qwen/Qwen3-Embedding-8B/d9b9f628f0dee7a926d6.json +95 -0
  22. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/smollm3/HuggingFaceTB/SmolLM3-3B/588f7836eb16c9483d90.json +134 -0
  23. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5/cf6b9a360dcf294104671106bae2adbd9fd291823bb60a351883163684073231/22277b72a5862009f452.json +63 -0
  24. neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5/llama/unsloth/Llama-3.2-1B-Instruct/22277b72a5862009f452.json +63 -0
  25. neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/compile_flags.json +1 -0
  26. neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.done +0 -0
  27. neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.hlo_module.pb +3 -0
  28. neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.neff +3 -0
  29. neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/compile_flags.json +1 -0
  30. neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.done +0 -0
  31. neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.hlo_module.pb +3 -0
  32. neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.neff +3 -0
  33. neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/compile_flags.json +1 -0
  34. neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.done +0 -0
  35. neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.hlo_module.pb +3 -0
  36. neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.neff +3 -0
  37. neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/compile_flags.json +1 -0
  38. neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.done +0 -0
  39. neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.hlo_module.pb +3 -0
  40. neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.neff +3 -0
  41. neuronxcc-2.21.33363.0+82129205/MODULE_2da63caa31e7595bc07f+fb4cc044/model.neff +1 -1
  42. neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/compile_flags.json +1 -0
  43. neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.done +0 -0
  44. neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.hlo_module.pb +3 -0
  45. neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.neff +3 -0
  46. neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/compile_flags.json +1 -0
  47. neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.done +0 -0
  48. neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.hlo_module.pb +3 -0
  49. neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.neff +3 -0
  50. neuronxcc-2.21.33363.0+82129205/MODULE_4effee1c1788c6eeab78+fb4cc044/compile_flags.json +1 -0
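
Each MODULE_* directory listed above holds the artifacts the Neuron compiler cache keeps for one compiled graph: the compilation flags (compile_flags.json), the serialized HLO module (model.hlo_module.pb), the compiled binary (model.neff) and a model.done marker. As a rough illustration of that layout, here is a minimal sketch that walks a local cache directory and reports which modules carry a complete set of artifacts; the cache path is a placeholder, not something defined by this commit.

```python
from pathlib import Path

# Placeholder path: point this at wherever the local Neuron compile cache lives.
CACHE_ROOT = Path("/var/tmp/neuron-compile-cache/neuronxcc-2.21.33363.0+82129205")

# Artifacts each MODULE_* directory is expected to contain, based on the listing above.
EXPECTED = ("compile_flags.json", "model.hlo_module.pb", "model.neff", "model.done")

for module_dir in sorted(CACHE_ROOT.glob("MODULE_*")):
    missing = [name for name in EXPECTED if not (module_dir / name).exists()]
    status = "complete" if not missing else "missing " + ", ".join(missing)
    print(f"{module_dir.name}: {status}")
```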
.gitattributes CHANGED
@@ -6298,3 +6298,19 @@ neuronxcc-2.21.33363.0+82129205/MODULE_cb86ed7fc724b06726b3+fb4cc044/model.neff
 neuronxcc-2.21.18209.0+043b1bf7/MODULE_a9ea3bcc615b10517bb2+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.21.33363.0+82129205/MODULE_9b4ea40b364ed3edd3ad+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.21.33363.0+82129205/MODULE_55c2fa803a7e3881cef6+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_4effee1c1788c6eeab78+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_4fcdffc44bb528a047f0+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_5fdd7ad15e00da4fcd3e+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_78602cd5234279501590+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_8badd6f6eb69fa108ac8+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_93b5314f91b367ce312a+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_9c01f09fb84f2b339fc6+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_a08dd31a7a105fa45df2+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_c913669652fcaa9d5638+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.33363.0+82129205/MODULE_d3f277cf573c91b9ded8+fb4cc044/model.neff filter=lfs diff=lfs merge=lfs -text
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/60feecaa0c4c075e2f3e46a3f55d9a273f0ddd75a0ecf64e4ae27352e0819506/1c4ee5d7dc71b8843fca.json ADDED
@@ -0,0 +1,87 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-0.6B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-0.6B",
+ "checkpoint_revision": "c54f2e6e80b2d7b7de06f51cec4959f6b3e03418",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 1,
+ "max_batch_size": 1,
+ "max_context_length": 8192,
+ "max_topk": 256,
+ "n_active_tokens": 8192,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 8192,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 1
+ },
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151669
+ }
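
Each 0_REGISTRY entry such as the one above pairs the exported model's configuration with the NxD Neuron compilation parameters stored under its "neuron" key (batch size, sequence length, tensor parallel degree, compiler version, and so on). The sketch below is one way to inspect such an entry after downloading it locally; the local file name is assumed, and only keys that appear in the entry above are read.

```python
import json
from pathlib import Path

# Assumed local copy of the registry entry shown above.
entry = json.loads(Path("1c4ee5d7dc71b8843fca.json").read_text())
neuron = entry["neuron"]

print(f"model:    {entry['_model_id']} ({entry['_task']})")
print(f"compiler: {neuron['neuronxcc_version']} (optimum-neuron {neuron['optimum_neuron_version']})")
print(f"shapes:   batch_size={neuron['batch_size']}, sequence_length={neuron['sequence_length']}")
print(f"sharding: tp_degree={neuron['tp_degree']}, target={neuron['target']}")
```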
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/026fe44014d3f650a32e.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-8B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-8B",
+ "checkpoint_revision": "1d8ad4ca9b3dd8059ad90a75d4983776a23d44af",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 16,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/2ad8fd5368e5c42f132c.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-8B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-8B",
+ "checkpoint_revision": "1d8ad4ca9b3dd8059ad90a75d4983776a23d44af",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/66877d91fb0121840163.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-8B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-8B",
+ "checkpoint_revision": "1d8ad4ca9b3dd8059ad90a75d4983776a23d44af",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 16,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/ae5d00317f4b117f94a8.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-8B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-8B",
+ "checkpoint_revision": "1d8ad4ca9b3dd8059ad90a75d4983776a23d44af",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 16,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/af58eb15d8e02338dc2f2e880e9c6ec803a98278914b3606acdcc252e7e18429/d9b9f628f0dee7a926d6.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-8B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-8B",
+ "checkpoint_revision": "1d8ad4ca9b3dd8059ad90a75d4983776a23d44af",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/07552dc6c695df3ea557.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 1,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 1
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/25770924bad8fff9ec23.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/4f788775782f89b676c9.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 16,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/7190036e8ed3be94399f.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 16,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/9054b94f39e5c374b6b8.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/bed88d82f075f516941d.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/cf5f2cf31cc338bcdce9.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 16,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/d06a9ca97bbded610b72.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/d7e075cbb2c6bd78d6b4.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 64,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 64,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/e189289909e4808416f7.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/e0b6d1e2424243dcd9ff1755e02969dcc312d14df531d876c5c2892f285b2863/e79a64794f75d8045060.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 8,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/qwen3/Qwen/Qwen3-Embedding-0.6B/1c4ee5d7dc71b8843fca.json ADDED
@@ -0,0 +1,87 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-0.6B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-0.6B",
+ "checkpoint_revision": "c54f2e6e80b2d7b7de06f51cec4959f6b3e03418",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 1,
+ "max_batch_size": 1,
+ "max_context_length": 8192,
+ "max_topk": 256,
+ "n_active_tokens": 8192,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 8192,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 1
+ },
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151669
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/qwen3/Qwen/Qwen3-Embedding-4B/07552dc6c695df3ea557.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-4B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-4B",
+ "checkpoint_revision": "5cf2132abc99cad020ac570b19d031efec650f2b",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 1,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 1
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
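Note: each registry entry above pairs the model's original config fields with a "neuron" block that records how the checkpoint was compiled (batch size, sequence length, tensor-parallel degree, target device). As a minimal sketch, assuming a local clone of this repository and using only the Python standard library, the entry just added can be inspected like this; the file path is taken from this commit.

```python
# Minimal sketch: inspect one cached registry entry from this commit (stdlib only).
import json
from pathlib import Path

entry_path = Path(
    "neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/"
    "qwen3/Qwen/Qwen3-Embedding-4B/07552dc6c695df3ea557.json"
)

with entry_path.open() as f:
    entry = json.load(f)

# Top-level keys mirror the model's config.json; the "neuron" block records the
# compilation parameters used for this cached artifact.
neuron_cfg = entry["neuron"]
print(entry["_model_id"], entry["_task"])
print({k: neuron_cfg[k] for k in ("batch_size", "sequence_length", "tp_degree", "target")})
```

The same pattern applies to every other entry under 0_REGISTRY in this commit.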
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/qwen3/Qwen/Qwen3-Embedding-8B/d9b9f628f0dee7a926d6.json ADDED
@@ -0,0 +1,95 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-Embedding-8B",
+ "_task": "feature-extraction",
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-Embedding-8B",
+ "checkpoint_revision": "1d8ad4ca9b3dd8059ad90a75d4983776a23d44af",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151665
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5.dev2/smollm3/HuggingFaceTB/SmolLM3-3B/588f7836eb16c9483d90.json ADDED
@@ -0,0 +1,134 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "HuggingFaceTB/SmolLM3-3B",
+ "_task": "text-generation",
+ "architectures": [
+ "SmolLM3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 65536,
+ "max_window_layers": 28,
+ "mlp_bias": false,
+ "model_type": "smollm3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "HuggingFaceTB/SmolLM3-3B",
+ "checkpoint_revision": "a07cc9a04f16550a088caea529712d1d335b0ac1",
+ "continuous_batching": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 1024,
+ "max_topk": 256,
+ "n_active_tokens": 1024,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.5.dev2",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 1024,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "no_rope_layer_interval": 4,
+ "no_rope_layers": [
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0
+ ],
+ "num_attention_heads": 16,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 4,
+ "pretraining_tp": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 5000000.0,
+ "sliding_window": null,
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5/cf6b9a360dcf294104671106bae2adbd9fd291823bb60a351883163684073231/22277b72a5862009f452.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "unsloth/Llama-3.2-1B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 64,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "unsloth/Llama-3.2-1B-Instruct",
+ "checkpoint_revision": "5a8abab4a5d6f164389b1079fb721cfab8d7126c",
+ "continuous_batching": true,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.5",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 32.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": true,
+ "unsloth_fixed": true,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.33363.0+82129205/0_REGISTRY/0.4.5/llama/unsloth/Llama-3.2-1B-Instruct/22277b72a5862009f452.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "unsloth/Llama-3.2-1B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "dtype": "bfloat16",
+ "head_dim": 64,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "unsloth/Llama-3.2-1B-Instruct",
+ "checkpoint_revision": "5a8abab4a5d6f164389b1079fb721cfab8d7126c",
+ "continuous_batching": true,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.33363.0+82129205",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.5",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 32.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": true,
+ "unsloth_fixed": true,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.done ADDED
File without changes
neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c252885b17164c4e4bf832f391aba7f3d8c7139bb59b1d4ee6df54c77398869
+ size 840640
neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c01b0bcf9bc956fe6facbe1b5c2a47bb487a061ab522c3a8cb46305bae28028f
+ size 54754304
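Note: the model.hlo_module.pb and model.neff artifacts are tracked with Git LFS, so the repository only stores three-line pointer files (version, oid, size) like the ones above. A small illustrative parser, assuming the files are still un-smudged pointers rather than the fetched binaries:

```python
# Minimal sketch: parse a Git LFS pointer file into its version / oid / size fields.
# Assumes the local file is still a pointer (LFS content not fetched).
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "neuronxcc-2.21.33363.0+82129205/MODULE_14268084bdb93e2af8ea+fb4cc044/model.neff"
)
print(pointer["oid"], int(pointer["size"]))
```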
neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.done ADDED
File without changes
neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f061fa115ae7cc4aa283bb767164180d1817ccea49eb05c899e7f4d05b03a1db
+ size 618697
neuronxcc-2.21.33363.0+82129205/MODULE_17b4453648b482087f44+fb4cc044/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:116d5e8b16d09e647e218ae20fe1a255681d13daa59e28082c5e5ab4a7a8e03f
+ size 8070144
neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.done ADDED
File without changes
neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a917908cdedb3336190bdbfa5dc4c0b006052c9672b3c51ee0aa7c7a9e1e439
+ size 628622
neuronxcc-2.21.33363.0+82129205/MODULE_1efaefc590ce7ee07e97+fb4cc044/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cdc725c9067ddd09df8d0768aa1744294c6717afb9cb332d40ec2fa3d13f86d
+ size 2888704
neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.done ADDED
File without changes
neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4bfd87783375e038bf25e0e2d8810154384e5f8e2e73640820a95ef156e16d6
+ size 793180
neuronxcc-2.21.33363.0+82129205/MODULE_2828c4ae6bc360cc555f+fb4cc044/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:121a4a45c22c23eaa0eb9bc94dcd4f162557e2c0574c2f9eefb1d7bcd069895b
+ size 242791424
neuronxcc-2.21.33363.0+82129205/MODULE_2da63caa31e7595bc07f+fb4cc044/model.neff CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd5dc789465f3d32882920308f3d4a712edf866512104c7effa624cfea21354d
+ oid sha256:b096fd09961ede9fc78f22a88f39e2023b064d98e4ecf8cae48c55a4e5c7b80e
  size 18473984
neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.done ADDED
File without changes
neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8270a9ede2522ef5f9691a06f9f5622f5923a5b4344b00ac1bbe7b10b666e6de
+ size 838840
neuronxcc-2.21.33363.0+82129205/MODULE_32f3df92f722f8d61840+fb4cc044/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7237e792db77272b7eadefed43ad0b601d2d5eb78ea028a48d5a0a244ba47466
+ size 26809344
neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.done ADDED
File without changes
neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a72c26ea4bfff05052405e8a360c5ead4dc213d746221968e452c3adbed7964
+ size 618481
neuronxcc-2.21.33363.0+82129205/MODULE_3683fa1d292af356adae+fb4cc044/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2413f921e113dbc56d84a84a618ea2db978d407f5460463124a47a05840ab24
+ size 4271104
neuronxcc-2.21.33363.0+82129205/MODULE_4effee1c1788c6eeab78+fb4cc044/compile_flags.json ADDED
@@ -0,0 +1 @@
 
 
1
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/encoding/_tp0_bk0/log-neuron-cc.txt"]