Commit a9abc08 (verified) · 1 parent: dc23c18
Author: dacorvo (HF Staff)

Synchronizing local compiler cache.

.gitattributes CHANGED
@@ -12206,3 +12206,6 @@ neuronxcc-2.21.18209.0+043b1bf7/MODULE_fc97bd251aea83d5ee8a+c2248236/model.neff
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_b87ced40bc798acdbb9e+ca355898/model.neff filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_b87ced40bc798acdbb9e+ca355898/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_db004394bb0ee8750948+c2248236/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_9aa5b581f9d17855f430+c2248236/model.neff filter=lfs diff=lfs merge=lfs -text
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.1.dev0/mixtral/mistralai/Mixtral-8x22B-Instruct-v0.1/b393a8aa5f0618c0a729.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_entry_class": "SingleModelCacheEntry",
+   "_model_id": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+   "_task": "text-generation",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 6144,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 65536,
+   "model_type": "mixtral",
+   "neuron": {
+     "_serialized_key": "NxDNeuronConfig",
+     "batch_size": 1,
+     "capacity_factor": null,
+     "checkpoint_id": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+     "checkpoint_revision": "cc88a6cc19fbd17d9f1c0ee0b0d70a748dce698d",
+     "continuous_batching": false,
+     "enable_bucketing": false,
+     "ep_degree": 1,
+     "fused_qkv": false,
+     "glu_mlp": true,
+     "local_ranks_size": 32,
+     "max_batch_size": 1,
+     "max_context_length": 4096,
+     "max_topk": 256,
+     "n_active_tokens": 4096,
+     "neuronxcc_version": "2.21.18209.0+043b1bf7",
+     "on_device_sampling": false,
+     "optimum_neuron_version": "0.4.1.dev0",
+     "output_logits": false,
+     "pp_degree": 1,
+     "sequence_length": 4096,
+     "speculation_length": 0,
+     "start_rank_id": 0,
+     "target": "trn1",
+     "torch_dtype": "bfloat16",
+     "tp_degree": 32
+   },
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 56,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "use_cache": true,
+   "vocab_size": 32768
+ }
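This registry entry is what makes the cached NEFFs discoverable: an export of the same checkpoint revision with the same compiler version and Neuron configuration (batch size, sequence length, tp_degree, dtype, target) can reuse the precompiled artifacts instead of recompiling. A minimal sketch of that kind of lookup follows; the helper name, file path usage and the exact set of keys compared are illustrative assumptions, not the actual optimum-neuron cache logic.

# Minimal sketch (not the optimum-neuron implementation): decide whether a
# cached registry entry such as b393a8aa5f0618c0a729.json can be reused for a
# requested export. Field names come from the JSON above; the helper name and
# the matching rule are illustrative assumptions.
import json

def entry_matches(registry_path, requested):
    with open(registry_path) as f:
        entry = json.load(f)
    neuron = entry["neuron"]
    # A cache hit requires the compiler version and the shape/parallelism
    # settings baked into the NEFFs to match the requested export exactly.
    keys = ("neuronxcc_version", "batch_size", "sequence_length",
            "tp_degree", "torch_dtype", "target")
    return all(neuron[k] == requested[k] for k in keys)

requested = {
    "neuronxcc_version": "2.21.18209.0+043b1bf7",
    "batch_size": 1,
    "sequence_length": 4096,
    "tp_degree": 32,
    "torch_dtype": "bfloat16",
    "target": "trn1",
}
print(entry_matches(
    "neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.1.dev0/mixtral/"
    "mistralai/Mixtral-8x22B-Instruct-v0.1/b393a8aa5f0618c0a729.json",
    requested,
))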
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/model.done ADDED
File without changes
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4611c39d4c44aeb5aeb06fcd310e6a26589ff1db1e7846197423903a584ac3b1
+ size 1871938
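The binary artifacts themselves are stored through Git LFS, so the repository only tracks small pointer files like the one above: a spec version, a sha256 object id and a byte size. The sketch below shows how such a pointer can be parsed and a downloaded blob checked against it; the helper names are illustrative.

# Sketch: parse a Git LFS pointer file and verify a locally downloaded blob
# against its recorded sha256 and size. Helper names are illustrative.
import hashlib

def read_pointer(path):
    with open(path) as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify_blob(pointer_path, blob_path):
    oid, size = read_pointer(pointer_path)
    with open(blob_path, "rb") as f:
        data = f.read()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# Example (paths hypothetical): compare the pointer tracked in git with a copy
# of model.hlo_module.pb fetched via `git lfs pull` or the Hub resolve URL.
# verify_blob("model.hlo_module.pb.pointer", "model.hlo_module.pb")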
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad3713f6cd4d059afce0c9b43b71ff67121825fd28e4e6e81c762e9742f18532
+ size 4137984
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9169e2934f6040938d53+ca355898/wrapped_neff.hlo ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bb8b11f326503b7664f9dcf985027fb9bc86519881b4bf4ea922df2717ed451
+ size 4465808
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9aa5b581f9d17855f430+c2248236/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9aa5b581f9d17855f430+c2248236/model.done ADDED
File without changes
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9aa5b581f9d17855f430+c2248236/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef6b17ede21f26033567a043fd3e7f523cd309614b59df81275a126d35530dbe
+ size 2544280
neuronxcc-2.21.18209.0+043b1bf7/MODULE_9aa5b581f9d17855f430+c2248236/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8a353febcec15379af788f7b882e117ad33a83647d6f54846bcdf8c91c68421
+ size 5499904