Text Generation
Transformers
Safetensors
llama
sparse
code
Inference Endpoints
text-generation-inference
abhinavnmagic commited on
Commit
81c45d8
1 Parent(s): cf3b84f

Updating pruned50 model

Browse files
README.md CHANGED
@@ -51,7 +51,7 @@ Model evaluation metrics and results.
51
 
52
  | Benchmark | Metric | Llama-2-7b-evolcodealpaca | Llama-2-7b-pruned50-retrained-evolcodealpaca |
53
  |------------------------------------------------|---------------|-------------|-------------------------------|
54
- | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 32.03 | 32.7 |
55
 
56
  ## Model Training Details
57
 
@@ -60,4 +60,4 @@ Training was performed for 2 epochs and used the [SquareHead](https://arxiv.org/
60
 
61
  ## Help
62
 
63
- For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)
 
51
 
52
  | Benchmark | Metric | Llama-2-7b-evolcodealpaca | Llama-2-7b-pruned50-retrained-evolcodealpaca |
53
  |------------------------------------------------|---------------|-------------|-------------------------------|
54
+ | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 32.03 | 38.15 |
55
 
56
  ## Model Training Details
57
 
 
60
 
61
  ## Help
62
 
63
+ For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "neuralmagic/Llama-2-7b-pruned50-retrained-evolcodealpaca",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
@@ -21,8 +21,9 @@
21
  "rope_scaling": null,
22
  "rope_theta": 10000.0,
23
  "tie_word_embeddings": false,
 
24
  "torch_dtype": "bfloat16",
25
- "transformers_version": "4.40.0",
26
  "use_cache": true,
27
  "vocab_size": 32000
28
  }
 
1
  {
2
+ "_name_or_path": ".",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
 
21
  "rope_scaling": null,
22
  "rope_theta": 10000.0,
23
  "tie_word_embeddings": false,
24
+ "tokenizer_class": "LlamaTokenizerFast",
25
  "torch_dtype": "bfloat16",
26
+ "transformers_version": "4.39.3",
27
  "use_cache": true,
28
  "vocab_size": 32000
29
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
- "transformers_version": "4.40.0"
6
  }
 
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
+ "transformers_version": "4.39.3"
6
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3acb50b1669e03513afcae71e26801b82295c6acc45800bdd1e13410fb7cfcfb
3
  size 4938985352
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:184a7dbb9d6dd5dda7364519c5c5d62d354aeb0c8dcbd2b9bff328e34a170c81
3
  size 4938985352
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c728ac27cabffac751ea52110447918dcec944d3fd845185d795536eb027ec07
3
  size 4947390880
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9dcf450d4603fa17fa73f9d8cd107239ed5436b23f6a310c2efaaf96584f1cc8
3
  size 4947390880
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0029f629aa518455d7b12ee12459dea2f943e7e923f2126c6d091bc537553cab
3
  size 3590488816
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f7b268010672b6c6db49ee36a8a357fab231b171b67d3732707c130cc28a144
3
  size 3590488816