Abhinav Agarwalla committed
Commit 91f7aa0
Parent: b5a9b63

Updating pruned50-quant model

Files changed (7)
  1. README.md +2 -2
  2. config.json +4 -3
  3. model-orig.onnx +2 -2
  4. model.data +2 -2
  5. model.onnx +2 -2
  6. tokenizer.json +1 -1
  7. tokenizer_config.json +0 -4
README.md CHANGED
@@ -50,8 +50,8 @@ Model evaluation metrics and results.
 
 | Benchmark | Metric | Llama-2-7b-evolcodealpaca | Llama-2-7b-pruned50-retrained-evolcodealpaca-quant-ds |
 |------------------------------------------------|---------------|-------------|-------------------------------|
-| [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 32.03 | 32.2 |
+| [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 32.03 | 36.34 |
 
 ## Help
 
-For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)
+For further support, and discussions on these models and AI in general, join [Neural Magic's Slack Community](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)
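The updated benchmark row reports HumanEval pass@1, the fraction of problems for which a single sampled completion passes all unit tests. A minimal sketch of how a pass@1 figure is computed from per-problem results is below; the sample counts in it are placeholders for illustration, not results from this model.

```python
# Minimal sketch: pass@1 over HumanEval-style results.
# Each entry records how many of n sampled completions passed the tests
# for one problem; with n == 1 this reduces to a plain pass rate.
# The numbers below are toy placeholders, not from this evaluation.
results = [
    {"n_samples": 1, "n_passed": 1},
    {"n_samples": 1, "n_passed": 0},
    {"n_samples": 1, "n_passed": 1},
]

def pass_at_1(problems):
    """Average per-problem pass rate for k=1 (c/n per problem), as a percentage."""
    per_problem = [p["n_passed"] / p["n_samples"] for p in problems]
    return 100.0 * sum(per_problem) / len(per_problem)

print(f"pass@1 = {pass_at_1(results):.2f}")  # 66.67 for the toy data above
```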
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/home/abhinav/src/llama-recipes/llama_7b_evol_codealpaca_sparse/sparse-sft_cerebras_50sp_llama_lr5e-4_epochs3_gradclip5.0_cosine-/combined/",
+  "_name_or_path": "neuralmagic/Llama-2-7b-pruned50-retrained-evolcodealpaca-quant-ds",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -21,8 +21,9 @@
   "rope_scaling": null,
   "rope_theta": 10000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float32",
-  "transformers_version": "1.7.0.20240312",
+  "tokenizer_class": "LlamaTokenizerFast",
+  "torch_dtype": "float16",
+  "transformers_version": "1.7.0.20240506",
   "use_cache": true,
   "vocab_size": 32000
 }
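The config now points `_name_or_path` at the published repo id and records the weights as `float16`. A quick way to confirm the changed fields after the update, sketched under the assumption that the `transformers` library is installed and that the repo id matches the `_name_or_path` set in this commit:

```python
from transformers import AutoConfig

# Load only the configuration (no weights) and print the fields touched here.
config = AutoConfig.from_pretrained(
    "neuralmagic/Llama-2-7b-pruned50-retrained-evolcodealpaca-quant-ds"
)
print(config.torch_dtype)    # expected: torch.float16 (the string "float16" if torch is absent)
print(config.architectures)  # expected: ['LlamaForCausalLM']
print(config.vocab_size)     # expected: 32000
```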
model-orig.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16cb24f5b49e63173e59d9da7164a74fe968a51ddf20ef30a65dd73b87342390
-size 1220058
+oid sha256:d363691fa0eab6564db9d26c5603fd328d74a9860e2d73e962f9d000913181df
+size 1222751
model.data CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48e44b79d03f06427edc1d451b31169c04966366fff80933784bb4d84df0f213
-size 7425272832
+oid sha256:aa7081720c95cd1692e42b5cc95b1fded0d9b7560058076bbc9168835a42372c
+size 7154772992
model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db93bc16d048df38acd6eaa934e5d1bb0beca2becc6023ea0ebcdfb090a697ee
-size 1205003
+oid sha256:7a6ad1f6b97d487e6966d5b7902b2aec84bf28771841d3c09d72bf73d75591e4
+size 1207696
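The three entries above are Git LFS pointer files: the repo tracks only the `oid sha256` and `size`, while the actual ONNX graph and external-data blob are fetched by LFS. A sketch for checking that a downloaded file matches its pointer; the local path is an assumption, and the expected values simply mirror the new model.onnx pointer in this commit:

```python
import hashlib
from pathlib import Path

def lfs_checksum(path, chunk_size=1 << 20):
    """Return (sha256 hex digest, size in bytes) of a downloaded LFS object."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest(), size

# Values copied from the new model.onnx pointer above.
expected_oid = "7a6ad1f6b97d487e6966d5b7902b2aec84bf28771841d3c09d72bf73d75591e4"
expected_size = 1207696

oid, size = lfs_checksum(Path("model.onnx"))
assert oid == expected_oid and size == expected_size, "model.onnx does not match its LFS pointer"
```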
tokenizer.json CHANGED
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 2048,
+    "max_length": 384,
     "strategy": "LongestFirst",
     "stride": 0
   },
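The fast-tokenizer file now carries a default truncation `max_length` of 384 instead of 2048. Since the change is plain JSON, it can be verified directly from the file; this sketch assumes tokenizer.json has been downloaded to the working directory:

```python
import json

# Read the truncation block edited in this commit.
with open("tokenizer.json", encoding="utf-8") as f:
    tokenizer_spec = json.load(f)

truncation = tokenizer_spec["truncation"]
print(truncation["max_length"])  # expected: 384
print(truncation["strategy"])    # expected: "LongestFirst"
```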
tokenizer_config.json CHANGED
@@ -29,15 +29,11 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": false,
-  "max_length": 2048,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</s>",
   "padding_side": "right",
   "sp_model_kwargs": {},
-  "stride": 0,
   "tokenizer_class": "LlamaTokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
 }
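With `max_length`, `stride`, `truncation_side`, and `truncation_strategy` dropped from tokenizer_config.json, truncation is no longer pinned at load time and is left to the caller (or to the defaults in tokenizer.json above). A sketch of loading the tokenizer and passing truncation explicitly, assuming the `transformers` library and the repo id from this commit:

```python
from transformers import AutoTokenizer

# Load the tokenizer published in this repo.
tokenizer = AutoTokenizer.from_pretrained(
    "neuralmagic/Llama-2-7b-pruned50-retrained-evolcodealpaca-quant-ds"
)

# max_length is no longer set in tokenizer_config.json, so pass it per call.
encoded = tokenizer("def fibonacci(n):", truncation=True, max_length=384)
print(len(encoded["input_ids"]))
```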