Text Generation
MLX
Safetensors
qwen2
chat
conversational
Eval Results
4-bit precision
jy-hxy committed on
Commit
4632720
1 Parent(s): 0a6c17e

Upload folder using huggingface_hub (#1)

Browse files

- 660219ad7575796f8abdab78f55b84d47658d8405ab1c24dccdcaee2dcc6980a (b0e94aadbb179951c70e7dce508d902646149cfc)
- 6004b88b98b568fc7c0552c8764528230bc3ff0b5bbface37e56d8e8ef86b9d5 (21436c115e1d3ad2669aaa03cb57d4ec06525ba1)
- 462365a7f01e77151e6233baf56cb0f5621aff254e065ee78638589f09579d05 (71b3878d13e088ec0b3d10a79ff34c7c758f2186)
- 97ff10c7cf7d48bd444d839bff4775e2256ae35ccc23663fdd04644881cc8a9b (09fe8365be6df89e97ff0cf0f7bd63b4d9cf3c0d)
- a7522c8be2bb0346e7de006da761b61b7b3b95240e5d1aa39096eca0a20899d5 (0b9cf7012a13d866af849db565a4c9c8a7fdde28)
- 7f0ad04f8313de1b22d464ed071c44bd67e3a98527ddcd2552b863e7595d2842 (fa554c3ac877528eb21aee532acc21d3da164d08)
- 7e2cdec0bc68134ce83d307993a0ec2e202a46b10747060eef762c0a1ea3ef9f (5696f67b3c69f88d4c6da0ba17e68f567c654520)
- 818d911845a73f45f12ff9fcc9b653461a0785072adee4580a14870bb5a5ce51 (86a0742669784f47a09357bbfad9ef44b177a733)
- 092a9d76a0cf672891d17247453dfb97b739dbf5e5874688d4bf875db8bf3e12 (b432d4b59dd755ceae2a206c69448020e87ce40b)

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ - fr
5
+ - de
6
+ - es
7
+ - it
8
+ - pt
9
+ - ru
10
+ - zh
11
+ - ja
12
+ license: other
13
+ tags:
14
+ - chat
15
+ - mlx
16
+ base_model: anthracite-org/magnum-v2-72b
17
+ datasets:
18
+ - Doctor-Shotgun/C2-Stheno
19
+ - anthracite-org/kalo-opus-instruct-22k-no-refusal
20
+ - anthracite-org/nopm_claude_writing_fixed
21
+ license_name: tongyi-qianwen
22
+ license_link: https://huggingface.co/anthracite-org/magnum-v2-72b/blob/main/LICENSE
23
+ pipeline_tag: text-generation
24
+ model-index:
25
+ - name: magnum-v2-72b
26
+ results:
27
+ - task:
28
+ type: text-generation
29
+ name: Text Generation
30
+ dataset:
31
+ name: IFEval (0-Shot)
32
+ type: HuggingFaceH4/ifeval
33
+ args:
34
+ num_few_shot: 0
35
+ metrics:
36
+ - type: inst_level_strict_acc and prompt_level_strict_acc
37
+ value: 75.6
38
+ name: strict accuracy
39
+ source:
40
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v2-72b
41
+ name: Open LLM Leaderboard
42
+ - task:
43
+ type: text-generation
44
+ name: Text Generation
45
+ dataset:
46
+ name: BBH (3-Shot)
47
+ type: BBH
48
+ args:
49
+ num_few_shot: 3
50
+ metrics:
51
+ - type: acc_norm
52
+ value: 57.85
53
+ name: normalized accuracy
54
+ source:
55
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v2-72b
56
+ name: Open LLM Leaderboard
57
+ - task:
58
+ type: text-generation
59
+ name: Text Generation
60
+ dataset:
61
+ name: MATH Lvl 5 (4-Shot)
62
+ type: hendrycks/competition_math
63
+ args:
64
+ num_few_shot: 4
65
+ metrics:
66
+ - type: exact_match
67
+ value: 31.65
68
+ name: exact match
69
+ source:
70
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v2-72b
71
+ name: Open LLM Leaderboard
72
+ - task:
73
+ type: text-generation
74
+ name: Text Generation
75
+ dataset:
76
+ name: GPQA (0-shot)
77
+ type: Idavidrein/gpqa
78
+ args:
79
+ num_few_shot: 0
80
+ metrics:
81
+ - type: acc_norm
82
+ value: 18.12
83
+ name: acc_norm
84
+ source:
85
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v2-72b
86
+ name: Open LLM Leaderboard
87
+ - task:
88
+ type: text-generation
89
+ name: Text Generation
90
+ dataset:
91
+ name: MuSR (0-shot)
92
+ type: TAUR-Lab/MuSR
93
+ args:
94
+ num_few_shot: 0
95
+ metrics:
96
+ - type: acc_norm
97
+ value: 14.18
98
+ name: acc_norm
99
+ source:
100
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v2-72b
101
+ name: Open LLM Leaderboard
102
+ - task:
103
+ type: text-generation
104
+ name: Text Generation
105
+ dataset:
106
+ name: MMLU-PRO (5-shot)
107
+ type: TIGER-Lab/MMLU-Pro
108
+ config: main
109
+ split: test
110
+ args:
111
+ num_few_shot: 5
112
+ metrics:
113
+ - type: acc
114
+ value: 49.51
115
+ name: accuracy
116
+ source:
117
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v2-72b
118
+ name: Open LLM Leaderboard
119
+ ---
120
+
121
+ # mlx-community/magnum-v2-72b-4bit
122
+
123
+ The Model [mlx-community/magnum-v2-72b-4bit](https://huggingface.co/mlx-community/magnum-v2-72b-4bit) was
124
+ converted to MLX format from [anthracite-org/magnum-v2-72b](https://huggingface.co/anthracite-org/magnum-v2-72b)
125
+ using mlx-lm version **0.20.4**.
126
+
127
+ ## Use with mlx
128
+
129
+ ```bash
130
+ pip install mlx-lm
131
+ ```
132
+
133
+ ```python
134
+ from mlx_lm import load, generate
135
+
136
+ model, tokenizer = load("mlx-community/magnum-v2-72b-4bit")
137
+
138
+ prompt="hello"
139
+
140
+ if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
141
+ messages = [{"role": "user", "content": prompt}]
142
+ prompt = tokenizer.apply_chat_template(
143
+ messages, tokenize=False, add_generation_prompt=True
144
+ )
145
+
146
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
147
+ ```
added_tokens.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 151643,
3
+ "<|im_end|>": 151645,
4
+ "<|im_start|>": 151644
5
+ }
config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "eos_token_id": 151645,
7
+ "hidden_act": "silu",
8
+ "hidden_size": 8192,
9
+ "initializer_range": 0.02,
10
+ "intermediate_size": 29568,
11
+ "max_position_embeddings": 32768,
12
+ "max_window_layers": 80,
13
+ "model_type": "qwen2",
14
+ "num_attention_heads": 64,
15
+ "num_hidden_layers": 80,
16
+ "num_key_value_heads": 8,
17
+ "quantization": {
18
+ "group_size": 64,
19
+ "bits": 4
20
+ },
21
+ "quantization_config": {
22
+ "group_size": 64,
23
+ "bits": 4
24
+ },
25
+ "rms_norm_eps": 1e-06,
26
+ "rope_theta": 1000000.0,
27
+ "sliding_window": null,
28
+ "tie_word_embeddings": false,
29
+ "torch_dtype": "bfloat16",
30
+ "transformers_version": "4.43.4",
31
+ "use_cache": false,
32
+ "use_sliding_window": false,
33
+ "vocab_size": 152064
34
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf6cb2336bd6567bc5602cb02971b9ff971aacba656fc51fb6305ba1c80e8b90
3
+ size 5365567669
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:422eef2c7933a01c9268c5885b3e7356394372f1b9d8117cf44b77a13c72d7e4
3
+ size 5294878254
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:459afd45085e438cf2bad806f11e0cbd4913713b4c5b34ab7c66731d401febbe
3
+ size 5346171097
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8e53cdaf20a46968290a2cf0922eee000a733e5f3aa7844ccdf8437ea956810
3
+ size 5294845297
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a3fdca3bd8c4a87867abe3b8e1fd57e4d77f39b5b58ed9743f1c7cf7268b3fe
3
+ size 5294878217
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5de7441a2a58450c5b700216a203b3a4efb44858d8ded4b3534b8648e147855
3
+ size 5294878204
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed2646d3348f270cf9820272ab0ec78517147b001620a2cf8eb3bba476418356
3
+ size 5346171091
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22a7846478c49fedf03ca7415e0e363295795d43e7c816856a3c9e5738d4edff
3
+ size 3663161100
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|im_end|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcfe42da0a4497e8b2b172c1f9f4ec423a46dc12907f4349c55025f670422ba9
3
+ size 11418266
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": [
30
+ "<|im_start|>",
31
+ "<|im_end|>"
32
+ ],
33
+ "bos_token": null,
34
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
35
+ "clean_up_tokenization_spaces": false,
36
+ "eos_token": "<|im_end|>",
37
+ "errors": "replace",
38
+ "extra_special_tokens": {},
39
+ "model_max_length": 131072,
40
+ "pad_token": "<|endoftext|>",
41
+ "split_special_tokens": false,
42
+ "tokenizer_class": "Qwen2Tokenizer",
43
+ "unk_token": null
44
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff