nintwentydo committed
Commit 52a6229
1 Parent(s): 1f7a7fe

Add files using upload-large-folder tool
README.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ language:
+ - en
+ - fr
+ - de
+ - es
+ - it
+ - pt
+ - zh
+ - ja
+ - ru
+ - ko
+ license: other
+ license_name: mrl
+ base_model: mistralai/Pixtral-Large-Instruct-2411
+ base_model_relation: quantized
+ inference: false
+ license_link: https://mistral.ai/licenses/MRL-0.1.md
+ library_name: transformers
+ pipeline_tag: image-text-to-text
+ ---
+
+ # Pixtral-Large-Instruct-2411 🧡 ExLlamaV2 6.0bpw Quant
+
+ 6.0bpw quant of [Pixtral-Large-Instruct](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411).
+
+ Vision inputs work on the dev branch of [ExLlamaV2](https://github.com/turboderp/exllamav2/tree/dev); a loading sketch follows the size table below.
+
+
+ ## Tokenizer And Prompt Template
+ Uses a conversion of the v7m1 tokenizer with a 32k vocab size.
+
+ The chat template in chat_template.json uses the v7 instruct template:
+
+ ```
+ <s>[SYSTEM_PROMPT] <system prompt>[/SYSTEM_PROMPT][INST] <user message>[/INST] <assistant response></s>[INST] <user message>[/INST]
+ ```
+
+ ## Available Sizes
+ | Repo | Bits | Head Bits | Size |
+ | ----------- | ------ | ------ | ------ |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.0bpw) | 2.0 | 6.0 | 35.18 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.5bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.5bpw) | 2.5 | 6.0 | 39.34 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.0bpw) | 3.0 | 6.0 | 46.42 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.5bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.5bpw) | 3.5 | 6.0 | 53.50 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.0bpw) | 4.0 | 6.0 | 60.61 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-5.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-5.0bpw) | 5.0 | 6.0 | 74.76 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-6.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-6.0bpw) | 6.0 | 8.0 | 88.81 GB |
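
As a rough loading sketch for the 6.0bpw quant with vision input, assuming the ExLlamaV2 dev branch keeps the API of the project's multimodal example: the class and argument names below (`ExLlamaV2VisionTower`, `get_image_embeddings`, the `embeddings` keyword) come from that example and may differ on dev, and the model path and image file are placeholders.

```python
from PIL import Image
from exllamav2 import (
    ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config,
    ExLlamaV2Tokenizer, ExLlamaV2VisionTower,
)
from exllamav2.generator import ExLlamaV2DynamicGenerator

config = ExLlamaV2Config("/models/Pixtral-Large-Instruct-2411-exl2-6.0bpw")  # placeholder path

# Load the vision tower first, then autosplit the language model across GPUs.
vision = ExLlamaV2VisionTower(config)
vision.load(progress=True)
model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, max_seq_len=16384, lazy=True)
model.load_autosplit(cache, progress=True)
tokenizer = ExLlamaV2Tokenizer(config)
generator = ExLlamaV2DynamicGenerator(model=model, cache=cache, tokenizer=tokenizer)

# Embed the image and splice its placeholder alias into a v7 instruct prompt.
emb = vision.get_image_embeddings(model=model, tokenizer=tokenizer,
                                  image=Image.open("example.jpg"))
prompt = f"<s>[INST] {emb.text_alias}Describe this image.[/INST]"

print(generator.generate(prompt=prompt, max_new_tokens=200, add_bos=False,
                         encode_special_tokens=True, embeddings=[emb]))
```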
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "chat_template": "{{- bos_token }} \n{%- for message in messages %} \n {%- if message['role'] == 'user' %} \n {{- '[INST]' + ' ' }} \n {%- if message['content'] is not string %} \n {%- for chunk in message['content'] %} \n {%- if chunk['type'] == 'text' %} \n {{- chunk['content'] }} \n {%- elif chunk['type'] == 'image' %} \n {{- '[IMG]' }} \n {%- else %} \n {{- raise_exception('Unrecognized content type!') }} \n {%- endif %} \n {%- endfor %} \n {%- else %} \n {{- message['content'] }} \n {%- endif %} \n {{- '[\/INST]' }} \n {%- if not loop.last and messages[loop.index]['role'] == 'user' %} \n {{- eos_token }} \n {%- endif %} \n {%- elif message['role'] == 'system' %} \n {{- '[SYSTEM_PROMPT] ' + message['content'] + '[\/SYSTEM_PROMPT]' }} \n {%- elif message['role'] == 'assistant' %} \n {{- ' ' + message['content'] + eos_token }} \n {%- else %} \n {{- raise_exception('Only user, system and assistant roles are supported!') }} \n {%- endif %} \n{%- endfor %}"
+ }
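
To sanity-check the template outside of transformers, it can be rendered directly with Jinja2 (the engine transformers uses for chat templates). Note that, per the template above, text chunks carry their text under a `content` key rather than `text`; the example messages here are made up.

```python
import json
from jinja2 import Environment

def raise_exception(msg):  # the template calls this for unsupported inputs
    raise ValueError(msg)

env = Environment()
env.globals["raise_exception"] = raise_exception

with open("chat_template.json") as f:
    template = env.from_string(json.load(f)["chat_template"])

print(template.render(
    bos_token="<s>",
    eos_token="</s>",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "image"},  # rendered as [IMG]
            {"type": "text", "content": "Describe this image."},
        ]},
    ],
))
```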
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "architectures": [
+     "LlavaForConditionalGeneration"
+   ],
+   "ignore_index": -100,
+   "image_seq_length": 1,
+   "image_token_index": 10,
+   "model_type": "llava",
+   "multimodal_projector_bias": false,
+   "projector_hidden_act": "gelu",
+   "text_config": {
+     "hidden_size": 12288,
+     "intermediate_size": 28672,
+     "is_composition": true,
+     "max_position_embeddings": 131072,
+     "model_type": "mistral",
+     "norm_eps": 1e-05,
+     "rms_norm_eps": 1e-05,
+     "num_attention_heads": 96,
+     "num_hidden_layers": 88,
+     "num_key_value_heads": 8,
+     "rope_theta": 1000000000.0,
+     "sliding_window": null,
+     "vocab_size": 32768
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.0.dev0",
+   "vision_config": {
+     "head_dim": 88,
+     "hidden_act": "silu",
+     "hidden_size": 1408,
+     "image_size": 1024,
+     "image_token_id": 10,
+     "intermediate_size": 6144,
+     "model_type": "pixtral",
+     "num_hidden_layers": 40,
+     "num_attention_heads": 16,
+     "patch_size": 16,
+     "rope_theta": 10000.0
+   },
+   "vision_feature_layer": -1,
+   "vision_feature_select_strategy": "full",
+   "quantization_config": {
+     "quant_method": "exl2",
+     "version": "0.2.6",
+     "bits": 6.0,
+     "head_bits": 8,
+     "calibration": {
+       "rows": 115,
+       "length": 2048,
+       "dataset": "(default)"
+     }
+   }
+ }
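
One practical consequence of the `text_config` values above: with 96 attention heads on a 12288 hidden size, head_dim is 128, and the 8 KV heads (grouped-query attention) keep the KV cache comparatively small for an 88-layer model. A back-of-the-envelope estimate, assuming an unquantized FP16 cache:

```python
hidden, heads, kv_heads, layers = 12288, 96, 8, 88   # from text_config above
head_dim = hidden // heads                           # 128

# K and V, per layer, per token, at 2 bytes each (FP16). ExLlamaV2's
# quantized Q4 cache would need roughly a quarter of this.
kv_bytes_per_token = 2 * layers * kv_heads * head_dim * 2
print(kv_bytes_per_token)                  # 360448 bytes ≈ 0.34 MiB per token
print(kv_bytes_per_token * 32768 / 2**30)  # ≈ 11.0 GiB for a 32k-token context
```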
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.47.0.dev0"
+ }
model.safetensors.index.json ADDED
The diff for this file is too large to render.
output-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a736d9cafe7744d7409478d4ececd89e72c685023f0bd8853a3d05d78a89390
+ size 10683689220
output-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:448048282bc88555cf7e082d8c8e2bba4ac5fc3e48248722e3c8ba288dc13d86
+ size 10576494032
output-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc84a8ff46e1cf27c1f8bfc982666091f68e96ba92a6debce6f87a5994c71f2a
+ size 10696793948
output-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd1f855ebb182bcf2d1a981dc720e155bd896ce3254f46a6032977358906353a
+ size 10727925264
output-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20e858e283c979b2f969534c0d1ef711cced5dcda4e6b965ace77ca49b5faa5d
+ size 10641505964
output-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4860143d9308514a55a92c6e7805d7feb434a4aac7a4a648ba7139a7ac831c5f
+ size 10719560992
output-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6206e93069fc0466f736938dc09c507de177f5ed6520531a9184f7c8b8c080b
+ size 10650360532
output-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff313d0010e633fdc7cd6e28fc0ba30d849660d1faaa43746aec704e1238a30a
+ size 10471778584
output-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed903113775573ee1132a00c3082dfae44f060a8aa8bd601aeb8bfae1a2d711e
+ size 10186088152
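
The shard entries above are git-lfs pointer files: each records the SHA-256 (`oid`) and byte size of the real file that LFS downloads. A downloaded shard can be checked against its pointer like so (hash value taken from shard 9 above):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so multi-GB shards never sit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

expected = "ed903113775573ee1132a00c3082dfae44f060a8aa8bd601aeb8bfae1a2d711e"
assert sha256_of("output-00009-of-00009.safetensors") == expected
```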
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "PixtralImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "patch_size": {
+     "height": 16,
+     "width": 16
+   },
+   "processor_class": "PixtralProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "longest_edge": 1024
+   }
+ }
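
Read together, these flags describe the image pipeline: convert to RGB, resize so the longest edge is at most 1024 with bicubic resampling (`resample: 3`), rescale by 1/255, then normalize with the listed mean/std. A rough re-implementation is below; the exact resize rounding of `PixtralImageProcessor` may differ, and snapping dimensions to the 16-pixel patch grid is an assumption.

```python
import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
STD = np.array([0.26862954, 0.26130258, 0.27577711])

def preprocess(img: Image.Image, longest_edge: int = 1024, patch: int = 16) -> np.ndarray:
    img = img.convert("RGB")                        # do_convert_rgb
    scale = min(1.0, longest_edge / max(img.size))  # do_resize: cap the longest edge
    # Assumed rounding: snap to the 16x16 patch grid.
    w = max(patch, round(img.width * scale / patch) * patch)
    h = max(patch, round(img.height * scale / patch) * patch)
    img = img.resize((w, h), resample=Image.BICUBIC)  # resample 3 = bicubic
    x = np.asarray(img) * 0.00392156862745098       # do_rescale: 1/255
    x = (x - MEAN) / STD                            # do_normalize, per channel
    return x.transpose(2, 0, 1)                     # HWC -> CHW
```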
processor_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "image_break_token": "[IMG_BREAK]",
+   "image_end_token": "[IMG_END]",
+   "image_token": "[IMG]",
+   "patch_size": 16,
+   "processor_class": "PixtralProcessor"
+ }
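
These special tokens lay out an image in the prompt: Pixtral emits one `[IMG]` per 16x16 patch, row by row, with `[IMG_BREAK]` between rows and `[IMG_END]` after the last. A small illustrative helper (not part of this repo):

```python
def image_token_string(width: int, height: int, patch: int = 16) -> str:
    """Placeholder token layout for a preprocessed width x height image."""
    cols, rows = width // patch, height // patch
    row = "[IMG]" * cols
    return "[IMG_BREAK]".join([row] * rows) + "[IMG_END]"

print(image_token_string(64, 32))
# [IMG][IMG][IMG][IMG][IMG_BREAK][IMG][IMG][IMG][IMG][IMG_END]
```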
special_tokens_map.json ADDED
The diff for this file is too large to render.
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b968b8dc352f42192367337c78ccc61e1eaddc6d641a579372d4f20694beb7a
+ size 587562
tokenizer_config.json ADDED
The diff for this file is too large to render.