nintwentydo committed
Commit 3b98499
1 Parent(s): 1df1eaf

Add files using upload-large-folder tool
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ language:
+ - en
+ - fr
+ - de
+ - es
+ - it
+ - pt
+ - zh
+ - ja
+ - ru
+ - ko
+ license: other
+ license_name: mrl
+ base_model: mistralai/Pixtral-Large-Instruct-2411
+ base_model_relation: quantized
+ inference: false
+ license_link: https://mistral.ai/licenses/MRL-0.1.md
+ library_name: transformers
+ pipeline_tag: image-text-to-text
+ ---
+
+ # Pixtral-Large-Instruct-2411 🧡 ExLlamaV2 4.5bpw Quant
+
+ 4.5bpw quant of [Pixtral-Large-Instruct](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411).
+
+ Vision inputs are working on the dev branch of [ExLlamaV2](https://github.com/turboderp/exllamav2/tree/dev).
+
+ ## Tokenizer And Prompt Template
+ Uses a conversion of the v7m1 tokenizer with a 32k vocab size.
+
+ The chat template in chat_template.json uses the v7 instruct format:
+
+ ```
+ <s>[SYSTEM_PROMPT] <system prompt>[/SYSTEM_PROMPT][INST] <user message>[/INST] <assistant response></s>[INST] <user message>[/INST]
+ ```
+
+ ## Available Sizes
+ | Repo | Bits | Head Bits | Size |
+ | ----------- | ------ | ------ | ------ |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.0bpw) | 2.0 | 6.0 | 35.18 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.5bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-2.5bpw) | 2.5 | 6.0 | 39.34 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.0bpw) | 3.0 | 6.0 | 46.42 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.5bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-3.5bpw) | 3.5 | 6.0 | 53.50 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.0bpw) | 4.0 | 6.0 | 60.61 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.5bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.5bpw) | 4.5 | 6.0 | 67.68 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-5.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-5.0bpw) | 5.0 | 6.0 | 74.76 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-6.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-6.0bpw) | 6.0 | 8.0 | 88.81 GB |
+ | [nintwentydo/Pixtral-Large-Instruct-2411-exl2-8.0bpw](https://huggingface.co/nintwentydo/Pixtral-Large-Instruct-2411-exl2-8.0bpw) | 8.0 | 8.0 | 97.51 GB |
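Loading the quant works like any other ExLlamaV2 model; only the vision path needs the dev branch. The sketch below follows the multimodal example shipped in the exllamav2 repo around v0.2.6: the class and method names (`ExLlamaV2VisionTower`, `get_image_embeddings`, `text_alias`) come from that dev branch and may shift before merge, and the model directory and image file are placeholders.

```python
# Minimal sketch, assuming the exllamav2 dev-branch multimodal API (~v0.2.6).
from PIL import Image
from exllamav2 import (
    ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache,
    ExLlamaV2Tokenizer, ExLlamaV2VisionTower,
)
from exllamav2.generator import ExLlamaV2DynamicGenerator

model_dir = "Pixtral-Large-Instruct-2411-exl2-4.5bpw"  # placeholder local path

config = ExLlamaV2Config(model_dir)
config.max_seq_len = 8192

# The vision tower loads separately from the language model.
vision_model = ExLlamaV2VisionTower(config)
vision_model.load(progress=True)

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache, progress=True)
tokenizer = ExLlamaV2Tokenizer(config)

generator = ExLlamaV2DynamicGenerator(model=model, cache=cache, tokenizer=tokenizer)

# Each image becomes an embedding with a text alias spliced into the prompt.
image = Image.open("example.jpg")  # placeholder image
emb = vision_model.get_image_embeddings(model=model, tokenizer=tokenizer, image=image)

prompt = f"[INST] {emb.text_alias}Describe this image.[/INST]"
output = generator.generate(
    prompt=prompt,
    max_new_tokens=256,
    add_bos=True,
    encode_special_tokens=True,  # [INST]/[/INST] are special tokens
    embeddings=[emb],
)
print(output)
```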
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "chat_template": "{{- bos_token }} \n{%- for message in messages %} \n {%- if message['role'] == 'user' %} \n {{- '[INST]' + ' ' }} \n {%- if message['content'] is not string %} \n {%- for chunk in message['content'] %} \n {%- if chunk['type'] == 'text' %} \n {{- chunk['content'] }} \n {%- elif chunk['type'] == 'image' %} \n {{- '[IMG]' }} \n {%- else %} \n {{- raise_exception('Unrecognized content type!') }} \n {%- endif %} \n {%- endfor %} \n {%- else %} \n {{- message['content'] }} \n {%- endif %} \n {{- '[\/INST]' }} \n {%- if not loop.last and messages[loop.index]['role'] == 'user' %} \n {{- eos_token }} \n {%- endif %} \n {%- elif message['role'] == 'system' %} \n {{- '[SYSTEM_PROMPT] ' + message['content'] + '[\/SYSTEM_PROMPT]' }} \n {%- elif message['role'] == 'assistant' %} \n {{- ' ' + message['content'] + eos_token }} \n {%- else %} \n {{- raise_exception('Only user, system and assistant roles are supported!') }} \n {%- endif %} \n{%- endfor %}"
+ }
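For reference, the template above can be rendered with transformers' `apply_chat_template`. A hedged sketch, assuming transformers (≥4.47) can build the processor from this repo's files; note the template reads text chunks from a `content` key, and the rendered string in the comment is the expected output, not captured output.

```python
# Illustrative render of the v7 instruct template in chat_template.json.
from transformers import AutoProcessor

repo_id = "nintwentydo/Pixtral-Large-Instruct-2411-exl2-4.5bpw"
processor = AutoProcessor.from_pretrained(repo_id)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "content": "What is in this image?"},
    ]},
]

prompt = processor.apply_chat_template(messages, tokenize=False)
print(prompt)
# <s>[SYSTEM_PROMPT] You are a helpful assistant.[/SYSTEM_PROMPT][INST] [IMG]What is in this image?[/INST]
```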
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "architectures": [
+     "LlavaForConditionalGeneration"
+   ],
+   "ignore_index": -100,
+   "image_seq_length": 1,
+   "image_token_index": 10,
+   "model_type": "llava",
+   "multimodal_projector_bias": false,
+   "projector_hidden_act": "gelu",
+   "text_config": {
+     "hidden_size": 12288,
+     "intermediate_size": 28672,
+     "is_composition": true,
+     "max_position_embeddings": 131072,
+     "model_type": "mistral",
+     "norm_eps": 1e-05,
+     "rms_norm_eps": 1e-05,
+     "num_attention_heads": 96,
+     "num_hidden_layers": 88,
+     "num_key_value_heads": 8,
+     "rope_theta": 1000000000.0,
+     "sliding_window": null,
+     "vocab_size": 32768
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.0.dev0",
+   "vision_config": {
+     "head_dim": 88,
+     "hidden_act": "silu",
+     "hidden_size": 1408,
+     "image_size": 1024,
+     "image_token_id": 10,
+     "intermediate_size": 6144,
+     "model_type": "pixtral",
+     "num_hidden_layers": 40,
+     "num_attention_heads": 16,
+     "patch_size": 16,
+     "rope_theta": 10000.0
+   },
+   "vision_feature_layer": -1,
+   "vision_feature_select_strategy": "full",
+   "quantization_config": {
+     "quant_method": "exl2",
+     "version": "0.2.6",
+     "bits": 4,
+     "head_bits": 6,
+     "calibration": {
+       "rows": 115,
+       "length": 2048,
+       "dataset": "(default)"
+     }
+   }
+ }
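A quick sanity check of the values above, using nothing beyond the JSON itself (the `config.json` path assumes a local download):

```python
# Read the quant settings and one derived quantity from config.json.
import json

with open("config.json") as f:
    cfg = json.load(f)

q = cfg["quantization_config"]
print(q["quant_method"], q["version"], q["bits"], q["head_bits"])
# exl2 0.2.6 4 6

t = cfg["text_config"]
# Per-head dimension of the language model: 12288 / 96 = 128.
print(t["hidden_size"] // t["num_attention_heads"])  # 128
```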
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.47.0.dev0"
+ }
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
output-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4458c754938d72a5d6d77cd84dd25730a1226ee15db795e5b3b60008d2178f93
+ size 10637803612
output-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:979364d96ba516240d7711f10424bac4b9fa04246b6295f7c58b24da8164549d
+ size 10693012380
output-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:120777d3c3acac55e9e11a6e0b8bc8dd8741999fdf45555472b6e102867b7de1
+ size 10640872912
output-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa81527c6696643e881a7bd46cff6141138c121d4f4f8808099d0afd4284479b
+ size 10596988908
output-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13204236ed60d20028d0b71886a61aaf7c7ddb76e71f24666a4e525d2bb3aca6
+ size 10570244992
output-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9b9556cc2c5a79d57ed15c6c2766f037c6a81e5d7dec596536ee8dc7becd435
+ size 10647191720
output-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6957a12308b0f6ac5a37d147fa5fd0f387a767819f407d6de8a17d404299e1f
+ size 8888004708
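The seven shard sizes in the LFS pointers above sum to the 67.68 GB quoted for this 4.5bpw quant in the README table (the table's "GB" are evidently GiB):

```python
# Sum of the shard sizes listed in the LFS pointer files above.
sizes = [
    10637803612, 10693012380, 10640872912, 10596988908,
    10570244992, 10647191720, 8888004708,
]
total = sum(sizes)
print(total)                      # 72674119232 bytes
print(round(total / 1024**3, 2))  # 67.68 GiB, matching the README table
```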
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "PixtralImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "patch_size": {
+     "height": 16,
+     "width": 16
+   },
+   "processor_class": "PixtralProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "longest_edge": 1024
+   }
+ }
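Reading the flags together: images are converted to RGB, resized so the longest edge is at most 1024 px (resample 3 is PIL bicubic), rescaled by 1/255, normalized with the CLIP mean/std above, and split into 16×16 patches. A rough NumPy sketch of that pipeline; the real `PixtralImageProcessor` lives in transformers and may differ in details, and the patch-aligned rounding here is an assumption:

```python
# Rough sketch of the preprocessing this config describes, not the
# actual PixtralImageProcessor implementation.
import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
STD = np.array([0.26862954, 0.26130258, 0.27577711])

def preprocess(path: str, longest_edge: int = 1024, patch: int = 16) -> np.ndarray:
    img = Image.open(path).convert("RGB")           # do_convert_rgb
    scale = min(1.0, longest_edge / max(img.size))  # do_resize: cap longest edge
    w = max(patch, round(img.width * scale / patch) * patch)   # assumption:
    h = max(patch, round(img.height * scale / patch) * patch)  # patch-aligned dims
    img = img.resize((w, h), Image.BICUBIC)         # resample = 3 (bicubic)
    x = np.asarray(img) * 0.00392156862745098       # do_rescale: 1/255
    x = (x - MEAN) / STD                            # do_normalize
    return x.transpose(2, 0, 1)                     # HWC -> CHW

features = preprocess("example.jpg")  # placeholder image
print(features.shape)  # e.g. (3, 768, 1024) for a 4:3 landscape image
```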
processor_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "image_break_token": "[IMG_BREAK]",
+   "image_end_token": "[IMG_END]",
+   "image_token": "[IMG]",
+   "patch_size": 16,
+   "processor_class": "PixtralProcessor"
+ }
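These three tokens describe how an image is spliced into the token stream: one `[IMG]` per 16×16 patch, `[IMG_BREAK]` at the end of each patch row, and `[IMG_END]` after the last row. A sketch of that layout, mirroring Pixtral's documented convention rather than code from this repo:

```python
# Pixtral image-token layout for an image of (height, width) pixels
# divided into 16x16 patches.
def image_placeholder(height: int, width: int, patch: int = 16) -> str:
    rows, cols = height // patch, width // patch
    body = "[IMG_BREAK]".join("[IMG]" * cols for _ in range(rows))
    return body + "[IMG_END]"

# A 32x48 image -> 2 rows of 3 patches:
print(image_placeholder(32, 48))
# [IMG][IMG][IMG][IMG_BREAK][IMG][IMG][IMG][IMG_END]
```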
special_tokens_map.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b968b8dc352f42192367337c78ccc61e1eaddc6d641a579372d4f20694beb7a
+ size 587562
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff