Dracones committed on
Commit
ebf74f9
1 Parent(s): bce320d

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: apache-2.0
5
+ base_model: microsoft/WizardLM-2-8x22B
6
+ tags:
7
+ - exl2
8
+ ---
9
+
10
+ # WizardLM-2-8x22B - EXL2 7.0bpw
11
+
12
+ This is a 7.0bpw EXL2 quant of [microsoft/WizardLM-2-8x22B](https://huggingface.co/microsoft/WizardLM-2-8x22B)
13
+
14
+ Details about the model can be found at the above model page.
15
+
16
+ ## EXL2 Version
17
+
18
+ These quants were made with exllamav2 version 0.0.18. Quants made on this version of EXL2 may not work on older versions of the exllamav2 library.
19
+
20
+ If you have problems loading these models, please update Text Generation WebUI to the latest version.
21
+
22
+
23
+
24
+ ## Quant Details
25
+
26
+ This is the script used for quantization.
27
+
28
+ ```bash
29
+ #!/bin/bash
30
+
31
+ # Activate the conda environment
32
+ source ~/miniconda3/etc/profile.d/conda.sh
33
+ conda activate exllamav2
34
+
35
+ # Set the model name and bit size
36
+ MODEL_NAME="WizardLM-2-8x22B"
37
+
38
+ # Define variables
39
+ MODEL_DIR="/mnt/storage/models/$MODEL_NAME"
40
+ OUTPUT_DIR="exl2_$MODEL_NAME"
41
+ MEASUREMENT_FILE="measurements/$MODEL_NAME.json"
42
+
43
+ # Create the measurement file if needed
44
+ if [ ! -f "$MEASUREMENT_FILE" ]; then
45
+ echo "Creating $MEASUREMENT_FILE"
46
+ # Create directories
47
+ if [ -d "$OUTPUT_DIR" ]; then
48
+ rm -r "$OUTPUT_DIR"
49
+ fi
50
+ mkdir "$OUTPUT_DIR"
51
+
52
+ python convert.py -i $MODEL_DIR -o $OUTPUT_DIR -nr -om $MEASUREMENT_FILE
53
+ fi
54
+
55
+ # Choose one of the below. Either create a single quant for testing or a batch of them.
56
+ # BIT_PRECISIONS=(2.25)
57
+ BIT_PRECISIONS=(5.0 4.5 4.0 3.5 3.0 2.75 2.5 2.25)
58
+
59
+ for BIT_PRECISION in "${BIT_PRECISIONS[@]}"
60
+ do
61
+ CONVERTED_FOLDER="models/${MODEL_NAME}_exl2_${BIT_PRECISION}bpw"
62
+
63
+ # If it doesn't already exist, make the quant
64
+ if [ ! -d "$CONVERTED_FOLDER" ]; then
65
+
66
+ echo "Creating $CONVERTED_FOLDER"
67
+
68
+ # Create directories
69
+ if [ -d "$OUTPUT_DIR" ]; then
70
+ rm -r "$OUTPUT_DIR"
71
+ fi
72
+ mkdir "$OUTPUT_DIR"
73
+ mkdir "$CONVERTED_FOLDER"
74
+
75
+ # Run conversion commands
76
+ python convert.py -i $MODEL_DIR -o $OUTPUT_DIR -nr -m $MEASUREMENT_FILE -b $BIT_PRECISION -cf $CONVERTED_FOLDER
77
+
78
+ fi
79
+ done
80
+ ```
config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 6144,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 16384,
13
+ "max_position_embeddings": 65536,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 48,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 56,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "rms_norm_eps": 1e-05,
22
+ "rope_theta": 1000000,
23
+ "router_aux_loss_coef": 0.001,
24
+ "router_jitter_noise": 0.0,
25
+ "sliding_window": null,
26
+ "tie_word_embeddings": false,
27
+ "torch_dtype": "bfloat16",
28
+ "transformers_version": "4.36.2",
29
+ "use_cache": false,
30
+ "vocab_size": 32000,
31
+ "quantization_config": {
32
+ "quant_method": "exl2",
33
+ "version": "0.0.18",
34
+ "bits": 7.0,
35
+ "head_bits": 6,
36
+ "calibration": {
37
+ "rows": 100,
38
+ "length": 2048,
39
+ "dataset": "(default)"
40
+ }
41
+ }
42
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.36.2"
6
+ }
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
output-00001-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91491ee66f4c7f2a06f969af6724e1ad0fccd7c32dd4ab43d8a1fb8d3314728a
3
+ size 8589806920
output-00002-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4c4ce1f22df03bd4557590f038778133c3c352f6aad19fb0cc5b5f9007765ab
3
+ size 8589077616
output-00003-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c57f20324286e5c8138be2024d7df4f20063876f211a86d66295efe8162bc59
3
+ size 8557685488
output-00004-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:583ae276a8e4853bfa96e4517caa5db0f0dacadb3f687d67350e6c51040fc543
3
+ size 8518622504
output-00005-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f457410acb9ea244fe44e6751dbc61b8cdf4234e5f3b08b98335fdbc26d674e1
3
+ size 8562167968
output-00006-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4f5609df34122d05ab90a49a39637abff19ec223297e4abd061eaed3708c16b
3
+ size 8551482560
output-00007-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:998e43a7ac8419c5b73c76963caf2a2adf3dd7d1ed9fd028f96dd25d7e033e1e
3
+ size 8551482560
output-00008-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34f4b2da4a8692936e78c3c521b57674377d166f29e834e41d9d1bad36f315f9
3
+ size 8549846248
output-00009-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1a0270350896fa8703492dfd8100afe2ae489114c6117cea657fd358c39ae79
3
+ size 8551482560
output-00010-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1bc5939fcedb7dced2da93c50dc52726b39bb9cb80c718987d076088b2bc7d3
3
+ size 8549846264
output-00011-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fa354bb90a669b6193905b9d22b0d633c031991a150249648627ca3dc34aba0
3
+ size 8551482560
output-00012-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c699a8234abcf8e1a93429bc6bb4f7f04dbf95e774c3bdbcd6d07d6fa1f8795
3
+ size 8551482560
output-00013-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c82fb54df2a2ff2593533d41bf2e81920ae98181bd6393729ec7a90511a58a7e
3
+ size 8549846248
output-00014-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb979f19ed23001a066de48c754422578f8895f70c0f2a5fe5053d772d55ed3e
3
+ size 8551482560
output-00015-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6cb977dc8b43cc79c1e40877ef384433739852ec4663c4a93120fc95c419e78
3
+ size 3476710520
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<unk>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
3
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 1000000000000000019884624838656,
36
+ "pad_token": "<unk>",
37
+ "padding_side": "right",
38
+ "sp_model_kwargs": {},
39
+ "spaces_between_special_tokens": false,
40
+ "tokenizer_class": "LlamaTokenizer",
41
+ "unk_token": "<unk>",
42
+ "use_default_system_prompt": false
43
+ }