MaziyarPanahi commited on
Commit
a3d4a67
1 Parent(s): 0ddcf43

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - finetuned
4
+ - quantized
5
+ - 4-bit
6
+ - AWQ
7
+ - transformers
8
+ - safetensors
9
+ - mixtral
10
+ - text-generation
11
+ - moe
12
+ - fr
13
+ - it
14
+ - de
15
+ - es
16
+ - en
17
+ - license:apache-2.0
18
+ - autotrain_compatible
19
+ - endpoints_compatible
20
+ - text-generation-inference
21
+ - region:us
22
+ model_name: Mixtral-8x22B-v0.1-AWQ
23
+ base_model: v2ray/Mixtral-8x22B-v0.1
24
+ inference: false
25
+ model_creator: v2ray
26
+ pipeline_tag: text-generation
27
+ quantized_by: MaziyarPanahi
28
+ ---
29
+ # Description
30
+ [MaziyarPanahi/Mixtral-8x22B-v0.1-AWQ](https://huggingface.co/MaziyarPanahi/Mixtral-8x22B-v0.1-AWQ) is a quantized (AWQ) version of [v2ray/Mixtral-8x22B-v0.1](https://huggingface.co/v2ray/Mixtral-8x22B-v0.1).
31
+
32
+ ## How to use
33
+ ### Install the necessary packages
34
+
35
+ ```
36
+ pip install --upgrade accelerate autoawq transformers
37
+ ```
38
+
39
+ ### Example Python code
40
+
41
+
42
+ ```python
43
+ from transformers import AutoTokenizer, AutoModelForCausalLM
44
+
45
+ model_id = "MaziyarPanahi/Mixtral-8x22B-v0.1-AWQ"
46
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
47
+ model = AutoModelForCausalLM.from_pretrained(model_id).to(0)
48
+
49
+ text = "User:\nHello can you provide me with top-3 cool places to visit in Paris?\n\nAssistant:\n"
50
+ inputs = tokenizer(text, return_tensors="pt").to(0)
51
+
52
+ out = model.generate(**inputs, max_new_tokens=300)
53
+ print(tokenizer.decode(out[0], skip_special_tokens=True))
54
+ ```
55
+
56
+ Results:
57
+ ```
58
+ User:
59
+ Hello can you provide me with top-3 cool places to visit in Paris?
60
+
61
+ Assistant:
62
+ Absolutely, here are my top-3 recommendations for must-see places in Paris:
63
+
64
+ 1. The Eiffel Tower: An icon of Paris, this wrought-iron lattice tower is a global cultural icon of France and is among the most recognizable structures in the world. Climbing up to the top offers breathtaking views of the city.
65
+
66
+ 2. The Louvre Museum: Home to thousands of works of art, the Louvre is the world's largest art museum and a historic monument in Paris. Must-see pieces include the Mona Lisa, the Winged Victory of Samothrace, and the Venus de Milo.
67
+
68
+ 3. Notre-Dame Cathedral: This cathedral is a masterpiece of French Gothic architecture and is famous for its intricate stone carvings, beautiful stained glass, and its iconic twin towers. Be sure to spend some time exploring its history and learning about the fascinating restoration efforts post the 2019 fire.
69
+
70
+ I hope you find these recommendations helpful and that they make for an enjoyable and memorable trip to Paris. Safe travels!
71
+ ```
config.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/home/maziyar/.cache/huggingface/hub/models--v2ray--Mixtral-8x22B-v0.1/snapshots/8dd41a0a67ec8ab577e03e5bf04f149b7c2a7ff6",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 6144,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 16384,
13
+ "max_position_embeddings": 65536,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 48,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 56,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "quantization_config": {
22
+ "bits": 4,
23
+ "group_size": 128,
24
+ "modules_to_not_convert": [
25
+ "gate"
26
+ ],
27
+ "quant_method": "awq",
28
+ "version": "gemm",
29
+ "zero_point": true
30
+ },
31
+ "rms_norm_eps": 1e-05,
32
+ "rope_theta": 1000000,
33
+ "router_aux_loss_coef": 0.001,
34
+ "router_jitter_noise": 0.0,
35
+ "sliding_window": null,
36
+ "tie_word_embeddings": false,
37
+ "torch_dtype": "float16",
38
+ "transformers_version": "4.38.2",
39
+ "use_cache": true,
40
+ "vocab_size": 32000
41
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "do_sample": true,
5
+ "eos_token_id": 2,
6
+ "transformers_version": "4.38.2"
7
+ }
model-00001-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af48a8b6f555fac9aec21a6d597153b1c788d6d39f35dd1c81fd101238263be7
3
+ size 4969773192
model-00002-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfe12f9bfaa41f5c1984e62dd74bfbc38196395e7591b4728c7de57002973766
3
+ size 4994966792
model-00003-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e6e29e418b551919f47a0f2266b63b4da02af7f3dfe569d533da96f69e81c1c
3
+ size 4994966904
model-00004-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:736f175bd04073502798eccff3fb26440162907cec69e71e6fa7945761d0e3a4
3
+ size 4994967128
model-00005-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bce39015c167a570c160482cd7875c211155cd931cacd4dce1928c71163321ed
3
+ size 4999807128
model-00006-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8aa1e232e29a5b222eafe055c3eba833297b33851a8c8be092458a7a65ff534b
3
+ size 4996540120
model-00007-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cd8d067408e5989b20e294b9d2289ab9c5026bfa8f2c1881bbaac1f88439a84
3
+ size 4994967128
model-00008-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c891f2feb9a5a15d947f3492d3e59bc5cd817c83bb83b5e64e8870f58c104256
3
+ size 4994967128
model-00009-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67ede623ce7620cb45f22002d5ab459abf0a89e114c85a9c73c9aeecfa353c71
3
+ size 4994967128
model-00010-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0253da867eb65b6d37f0be5ba9f3dfe3635f8698dd6974e91873575e6ae55c83
3
+ size 4994967128
model-00011-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8d454ea590f83de93c0140a3ab34437f30dceeb44f31372f47f8c67a94dc074
3
+ size 4999807128
model-00012-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:676af6b69e9f1618f454cd5964774cd68d492495f03b511bbeced7aef909e0e7
3
+ size 4996540120
model-00013-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d092b2dba1754387c823403e3abfa9f16661974d22059527400ec3e8b46a91f
3
+ size 4994967128
model-00014-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3d01189b119fe29d2881c65ac342b4ad73ea5d4b0e2bf335704f907522e5c02
3
+ size 4994967128
model-00015-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7252111e88116bd1a1f1ae47cb9d6f73b142a30f1acd44e5a644b09f33a6907
3
+ size 3727506680
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "unk_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
3
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 1000000000000000019884624838656,
36
+ "pad_token": null,
37
+ "sp_model_kwargs": {},
38
+ "spaces_between_special_tokens": false,
39
+ "tokenizer_class": "LlamaTokenizer",
40
+ "unk_token": "<unk>",
41
+ "use_default_system_prompt": false
42
+ }