Upload folder using huggingface_hub

#1 · opened by jsfs11
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ base_model:
+ - alpindale/WizardLM-2-8x22B
+ - mistralai/Mixtral-8x22B-Instruct-v0.1
+ tags:
+ - mergekit
+ - merge
+
+ ---
+ # merge.py
+
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+ ## Merge Details
+ ### Merge Method
+
+ This model was merged using the SLERP merge method.
+
+ ### Models Merged
+
+ The following models were included in the merge:
+ * [alpindale/WizardLM-2-8x22B](https://huggingface.co/alpindale/WizardLM-2-8x22B)
+ * [mistralai/Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)
+
+ ### Configuration
+
+ The following YAML configuration was used to produce this model:
+
+ ```yaml
+
+ slices:
+ - sources:
+   - model: mistralai/Mixtral-8x22B-Instruct-v0.1
+     layer_range: [0, 56]
+   - model: alpindale/WizardLM-2-8x22B
+     layer_range: [0, 56]
+ merge_method: slerp
+ base_model: mistralai/Mixtral-8x22B-Instruct-v0.1
+ parameters:
+   t:
+   - filter: self_attn
+     value: [0, 0.5, 0.3, 0.7, 1]
+   - filter: mlp
+     value: [1, 0.5, 0.7, 0.3, 0]
+   - value: 0.5
+ dtype: bfloat16
+
+ ```
.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 6144,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 65536,
+   "model_type": "mixtral",
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 56,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.0",
+   "use_cache": true,
+   "vocab_size": 32768
+ }
.ipynb_checkpoints/mergekit_config-checkpoint.yml ADDED
@@ -0,0 +1,17 @@
+
+ slices:
+ - sources:
+   - model: mistralai/Mixtral-8x22B-Instruct-v0.1
+     layer_range: [0, 56]
+   - model: alpindale/WizardLM-2-8x22B
+     layer_range: [0, 56]
+ merge_method: slerp
+ base_model: mistralai/Mixtral-8x22B-Instruct-v0.1
+ parameters:
+   t:
+   - filter: self_attn
+     value: [0, 0.5, 0.3, 0.7, 1]
+   - filter: mlp
+     value: [1, 0.5, 0.7, 0.3, 0]
+   - value: 0.5
+ dtype: bfloat16
README.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ base_model:
+ - alpindale/WizardLM-2-8x22B
+ - mistralai/Mixtral-8x22B-Instruct-v0.1
+ tags:
+ - mergekit
+ - merge
+
+ ---
+ # merge.py
+
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+ ## Merge Details
+ ### Merge Method
+
+ This model was merged using the SLERP merge method.
+
+ ### Models Merged
+
+ The following models were included in the merge:
+ * [alpindale/WizardLM-2-8x22B](https://huggingface.co/alpindale/WizardLM-2-8x22B)
+ * [mistralai/Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)
+
+ ### Configuration
+
+ The following YAML configuration was used to produce this model:
+
+ ```yaml
+
+ slices:
+ - sources:
+   - model: mistralai/Mixtral-8x22B-Instruct-v0.1
+     layer_range: [0, 56]
+   - model: alpindale/WizardLM-2-8x22B
+     layer_range: [0, 56]
+ merge_method: slerp
+ base_model: mistralai/Mixtral-8x22B-Instruct-v0.1
+ parameters:
+   t:
+   - filter: self_attn
+     value: [0, 0.5, 0.3, 0.7, 1]
+   - filter: mlp
+     value: [1, 0.5, 0.7, 0.3, 0]
+   - value: 0.5
+ dtype: bfloat16
+
+ ```
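Reproducing the merge is a matter of feeding the YAML above to mergekit. A minimal sketch, assuming mergekit is installed (`pip install mergekit`) and its standard `mergekit-yaml` entry point is on PATH; the output directory name is illustrative:

```python
import subprocess

# Run mergekit on the config shipped in this repo; the merged shards,
# tokenizer files, and generated README land in ./merged-model.
subprocess.run(
    ["mergekit-yaml", "mergekit_config.yml", "./merged-model"],
    check=True,
)
```

Note that `layer_range: [0, 56]` keeps all 56 layers of both source models, so the output has the same depth as Mixtral-8x22B itself.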
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 6144,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 65536,
+   "model_type": "mixtral",
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 56,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.0",
+   "use_cache": true,
+   "vocab_size": 32768
+ }
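The config confirms the merged model keeps Mixtral-8x22B's geometry: 56 hidden layers (matching `layer_range: [0, 56]`), 8 local experts with 2 routed per token, and bfloat16 weights. A minimal loading sketch with transformers; the repo path is a placeholder, and `device_map="auto"` assumes enough combined GPU/CPU memory for the roughly 280 GB of bfloat16 shards listed below:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this-repo"  # placeholder: a local clone or the Hub id of this model

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    device_map="auto",           # spread the 29 shards across available devices
)

inputs = tokenizer("[INST] Hello! [/INST]", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```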
mergekit_config.yml ADDED
@@ -0,0 +1,17 @@
+
+ slices:
+ - sources:
+   - model: mistralai/Mixtral-8x22B-Instruct-v0.1
+     layer_range: [0, 56]
+   - model: alpindale/WizardLM-2-8x22B
+     layer_range: [0, 56]
+ merge_method: slerp
+ base_model: mistralai/Mixtral-8x22B-Instruct-v0.1
+ parameters:
+   t:
+   - filter: self_attn
+     value: [0, 0.5, 0.3, 0.7, 1]
+   - filter: mlp
+     value: [1, 0.5, 0.7, 0.3, 0]
+   - value: 0.5
+ dtype: bfloat16
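For intuition about the method: SLERP interpolates along the great circle between two weight vectors instead of the straight line between them, and the `t` lists above are gradients that mergekit stretches across the layer stack, so self-attention tensors shift between the two models in the opposite pattern from the MLP tensors. Below is a self-contained sketch of per-tensor SLERP, with `t=0` returning `a` and `t=1` returning `b`; this is an illustration, not mergekit's exact implementation (which also handles the filter gradients and edge cases):

```python
import torch

def slerp(t: float, a: torch.Tensor, b: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Spherical linear interpolation between two same-shaped weight tensors."""
    a_flat = a.flatten().float()
    b_flat = b.flatten().float()
    a_unit = a_flat / (a_flat.norm() + eps)
    b_unit = b_flat / (b_flat.norm() + eps)
    dot = torch.clamp(a_unit @ b_unit, -1.0, 1.0)
    omega = torch.arccos(dot)         # angle between the two weight directions
    if omega.abs().item() < 1e-4:     # nearly parallel: fall back to plain lerp
        return (1 - t) * a + t * b
    sin_omega = torch.sin(omega)
    mixed = (torch.sin((1 - t) * omega) / sin_omega) * a_flat \
          + (torch.sin(t * omega) / sin_omega) * b_flat
    return mixed.reshape(a.shape).to(a.dtype)
```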
model-00001-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc8567b571939ae5dfb5d48c49b16563253dc36e69cf117e0e7df3c1d2ef6e1a
+ size 9805561216
model-00002-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35da0fa5000f7e5f08eeec69b68bd2660a8862e1f60ded9f7f79823310538301
+ size 9814924936
model-00003-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e87dd44a050a913508f46c78617264149c6c3c47fbcecbd58a25fb4aa7824fd
+ size 9814924936
model-00004-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63b89941a6ec1fe3c6173b3e36fd489da7274fe0b52b7902a57aed087a4b1847
+ size 9814924936
model-00005-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d6dff6be8d77dedaa4d31f32ae303c541f7a886505c12fad9397d2c905834f5
+ size 9814924944
model-00006-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8624892ac9044964c875b06c2db7c91745393a0a290874358d97c343d4db31ec
+ size 9814924984
model-00007-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a46d253d6919362f1ae2927bf19840ba814210a8bf27785579ade33a2c22bbc1
+ size 9991086240
model-00008-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e55b5f572ff519aadb1accec22ac4050842b0c9cb03c216affe3f56985197b41
+ size 9814925000
model-00009-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a9ca4a8ceff1b6970da8b2aed31030efcd0084fee696308a2289d21a8c50311
+ size 9814925000
model-00010-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2450a798bcac95c61b49766c317329352db1ee9725003193fb27981bb8cc70c
+ size 9814925000
model-00011-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1069ffd8bd4db1ad5bdf344048ae8fac17aef5d0c110ec043f48f57cda65c913
+ size 9940655712
model-00012-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5e260cb350e31366b700bd76e22f9369caefa1a2484275c64785c0b55215736
+ size 9903005048
model-00013-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d5da31c3d475f3276ba8f1bbddee5e3f117879bb11a92ea04e2786ff89b16b1
+ size 9852673512
model-00014-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af4302d1e5cda294fc68265486372af50246345bcd1ed6a471fd217bda73be1b
+ size 9814900192
model-00015-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0862d1164d0266502a32f41fa8a85388dc82d7d4e132f4a9a1167710752620cd
+ size 9814925000
model-00016-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0521ccafb1043fdf2a1c8eebc493914feb0d204e88aa213b071f4691fae4cb03
+ size 9814925000
model-00017-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1beffd9064e76c73b2993f9767544812a6a9b6b035dbb5cb7a8a46aeaf3bbb46
+ size 9991086240
model-00018-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29b2b3f60f4a9eb4da537ff1198caa8d07dd9d4c6bc0ad904fe7d767de01c5fa
+ size 9814925000
model-00019-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a1974226df47d63d8ebac3036baf463a325ba933576201aec4a8416d5292232
+ size 9814925000
model-00020-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3dc866ff028b49ab85fa20ebf15812b0cc2b4ad2e3690d75120af530da747da
+ size 9814925000
model-00021-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be418005d1dedfc40b44af4087a6f5288a6bedd02427b34f7e4e7574f990e352
+ size 9814925000
model-00022-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6f3b58bbadb2bdf320cc04b75f23fa7d0da01b64e1c53bada73090e4e3f41c2
+ size 9814925000
model-00023-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d41167ae5d1c97049642000b60b8a5cb4781f8af6618d80602d40f2ab2a558d
+ size 9814925000
model-00024-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0586c1baf7eb695b0866b3000c26963f855f587eb7b10ad4f23840e6fd8a81a5
+ size 9991209472
model-00025-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d14c1b8276a62786f3f6987db8035453f862ad6832705ab5db9e3a1d48105b2c
+ size 9852575088
model-00026-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0395ea255b66391c1278012a323d36ed7e63ecb1d2d31ef23f9d32dbafe9b00c
+ size 9903005048
model-00027-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7db19d71c9dcd7ea6f4c5b2f067fe5ed9be0b321ae42478a26272ddf36e0d610
+ size 9940729328
model-00028-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0d2bdb93a12602e3a7c06c999f380485ff8f3ac3cedc1396351398881e4b355
+ size 9814925000
model-00029-of-00029.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9ab71c0185b7a860b4764439440202e7ab67883b7c770c9d21ce2032ab18a6f
+ size 5401280728
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[INST]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[/INST]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "[TOOL_CALLS]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "[AVAILABLE_TOOLS]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "[/AVAILABLE_TOOLS]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "[TOOL_RESULTS]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "[/TOOL_RESULTS]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + ' ' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
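The `chat_template` here is the stock Mistral-instruct format: strictly alternating user/assistant turns wrapped in `[INST] ... [/INST]`, with no system role. A quick sanity check using transformers' `apply_chat_template`; the repo path is again a placeholder:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this-repo")  # placeholder

messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi, how can I help?"},
    {"role": "user", "content": "Summarize SLERP in one line."},
]
# Renders roughly: "<s> [INST] Hello! [/INST] Hi, how can I help? </s> [INST] ... [/INST]"
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```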