Aratako committed on
Commit
e641a78
1 Parent(s): f9c661e

model upload

Browse files
README.md CHANGED
@@ -1,3 +1,54 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: []
3
+ library_name: transformers
4
+ tags:
5
+ - mergekit
6
+ - merge
7
+
8
+ ---
9
+ # mixtral-upscaled
10
+
11
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
12
+
13
+ ## Merge Details
14
+ ### Merge Method
15
+
16
+ This model was merged using the passthrough merge method.
17
+
18
+ ### Models Merged
19
+
20
+ The following models were included in the merge:
21
+ * ./Mixtral-8x7B-Instruct-v0.1
22
+
23
+ ### Configuration
24
+
25
+ The following YAML configuration was used to produce this model:
26
+
27
+ ```yaml
28
+ merge_method: passthrough
29
+ slices:
30
+ - sources:
31
+ - model: ./Mixtral-8x7B-Instruct-v0.1
32
+ layer_range: [0, 8]
33
+ - sources:
34
+ - model: ./Mixtral-8x7B-Instruct-v0.1
35
+ layer_range: [4, 12]
36
+ - sources:
37
+ - model: ./Mixtral-8x7B-Instruct-v0.1
38
+ layer_range: [8, 16]
39
+ - sources:
40
+ - model: ./Mixtral-8x7B-Instruct-v0.1
41
+ layer_range: [12, 20]
42
+ - sources:
43
+ - model: ./Mixtral-8x7B-Instruct-v0.1
44
+ layer_range: [16, 24]
45
+ - sources:
46
+ - model: ./Mixtral-8x7B-Instruct-v0.1
47
+ layer_range: [20, 28]
48
+ - sources:
49
+ - model: ./Mixtral-8x7B-Instruct-v0.1
50
+ layer_range: [24, 32]
51
+ dtype: bfloat16
52
+ tokenizer_source: base
53
+
54
+ ```
config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "./Mixtral-8x7B-Instruct-v0.1",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 4096,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 14336,
13
+ "max_position_embeddings": 32768,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 32,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 56,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "rms_norm_eps": 1e-05,
22
+ "rope_theta": 1000000.0,
23
+ "router_aux_loss_coef": 0.02,
24
+ "sliding_window": null,
25
+ "tie_word_embeddings": false,
26
+ "torch_dtype": "bfloat16",
27
+ "transformers_version": "4.38.1",
28
+ "use_cache": true,
29
+ "vocab_size": 32000
30
+ }
mergekit_config.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ merge_method: passthrough
2
+ slices:
3
+ - sources:
4
+ - model: ./Mixtral-8x7B-Instruct-v0.1
5
+ layer_range: [0, 8]
6
+ - sources:
7
+ - model: ./Mixtral-8x7B-Instruct-v0.1
8
+ layer_range: [4, 12]
9
+ - sources:
10
+ - model: ./Mixtral-8x7B-Instruct-v0.1
11
+ layer_range: [8, 16]
12
+ - sources:
13
+ - model: ./Mixtral-8x7B-Instruct-v0.1
14
+ layer_range: [12, 20]
15
+ - sources:
16
+ - model: ./Mixtral-8x7B-Instruct-v0.1
17
+ layer_range: [16, 24]
18
+ - sources:
19
+ - model: ./Mixtral-8x7B-Instruct-v0.1
20
+ layer_range: [20, 28]
21
+ - sources:
22
+ - model: ./Mixtral-8x7B-Instruct-v0.1
23
+ layer_range: [24, 32]
24
+ dtype: bfloat16
25
+ tokenizer_source: base
model-00001-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6c09933a9a453c4c5ebf8f43ea35f0e3dd16f86cc33c54323f2ed6e080df32d
3
+ size 9993254680
model-00002-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74c215757d5a143741f8d5008dd149ab56e9b55e72273938dec941723836f407
3
+ size 9899014072
model-00003-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0402a2e86bf72d5758cd9eec0768c5f7cc6b7a846dac18aa35b31c394f0ef0a0
3
+ size 9915511056
model-00004-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a10a453dcc571e97c65c9ad41a2f585a6fdbb6db1460f4513438d23a8dc373e
3
+ size 9899064448
model-00005-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75d5575c027b0cd0c457f170a73264c1ce0fe952aa6317b27c2ca9aa24d90543
3
+ size 9915511120
model-00006-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df8fe2359a81096208a31b96b92f5aa6a50cd316f3073172f5a36611d02271dc
3
+ size 9915511120
model-00007-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c513b3ea3c2c9ea00e15d483c4e9f45761aa2e3172c85bc4c94277b9de856bef
3
+ size 9899064480
model-00008-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:470b515fd86aea4b0451c5b179fc067bab1dea3ddc4f4083e713aa2db3b29802
3
+ size 9915511120
model-00009-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68272ae6467c46927d0e94465fc3833f86a58cf2c20b6fe0fdb90dc0f7446cdb
3
+ size 9915511120
model-00010-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc1dd70c8d64d9b4d4e36cfa794ebfee95257b5c43d241b61c685bd9db8e3d7d
3
+ size 9966008120
model-00011-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f489a10aefe8f11bd34995e85e850ce76bbdab697b6cb2b01e9140107525ddc
3
+ size 9899064488
model-00012-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6272a0673ceab217cf8fb13c9b1aca0e4b237073b129280f5e4566ff178d55d0
3
+ size 9915511120
model-00013-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:497852cdbcc4faa1edddcfb72aa78d5dbc3c0b6824471661aa91c94a8ff6b5d0
3
+ size 9915511120
model-00014-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7aaf11afb1c7b80a168a338977ee562f66585836552a40fecbda9fb52ec71c04
3
+ size 9899064488
model-00015-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:720c4c02274d2da5555af238b8072cbb426ab3073346c22fa932567cb47ceaaa
3
+ size 9948999560
model-00016-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63022a88a71114e047d7b257f4cf72bb125cd8cf8930cd47bb106a4ff4eb6569
3
+ size 9915544352
model-00017-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aea3ee3b8bbc0a0f66042e09e0b41965c531b674288c6b75a295b85181a06d4f
3
+ size 4339119736
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "unk_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
3
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
33
+ "clean_up_tokenization_spaces": false,
34
+ "eos_token": "</s>",
35
+ "legacy": true,
36
+ "model_max_length": 1000000000000000019884624838656,
37
+ "pad_token": null,
38
+ "sp_model_kwargs": {},
39
+ "spaces_between_special_tokens": false,
40
+ "tokenizer_class": "LlamaTokenizer",
41
+ "unk_token": "<unk>",
42
+ "use_default_system_prompt": false
43
+ }