JoPmt committed
Commit c2e8be6
1 parent: 0ac2829

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ base_model:
+ - cognitivecomputations/TinyDolphin-2.8-1.1b
+ - 78health/TinyLlama_1.1B-function-calling
+ - DaertML/TinyGauss-1.1B
+ license: apache-2.0
+ tags:
+ - moe
+ - frankenmoe
+ - merge
+ - mergekit
+ - lazymergekit
+ - cognitivecomputations/TinyDolphin-2.8-1.1b
+ - 78health/TinyLlama_1.1B-function-calling
+ - DaertML/TinyGauss-1.1B
+ ---
+
+ # TinyEnsemble-3x1.1B-TinyMoE
+
+ TinyEnsemble-3x1.1B-TinyMoE is a Mixture of Experts (MoE) model assembled from the following three experts with [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
+ * [cognitivecomputations/TinyDolphin-2.8-1.1b](https://huggingface.co/cognitivecomputations/TinyDolphin-2.8-1.1b)
+ * [78health/TinyLlama_1.1B-function-calling](https://huggingface.co/78health/TinyLlama_1.1B-function-calling)
+ * [DaertML/TinyGauss-1.1B](https://huggingface.co/DaertML/TinyGauss-1.1B)
+
+ ## 🧩 Configuration
+
+ ```yaml
+ base_model: cognitivecomputations/TinyDolphin-2.8-1.1b
+ gate_mode: cheap_embed
+ dtype: bfloat16
+ experts:
+   - source_model: cognitivecomputations/TinyDolphin-2.8-1.1b
+     positive_prompts: ["write", "explain", "summarize", "how", "what", "acting"]
+   - source_model: 78health/TinyLlama_1.1B-function-calling
+     positive_prompts: ["code", "python", "javascript", "programming", "script", "run", "create"]
+   - source_model: DaertML/TinyGauss-1.1B
+     positive_prompts: ["count", "math", "algorithm", "crypto", "logic", "reason"]
+ ```
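+
+ With `gate_mode: cheap_embed`, mergekit derives each expert's router weights from the raw token embeddings of its `positive_prompts` (the same gate is reused at every layer), so no forward passes are needed during the merge. The sketch below only illustrates that idea and is not mergekit's actual implementation; `embed` stands for the shared input-embedding matrix and `tokenizer` for this repo's tokenizer:
+
+ ```python
+ import torch
+
+ def cheap_embed_gate(embed: torch.Tensor, tokenizer, prompts_per_expert):
+     """Illustrative only: build one router row per expert by averaging
+     the input embeddings of that expert's positive-prompt tokens."""
+     rows = []
+     for prompts in prompts_per_expert:
+         ids = [t for p in prompts
+                for t in tokenizer(p, add_special_tokens=False)["input_ids"]]
+         rows.append(embed[ids].mean(dim=0))
+     return torch.stack(rows)  # shape: (num_experts, hidden_size), here (3, 2048)
+ ```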
+
+ ## 💻 Usage
+
+ ```python
+ # In a notebook, first install dependencies: !pip install -qU transformers bitsandbytes accelerate
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+
+ model = "JoPmt/TinyEnsemble-3x1.1B-TinyMoE"
+
+ tokenizer = AutoTokenizer.from_pretrained(model)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
+ )
+
+ # Format the conversation with the tokenizer's chat template, then sample.
+ messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
+ prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+ print(outputs[0]["generated_text"])
+ ```
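+
+ Passing `load_in_4bit` through `model_kwargs` still works but is deprecated in newer transformers releases. An equivalent sketch with an explicit `BitsAndBytesConfig` (assumes a CUDA GPU with `bitsandbytes` installed):
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+ repo = "JoPmt/TinyEnsemble-3x1.1B-TinyMoE"
+ bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
+
+ tokenizer = AutoTokenizer.from_pretrained(repo)
+ model = AutoModelForCausalLM.from_pretrained(
+     repo, quantization_config=bnb_config, device_map="auto"
+ )
+
+ messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
+ input_ids = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ output_ids = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+ ```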
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|im_end|>": 32000,
+   "<|im_start|>": 32001
+ }
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "cognitivecomputations/TinyDolphin-2.8-1.1b",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 32000,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 4096,
+   "mlp_bias": false,
+   "model_type": "mixtral",
+   "num_attention_heads": 32,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "num_local_experts": 3,
+   "output_router_logits": false,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.2",
+   "use_cache": true,
+   "vocab_size": 32002
+ }
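The config above describes the merged model as a 22-layer Mixtral-style network (hidden size 2048, 3 local experts, top-2 routing per token). A minimal sanity check, assuming `transformers` is installed; it downloads only `config.json`, not the weights:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("JoPmt/TinyEnsemble-3x1.1B-TinyMoE")
assert cfg.model_type == "mixtral"
assert cfg.num_local_experts == 3       # three merged experts
assert cfg.num_experts_per_tok == 2     # top-2 routing
print(cfg.num_hidden_layers, cfg.hidden_size)  # 22 2048
```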
mergekit_moe_config.yml ADDED
@@ -0,0 +1,11 @@
+
+ base_model: cognitivecomputations/TinyDolphin-2.8-1.1b
+ gate_mode: cheap_embed
+ dtype: bfloat16
+ experts:
+   - source_model: cognitivecomputations/TinyDolphin-2.8-1.1b
+     positive_prompts: ["write", "explain", "summarize", "how", "what", "acting"]
+   - source_model: 78health/TinyLlama_1.1B-function-calling
+     positive_prompts: ["code", "python", "javascript", "programming", "script", "run", "create"]
+   - source_model: DaertML/TinyGauss-1.1B
+     positive_prompts: ["count", "math", "algorithm", "crypto", "logic", "reason"]
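This is the same configuration shown in the README, stored alongside the weights. A small sketch for parsing it locally (assumes PyYAML is installed and the file has been downloaded):

```python
import yaml

with open("mergekit_moe_config.yml") as f:
    cfg = yaml.safe_load(f)

print(cfg["base_model"], cfg["gate_mode"], cfg["dtype"])
for expert in cfg["experts"]:
    print(expert["source_model"], "->", expert["positive_prompts"])
```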
model-00001-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65a685ae56a7be711f0dc17a2df84fdabec086f1c1d12dfb595d56d232a13ca1
+ size 990952512
model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd183b9fdc13f5d88b149da94288a5509170c180c0eb55a90aa2cb4a1a5df6a2
+ size 994099456
model-00003-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b493950a4b2baf7c3002d58e7b039826f14bdeab6b37a8332bdc6fb5e73ae931
+ size 998285048
model-00004-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df0bdeca62ac62532c10a7e280136b2dc9f2d3a08323d81ce8225f46fa983dfc
+ size 998289280
model-00005-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322a6c3064490b35659b7e0df8b28acc782ef219cb00c955828ca93fb84107c9
+ size 994095312
model-00006-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb1b3828dd530d1871000a64d843ecd913080ae709e283ba2dd52b6d06ccc464
+ size 269770232
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4.2", "total_size": 5245448192}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00006.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00006.safetensors", "model.layers.0.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00006.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00006.safetensors", "model.layers.1.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00006.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.0.w1.weight": 
"model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.1.w2.weight": "model-00001-of-00006.safetensors", "model.layers.2.block_sparse_moe.experts.2.w2.weight": "model-00001-of-00006.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.0.w3.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.1.w3.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.2.w3.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.0.w1.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.1.w1.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.2.w1.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.0.w2.weight": "model-00001-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00006.safetensors", "model.layers.3.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00006.safetensors", "model.layers.4.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00006.safetensors", "model.layers.4.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00006.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", 
"model.layers.5.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00006.safetensors", "model.layers.5.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00006.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00006.safetensors", "model.layers.6.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00006.safetensors", "model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.1.w3.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.2.w3.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.0.w1.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.1.w1.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.2.w1.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.0.w2.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.1.w2.weight": "model-00002-of-00006.safetensors", "model.layers.7.block_sparse_moe.experts.2.w2.weight": "model-00002-of-00006.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.8.self_attn.q_proj.weight": 
"model-00002-of-00006.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.0.w3.weight": "model-00002-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00006.safetensors", "model.layers.8.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00006.safetensors", "model.layers.9.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00006.safetensors", "model.layers.9.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00006.safetensors", "model.layers.10.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00006.safetensors", 
"model.layers.10.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00006.safetensors", "model.layers.10.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00006.safetensors", "model.layers.11.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.2.w1.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.0.w2.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.1.w2.weight": "model-00003-of-00006.safetensors", "model.layers.11.block_sparse_moe.experts.2.w2.weight": "model-00003-of-00006.safetensors", "model.layers.12.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.0.w3.weight": "model-00003-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.1.w3.weight": "model-00003-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.2.w3.weight": "model-00003-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.0.w1.weight": "model-00003-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.1.w1.weight": "model-00003-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00006.safetensors", "model.layers.12.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00006.safetensors", "model.layers.13.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.0.w3.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.1.w3.weight": 
"model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00006.safetensors", "model.layers.13.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00006.safetensors", "model.layers.14.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.0.w3.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.1.w3.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00006.safetensors", "model.layers.14.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00006.safetensors", "model.layers.15.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.0.w3.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.1.w3.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00006.safetensors", "model.layers.15.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00006.safetensors", "model.layers.16.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", 
"model.layers.16.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.0.w3.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.1.w3.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.2.w3.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.0.w1.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.1.w1.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.2.w1.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.0.w2.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.1.w2.weight": "model-00004-of-00006.safetensors", "model.layers.16.block_sparse_moe.experts.2.w2.weight": "model-00004-of-00006.safetensors", "model.layers.17.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.0.w3.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.1.w3.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.2.w3.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.0.w1.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.1.w1.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.2.w1.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.0.w2.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.1.w2.weight": "model-00005-of-00006.safetensors", "model.layers.17.block_sparse_moe.experts.2.w2.weight": "model-00005-of-00006.safetensors", "model.layers.18.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.0.w3.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.1.w3.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.2.w3.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.0.w1.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.1.w1.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.2.w1.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.0.w2.weight": "model-00005-of-00006.safetensors", 
"model.layers.18.block_sparse_moe.experts.1.w2.weight": "model-00005-of-00006.safetensors", "model.layers.18.block_sparse_moe.experts.2.w2.weight": "model-00005-of-00006.safetensors", "model.layers.19.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.0.w3.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.1.w3.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.2.w3.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.0.w1.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.1.w1.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.2.w1.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.0.w2.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.1.w2.weight": "model-00005-of-00006.safetensors", "model.layers.19.block_sparse_moe.experts.2.w2.weight": "model-00005-of-00006.safetensors", "model.layers.20.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.0.w3.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.1.w3.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.2.w3.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.0.w1.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.1.w1.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.2.w1.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.0.w2.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.1.w2.weight": "model-00005-of-00006.safetensors", "model.layers.20.block_sparse_moe.experts.2.w2.weight": "model-00005-of-00006.safetensors", "model.layers.21.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.0.w3.weight": "model-00005-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.1.w3.weight": "model-00005-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.2.w3.weight": 
"model-00005-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.0.w1.weight": "model-00006-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.1.w1.weight": "model-00006-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.2.w1.weight": "model-00006-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.0.w2.weight": "model-00006-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.1.w2.weight": "model-00006-of-00006.safetensors", "model.layers.21.block_sparse_moe.experts.2.w2.weight": "model-00006-of-00006.safetensors", "model.norm.weight": "model-00006-of-00006.safetensors", "lm_head.weight": "model-00006-of-00006.safetensors", "model.layers.0.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.1.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.2.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.3.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.4.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.5.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.6.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.7.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.8.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.9.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.10.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.11.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.12.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.13.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.14.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.15.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.16.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.17.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.18.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.19.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.20.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors", "model.layers.21.block_sparse_moe.gate.weight": "model-00006-of-00006.safetensors"}}
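The index maps every tensor name to one of the six shards; its metadata reports a total of 5,245,448,192 bytes (about 5.2 GB) and mergekit version 0.0.4.2. A small sketch for inspecting a local copy of the index:

```python
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # 5245448192 bytes (~5.2 GB)
for shard, n in sorted(Counter(index["weight_map"].values()).items()):
    print(shard, n)                     # tensors stored per shard
```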
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<s>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false,
+   "use_fast": true
+ }
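Together with `added_tokens.json` and `special_tokens_map.json`, this configures ChatML-style control tokens: `<|im_end|>` (id 32000) serves as the EOS token, `<|im_start|>` is id 32001, and `<s>` is reused as the pad token with left-side padding. A quick check, assuming `transformers` is installed:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("JoPmt/TinyEnsemble-3x1.1B-TinyMoE")
print(tok.eos_token, tok.convert_tokens_to_ids("<|im_end|>"))  # <|im_end|> 32000
print(tok.convert_tokens_to_ids("<|im_start|>"))               # 32001
print(tok.pad_token, tok.padding_side)                         # <s> left
```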