gagan3012 committed on
Commit
cb8f5ff
1 Parent(s): 6c3aae0

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,114 @@
+ ---
+ license: apache-2.0
+ tags:
+ - moe
+ - mergekit
+ - merge
+ - chinese
+ - arabic
+ - english
+ - multilingual
+ - german
+ - french
+ - openchat/openchat-3.5-1210
+ - beowolx/CodeNinja-1.0-OpenChat-7B
+ - maywell/PiVoT-0.1-Starling-LM-RP
+ - WizardLM/WizardMath-7B-V1.1
+ - davidkim205/komt-mistral-7b-v1
+ - OpenBuddy/openbuddy-zephyr-7b-v14.1
+ - manishiitg/open-aditi-hi-v1
+ - VAGOsolutions/SauerkrautLM-7b-v1-mistral
+ ---
+
+ # MetaModel_moe_multilingualv2
+
+ This model is a Mixture of Experts (MoE) built with [mergekit](https://github.com/cg123/mergekit) (mixtral branch). It uses the following base models:
+ * [openchat/openchat-3.5-1210](https://huggingface.co/openchat/openchat-3.5-1210)
+ * [beowolx/CodeNinja-1.0-OpenChat-7B](https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B)
+ * [maywell/PiVoT-0.1-Starling-LM-RP](https://huggingface.co/maywell/PiVoT-0.1-Starling-LM-RP)
+ * [WizardLM/WizardMath-7B-V1.1](https://huggingface.co/WizardLM/WizardMath-7B-V1.1)
+ * [davidkim205/komt-mistral-7b-v1](https://huggingface.co/davidkim205/komt-mistral-7b-v1)
+ * [OpenBuddy/openbuddy-zephyr-7b-v14.1](https://huggingface.co/OpenBuddy/openbuddy-zephyr-7b-v14.1)
+ * [manishiitg/open-aditi-hi-v1](https://huggingface.co/manishiitg/open-aditi-hi-v1)
+ * [VAGOsolutions/SauerkrautLM-7b-v1-mistral](https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-v1-mistral)
+
+ ## 🧩 Configuration
+
+ ```yaml
+ base_model: mlabonne/NeuralMarcoro14-7B
+ dtype: bfloat16
+ experts:
+ - positive_prompts:
+   - chat
+   - assistant
+   - tell me
+   - explain
+   source_model: openchat/openchat-3.5-1210
+ - positive_prompts:
+   - code
+   - python
+   - javascript
+   - programming
+   - algorithm
+   source_model: beowolx/CodeNinja-1.0-OpenChat-7B
+ - positive_prompts:
+   - storywriting
+   - write
+   - scene
+   - story
+   - character
+   source_model: maywell/PiVoT-0.1-Starling-LM-RP
+ - positive_prompts:
+   - reason
+   - math
+   - mathematics
+   - solve
+   - count
+   source_model: WizardLM/WizardMath-7B-V1.1
+ - positive_prompts:
+   - korean
+   - answer in korean
+   - korea
+   source_model: davidkim205/komt-mistral-7b-v1
+ - positive_prompts:
+   - chinese
+   - china
+   - answer in chinese
+   source_model: OpenBuddy/openbuddy-zephyr-7b-v14.1
+ - positive_prompts:
+   - hindi
+   - india
+   - hindu
+   - answer in hindi
+   source_model: manishiitg/open-aditi-hi-v1
+ - positive_prompts:
+   - german
+   - germany
+   - answer in german
+   - deutsch
+   source_model: VAGOsolutions/SauerkrautLM-7b-v1-mistral
+ gate_mode: hidden
+ ```
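A merge with this configuration can be reproduced with mergekit's MoE entry point. The snippet below is a minimal, notebook-style sketch, assuming the `mergekit-moe` command from the mixtral branch linked above; `config.yaml` stands for the configuration shown here and the output directory is a placeholder.

```python
# Minimal sketch (assumptions: the mergekit mixtral branch exposes a `mergekit-moe`
# command; paths are placeholders). Save the configuration above as config.yaml first.
!pip install -qU git+https://github.com/cg123/mergekit.git@mixtral
!mergekit-moe config.yaml ./MetaModel_moe_multilingualv2
```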
+
+ ## 💻 Usage
+
+ ```python
+ !pip install -qU transformers bitsandbytes accelerate
+
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+
+ model = "gagan3012/MetaModel_moe_multilingualv2"
+
+ tokenizer = AutoTokenizer.from_pretrained(model)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
+ )
+
+ messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
+ prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+ print(outputs[0]["generated_text"])
+ ```
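The float16 shards listed below add up to roughly 90 GB, so 4-bit loading (as in the pipeline call above) is usually needed on a single GPU. As an alternative to the pipeline call, here is a minimal sketch that loads the model explicitly with `AutoModelForCausalLM` and a `BitsAndBytesConfig`; the quantization settings are illustrative, not taken from the model card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "gagan3012/MetaModel_moe_multilingualv2"

# 4-bit NF4 quantization keeps the 8x7B expert weights within a single-GPU budget.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.95)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```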
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "mlabonne/NeuralMarcoro14-7B",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mixtral",
+   "num_attention_heads": 32,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "router_aux_loss_coef": 0.001,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.36.2",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
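This is a standard Mixtral-style config: 8 local experts with 2 routed per token, on a Mistral-7B-shaped backbone (4096 hidden size, 32 layers, 8 KV heads). A minimal sketch for confirming those values without downloading the weights, since `AutoConfig` only fetches `config.json`:

```python
from transformers import AutoConfig

# Fetches only config.json from the Hub, not the ~90 GB of weight shards.
cfg = AutoConfig.from_pretrained("gagan3012/MetaModel_moe_multilingualv2")
print(cfg.model_type)           # mixtral
print(cfg.num_local_experts)    # 8 experts in each MoE layer
print(cfg.num_experts_per_tok)  # 2 experts routed per token
```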
model-1.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0adbacc08a9825c73631da8dbbb2a758b01090a9cb6aa7597f79c8e2136f0516
+ size 9919813664
model-2.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a034f3d253e5ff152d104649b819548e85415436ae7561fa2551bc55d811680
+ size 9982454760
model-3.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fc87aedde5bea472c14e7fe7d48f5717c745fc068d3dc50501925d9bd62514c
+ size 9982454760
model-4.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d14fdf959bb469a33e61c9ef560afe197cae1bbe07395d78e7f6bf4cd88dbbbc
+ size 9982454680
model-5.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fd4200ee64034f3c8ba22683ffcc4783aca7ab3e475c737f6ffef7592b00190
+ size 9982454760
model-6.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c083298e0ed5878fa0f990bc7bc7cd904baab3ffb4cf0f0a27e18769cfff2337
+ size 9982454760
model-7.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:325fbc097fdfef5532373196c0c2462a9d0dd132c7ae39edf127e7b18c5717dd
+ size 9982454680
model-8.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9892556cf515c9cfebe2f1562bd7a6cabeae1a8063fcc2f1e2f315f1e3b69bd3
+ size 9982454760
model-9.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd1656351f519ecb9af2a32e8748f8993a6301d4b7855499b94c284db72797ab
+ size 9982454760