add back model urls (#848)
.env.template (CHANGED, +7 -0)
@@ -5,6 +5,7 @@ MODELS=`[
     "name" : "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "description" : "The latest MoE model from Mistral AI! 8x7B and outperforms Llama 2 70B in most benchmarks.",
     "websiteUrl" : "https://mistral.ai/news/mixtral-of-experts/",
+    "modelUrl": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
     "preprompt" : "",
     "chatPromptTemplate": "<s> {{#each messages}}{{#ifUser}}[INST]{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}} {{content}}</s> {{/ifAssistant}}{{/each}}",
     "parameters" : {
@@ -33,6 +34,7 @@ MODELS=`[
     "name": "meta-llama/Llama-2-70b-chat-hf",
     "description": "The latest and biggest model from Meta, fine-tuned for chat.",
     "websiteUrl": "https://ai.meta.com/llama/",
+    "modelUrl": "https://huggingface.co/meta-llama/Llama-2-70b-chat-hf",
     "preprompt": " ",
     "chatPromptTemplate" : "<s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}}",
     "promptExamples": [
@@ -61,6 +63,7 @@ MODELS=`[
     "name" : "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
     "description" : "Nous Hermes 2 Mixtral 8x7B DPO is the new flagship Nous Research model trained over the Mixtral 8x7B MoE LLM.",
     "websiteUrl" : "https://nousresearch.com/",
+    "modelUrl": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
     "chatPromptTemplate" : "{{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}}",
     "promptExamples": [
       {
@@ -89,6 +92,7 @@ MODELS=`[
     "displayName": "codellama/CodeLlama-70b-Instruct-hf",
     "description": "Code Llama, a state of the art code model from Meta. Now in 70B!",
     "websiteUrl": "https://ai.meta.com/blog/code-llama-large-language-model-coding/",
+    "modelUrl": "https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf",
     "preprompt": "",
     "chatPromptTemplate" : "<s>{{#if @root.preprompt}}Source: system\n\n {{@root.preprompt}} <step> {{/if}}{{#each messages}}{{#ifUser}}Source: user\n\n {{content}} <step> {{/ifUser}}{{#ifAssistant}}Source: assistant\n\n {{content}} <step> {{/ifAssistant}}{{/each}}Source: assistant\nDestination: user\n\n ",
     "promptExamples": [
@@ -118,6 +122,7 @@ MODELS=`[
     "displayName": "mistralai/Mistral-7B-Instruct-v0.1",
     "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.",
     "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
+    "modelUrl": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
     "preprompt": "",
     "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
     "parameters": {
@@ -148,6 +153,7 @@ MODELS=`[
     "displayName": "mistralai/Mistral-7B-Instruct-v0.2",
     "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.",
     "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
+    "modelUrl": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
     "preprompt": "",
     "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
     "parameters": {
@@ -177,6 +183,7 @@ MODELS=`[
     "displayName": "openchat/openchat-3.5-0106",
     "description": "OpenChat 3.5 is the #1 model on MT-Bench, with only 7B parameters.",
     "websiteUrl": "https://huggingface.co/openchat/openchat-3.5-0106",
+    "modelUrl": "https://huggingface.co/openchat/openchat-3.5-0106",
     "preprompt": "",
     "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}GPT4 Correct User: {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<|end_of_turn|>GPT4 Correct Assistant:{{/ifUser}}{{#ifAssistant}}{{content}}<|end_of_turn|>{{/ifAssistant}}{{/each}}",
     "parameters": {
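For context, each entry in the MODELS list of .env.template now pairs the existing websiteUrl (the model's announcement or landing page) with a modelUrl pointing at its Hugging Face repository. A minimal sketch of one entry after this change, assembled from the Mixtral lines in the diff above (the chatPromptTemplate is truncated here and the parameters block is left empty for brevity; the full strings appear in the diff):

MODELS=`[
  {
    "name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "description": "The latest MoE model from Mistral AI! 8x7B and outperforms Llama 2 70B in most benchmarks.",
    "websiteUrl": "https://mistral.ai/news/mixtral-of-experts/",
    "modelUrl": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
    "preprompt": "",
    "chatPromptTemplate": "<s> {{#each messages}}...{{/each}}",
    "parameters": {}
  }
]`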