gokaygokay committed
Commit 08456a1
Parent(s): d36da76
Update app.py
app.py CHANGED
@@ -262,9 +262,10 @@ class PromptGenerator:
 class HuggingFaceInferenceNode:
     def __init__(self):
         self.clients = {
+            "Llama 3.1": InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct"),
             "Mixtral": InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"),
             "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
-            "Llama": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
+            "Llama 3": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
             "Mistral-Nemo": InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
         }
         self.prompts_dir = "./prompts"
@@ -382,7 +383,7 @@ def create_interface():
         )
 
         with gr.Tab("HuggingFace Inference Text Generator"):
-            model = gr.Dropdown(["Mixtral", "Mistral", "Llama", "Mistral-Nemo"], label="Model", value="
+            model = gr.Dropdown(["Llama 3.1", "Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="Model", value="Llama 3.1")
            input_text = gr.Textbox(label="Input Text", lines=5)
            happy_talk = gr.Checkbox(label="Happy Talk", value=True)
            compress = gr.Checkbox(label="Compress", value=False)
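Below is a minimal sketch of how the updated client dictionary and the new dropdown default might be wired together at inference time. The generate_text helper, its parameters, and the prompt handling are assumptions for illustration only and are not part of this commit; only the client mapping and the "Llama 3.1" default come from the diff above.

from huggingface_hub import InferenceClient

# Client map as introduced in this commit: the dropdown's display names double as keys.
clients = {
    "Llama 3.1": InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct"),
    "Mixtral": InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"),
    "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
    "Llama 3": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
    "Mistral-Nemo": InferenceClient("mistralai/Mistral-Nemo-Instruct-2407"),
}

def generate_text(model_name: str, prompt: str) -> str:
    # Hypothetical helper (not from the commit): look up the client selected in the
    # Gradio dropdown and run text generation against the Hugging Face Inference API.
    client = clients[model_name]  # e.g. "Llama 3.1", the new default
    return client.text_generation(prompt, max_new_tokens=512)

# Example call with the new default model:
# print(generate_text("Llama 3.1", "Describe a foggy harbor at dawn."))

Keeping the dropdown choices identical to the dictionary keys means adding a model is a two-line change: one new InferenceClient entry and one new name in the gr.Dropdown list.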