Locutusque committed • Commit 22770d6 • 1 Parent(s): daecaae
Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ import os
 # Install flash-attn
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 # Initialize the model pipeline
-generator = pipeline('text-generation', model='
+generator = pipeline('text-generation', model='hydra-project/ChatHercules-2.5-Mistral-7B', torch_dtype=torch.bfloat16, token=os.environ["HF"])
 @spaces.GPU
 def generate_text(prompt, temperature, top_p, top_k, repetition_penalty, max_length):
     # Generate text using the model
@@ -43,8 +43,8 @@ iface = gr.Interface(
         gr.Slider(minimum=5, maximum=4096, step=5, value=1024, label="Max Length")
     ],
     outputs=gr.Textbox(label="Generated Text"),
-    title="
-    description="Try out the
+    title="ChatHercules-2.5-Mistral-7B",
+    description="Try out the ChatHercules-2.5-Mistral-7B model for free! This is a preview version, and the model will be released soon"
 )
 
 iface.launch()
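For context, below is a minimal, self-contained sketch of what the updated app.py could look like after this commit. Only the pipeline initialization, the Max Length slider, the outputs textbox, the title, and the description are visible in the diff; the prompt textbox, the other sliders and their ranges/defaults, the body of generate_text, and the do_sample setting are assumptions added for illustration, not the Space's actual code.

# Sketch of the updated Space, reconstructed from the diff above.
# Lines marked "assumed" are not shown in the commit and are illustrative only.
import os
import subprocess

import torch
import gradio as gr
import spaces
from transformers import pipeline

# Install flash-attn at startup; skipping the CUDA build avoids compiling the
# extension during pip install (as in the diff).
subprocess.run('pip install flash-attn --no-build-isolation',
               env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

# Initialize the model pipeline; the "HF" token is read from the Space's
# secrets, which are exposed as environment variables.
generator = pipeline(
    'text-generation',
    model='hydra-project/ChatHercules-2.5-Mistral-7B',
    torch_dtype=torch.bfloat16,
    token=os.environ["HF"],
)

@spaces.GPU
def generate_text(prompt, temperature, top_p, top_k, repetition_penalty, max_length):
    # Generate text using the model (body assumed: forward the slider values
    # as generation kwargs to the pipeline).
    outputs = generator(
        prompt,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        max_length=max_length,
    )
    return outputs[0]["generated_text"]

iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt"),  # assumed
        gr.Slider(minimum=0.1, maximum=2.0, step=0.05, value=0.7, label="Temperature"),        # assumed range/default
        gr.Slider(minimum=0.1, maximum=1.0, step=0.05, value=0.95, label="Top-p"),             # assumed range/default
        gr.Slider(minimum=1, maximum=200, step=1, value=50, label="Top-k"),                    # assumed range/default
        gr.Slider(minimum=1.0, maximum=2.0, step=0.05, value=1.1, label="Repetition Penalty"), # assumed range/default
        gr.Slider(minimum=5, maximum=4096, step=5, value=1024, label="Max Length"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="ChatHercules-2.5-Mistral-7B",
    description="Try out the ChatHercules-2.5-Mistral-7B model for free! This is a preview version, and the model will be released soon",
)

iface.launch()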