Nick088 committed
Commit 78cc8b8
1 Parent(s): 9f87e56

Update app.py

Files changed (1):
app.py +2 -2
app.py CHANGED
@@ -59,7 +59,7 @@ def generate(
     return better_prompt
 
 
-precision_model = gr.Radio([('fp32', torch.float32), ('fp16', torch.float16)], value='fp16', label="Model Precision Type", info="fp32 is more precised but slower, fp16 is faster and less resource consuming but less pricse")
+precision_model = gr.Radio([('fp32', torch.float32), ('fp16', torch.float16)], value=torch.float16, label="Model Precision Type", info="fp32 is more precised but slower, fp16 is faster and less resource consuming but less pricse")
 
 prompt = gr.Textbox(label="Prompt", interactive=True)
 
@@ -94,7 +94,7 @@ examples = [
 gr.Interface(
     fn=generate,
     inputs=[precision_model, prompt, system_prompt, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed],
-    outputs=gr.Textbox(label="Better Prompt", interactive=True),
+    outputs=gr.Textbox(label="Better Prompt"),
     title="SuperPrompt-v1",
     description="Make your prompts more detailed!<br>Model used: https://huggingface.co/roborovski/superprompt-v1<br>Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)",
     examples=examples,
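
For context, the first hunk changes the Radio default from the label string 'fp16' to the underlying value torch.float16. A minimal sketch of why that matters, assuming Gradio 4.x (plain string values stand in here for the torch dtypes used in app.py):

import gradio as gr

# With (label, value) choice pairs, Gradio passes the selected choice's
# *value* (not its display label) to the callback, and the `value=`
# default must likewise be one of the underlying values.
def echo_precision(precision):
    return f"selected value: {precision!r}"

demo = gr.Interface(
    fn=echo_precision,
    inputs=gr.Radio(
        [("fp32", "float32"), ("fp16", "float16")],  # (label, value) pairs
        value="float16",                             # a value, not the label "fp16"
        label="Model Precision Type",
    ),
    outputs=gr.Textbox(label="Result"),
)

if __name__ == "__main__":
    demo.launch()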