Nick088 committed on
Commit
eb7f4aa
1 Parent(s): 78cc8b8

Update app.py

Files changed (1): app.py +0 -8
app.py CHANGED
@@ -20,7 +20,6 @@ model.to(device)
 
 
 def generate(
-    precision_model,
     system_prompt,
     prompt,
     max_new_tokens,
@@ -30,10 +29,6 @@ def generate(
     top_k,
     seed
 ):
-
-    model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", torch_dtype=precision_model)
-
-    model.to(device)
 
     input_text = f"{system_prompt}, {prompt}"
     input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
@@ -59,8 +54,6 @@ def generate(
     return better_prompt
 
 
-precision_model = gr.Radio([('fp32', torch.float32), ('fp16', torch.float16)], value=torch.float16, label="Model Precision Type", info="fp32 is more precised but slower, fp16 is faster and less resource consuming but less pricse")
-
 prompt = gr.Textbox(label="Prompt", interactive=True)
 
 system_prompt = gr.Textbox(label="System Prompt", interactive=True)
@@ -79,7 +72,6 @@ seed = gr.Number(value=42, interactive=True, label="Seed", info="A starting poin
 
 examples = [
     [
-        torch.float16,
        "A storefront with 'Text to Image' written on it.",
        "Expand the following prompt to add more detail:",
        512,
 
20
 
21
 
22
  def generate(
 
23
  system_prompt,
24
  prompt,
25
  max_new_tokens,
 
29
  top_k,
30
  seed
31
  ):
 
 
 
 
32
 
33
  input_text = f"{system_prompt}, {prompt}"
34
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 
54
  return better_prompt
55
 
56
 
 
 
57
  prompt = gr.Textbox(label="Prompt", interactive=True)
58
 
59
  system_prompt = gr.Textbox(label="System Prompt", interactive=True)
 
72
 
73
  examples = [
74
  [
 
75
  "A storefront with 'Text to Image' written on it.",
76
  "Expand the following prompt to add more detail:",
77
  512,
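Net effect of this commit: generate() no longer reloads the T5 model on every call with a user-selected precision, and the precision Radio input disappears from the UI and the examples list. Generation relies instead on the model and tokenizer already loaded at module level (the hunk context shows model.to(device) just above def generate(). The following is a minimal sketch of that module-level setup; the exact dtype and the from_pretrained call outside the diff are assumptions, not part of this commit:

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed: model and tokenizer are loaded once at startup; fp16 mirrors the
# default of the removed precision Radio, but the actual dtype used after
# this change is not visible in the diff.
tokenizer = T5Tokenizer.from_pretrained("roborovski/superprompt-v1")
model = T5ForConditionalGeneration.from_pretrained(
    "roborovski/superprompt-v1", torch_dtype=torch.float16
)
model.to(device)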