shweaung committed (verified)
Commit 1134d0a · 1 Parent(s): d1e2f0d

Update app.py

Files changed (1)
  1. app.py +28 -30
app.py CHANGED
@@ -3,8 +3,11 @@ import requests
 import io
 import random
 import os
+import time
 from PIL import Image
 from deep_translator import GoogleTranslator
+import json
+
 
 API_TOKEN = os.getenv("HF_READ_TOKEN")
 headers = {"Authorization": f"Bearer {API_TOKEN}"}
@@ -23,19 +26,27 @@ article_text = """
 </div>
 """
 
-def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1, width=1024, height=1024, output_format="PNG"):
-    if not prompt:
-        return None, None, None
+def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1, width=1024, height=1024):
+    if prompt == "" or prompt == None:
+        return None
 
-    if not lora_id.strip():
-        lora_id = "black-forest-labs/FLUX.1-dev"
+    if lora_id.strip() == "" or lora_id == None:
+        lora_id = "black-forest-labs/FLUX.1-dev"
 
     key = random.randint(0, 999)
-    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
+
+    API_URL = "https://api-inference.huggingface.co/models/"+ lora_id.strip()
+
+    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
+    headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
+    #prompt = GoogleTranslator(source='my', target='en').translate(prompt)
+    # print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
+
     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+    # print(f'\033[1mGeneration {key}:\033[0m {prompt}')
 
-    # Generate a random seed if needed
+    # If seed is -1, generate a random seed and use it
     if randomize_seed:
         seed = random.randint(1, 4294967296)
 
@@ -45,12 +56,11 @@ def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1
         "cfg_scale": cfg_scale,
         "seed": seed,
         "parameters": {
-            "width": width,
-            "height": height
+            "width": width,  # Pass the width to the API
+            "height": height  # Pass the height to the API
         }
     }
 
-    # Send the request and get the image data
     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
     if response.status_code != 200:
         print(f"Error: Failed to get image. Response status: {response.status_code}")
@@ -60,20 +70,13 @@ def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1
         raise gr.Error(f"{response.status_code}")
 
     try:
-        # Load and convert the image to the specified format (JPEG or PNG)
         image_bytes = response.content
         image = Image.open(io.BytesIO(image_bytes))
         print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
-
-        img_byte_arr = io.BytesIO()
-        image.save(img_byte_arr, format=output_format)
-        img_byte_arr.seek(0)
-
-        # Prepare for Gradio's download
-        return image, seed, img_byte_arr
+        return image, seed, seed
     except Exception as e:
         print(f"Error when trying to open the image: {e}")
-        return None, None, None
+        return None
 
 
 examples = [
@@ -109,28 +112,23 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
        with gr.Row():
            steps = gr.Slider(label="Sampling steps", value=28, minimum=1, maximum=100, step=1)
            cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
-           output_format = gr.Dropdown(label="Output Format", choices=["JPEG", "PNG"], value="PNG")
+           # method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
 
    with gr.Row():
        text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
    with gr.Row():
-       seed_output = gr.Textbox(label="Seed Used", show_copy_button=True, elem_id="seed-output")
-   with gr.Row():
-       download_button = gr.File(label="Download Image", elem_id="download-button")
+       seed_output = gr.Textbox(label="Seed Used", show_copy_button = True, elem_id="seed-output")
 
    gr.Markdown(article_text)
 
    gr.Examples(
-       examples=examples,
-       inputs=[text_prompt],
+       examples = examples,
+       inputs = [text_prompt],
    )
 
-   def download_image(image_data):
-       return image_data
-
-   text_button.click(query, inputs=[custom_lora, text_prompt, steps, cfg, randomize_seed, seed, width, height, output_format], outputs=[image_output, seed_output, download_button])
-   download_button.change(download_image, inputs=[download_button], outputs=download_button)
+
+   text_button.click(query, inputs=[custom_lora, text_prompt, steps, cfg, randomize_seed, seed, width, height], outputs=[image_output,seed_output, seed])
 
 app.launch(show_api=False, share=True)
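
For reference, a minimal standalone sketch of the request flow that the updated query() performs, handy for testing the serverless endpoint outside Gradio. The endpoint URL, payload shape, seed range, and HF_READ_TOKEN variable mirror the diff above; the example prompt, the fixed timeout value, and the output filename are illustrative assumptions (the app itself reads a module-level timeout variable not shown in this diff).

import io
import os
import random

import requests
from PIL import Image

# Assumption: HF_READ_TOKEN is set in the environment, as in the Space.
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

prompt = "a lighthouse on a cliff at dusk"  # illustrative prompt, not from the diff
seed = random.randint(1, 4294967296)        # same randomized-seed range as query()

payload = {
    "inputs": f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.",
    "steps": 28,
    "cfg_scale": 3.5,
    "seed": seed,
    "parameters": {"width": 1024, "height": 1024},
}

# timeout=100 is an assumed value; the app passes its own `timeout` variable.
response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
response.raise_for_status()

# The endpoint returns raw image bytes; open them with PIL just as query() does.
image = Image.open(io.BytesIO(response.content))
image.save("output.png")
print(f"Saved output.png (seed {seed})")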