Akbartus committed on
Commit
2df43d0
·
verified ·
1 Parent(s): 1da0f92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -2,11 +2,10 @@ import os
2
  import gradio as gr
3
  import numpy as np
4
  import random
5
- from huggingface_hub import AsyncInferenceClient
6
  from translatepy import Translator
7
  import requests
8
  import re
9
- import asyncio
10
  from PIL import Image
11
  from gradio_client import Client, handle_file
12
  from huggingface_hub import login
@@ -18,19 +17,19 @@ MAX_SEED = np.iinfo(np.int32).max
18
def enable_lora(lora_add, basemodel):
    """Return the LoRA model id when one is supplied, otherwise the base model."""
    if lora_add:
        return lora_add
    return basemodel
20
 
21
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    """Translate the prompt to English, append the LoRA trigger word, and run
    text-to-image inference against *model* via the async HF Inference API.

    Returns (image, seed) on success, or (None, None) on any failure.
    """
    try:
        # A seed of -1 means "choose one at random".
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        print(seed)
        seed = int(seed)
        # Prompts may arrive in any language; the model expects English.
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
        client = AsyncInferenceClient()
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model,
        )
        return image, seed
    except Exception as e:
        print(f"Error generando imagen: {e}")
        return None, None
35
 
36
  def get_upscale_finegrain(prompt, img_path, upscale_factor):
@@ -39,13 +38,13 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
39
  result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
40
  return result[1]
41
  except Exception as e:
42
- print(f"Error escalando imagen: {e}")
43
  return None
44
 
45
- async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
46
  model = enable_lora(lora_model, basemodel) if process_lora else basemodel
47
 
48
- image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
49
  if image is None:
50
  return [None, None]
51
 
@@ -90,4 +89,4 @@ with gr.Blocks(css=css) as demo:
90
 
91
  btn = gr.Button("Generate")
92
  btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res,)
93
- demo.launch()
 
2
  import gradio as gr
3
  import numpy as np
4
  import random
5
+ from huggingface_hub import InferenceClient # Replaced AsyncInferenceClient with InferenceClient
6
  from translatepy import Translator
7
  import requests
8
  import re
 
9
  from PIL import Image
10
  from gradio_client import Client, handle_file
11
  from huggingface_hub import login
 
17
def enable_lora(lora_add, basemodel):
    """Pick the model to use: the LoRA id when given, else the base model."""
    return lora_add or basemodel
19
 
20
def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    """Translate the prompt to English, append the LoRA trigger word, and run
    synchronous text-to-image inference against *model*.

    Returns (image, seed) on success, or (None, None) on any failure.
    """
    try:
        # A seed of -1 means "choose one at random".
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        print(seed)
        seed = int(seed)
        # Prompts may arrive in any language; the model expects English.
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
        # Synchronous client (replaced the earlier AsyncInferenceClient).
        client = InferenceClient()
        image = client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model,
        )
        return image, seed
    except Exception as e:
        print(f"Error generating image: {e}")
        return None, None
34
 
35
  def get_upscale_finegrain(prompt, img_path, upscale_factor):
 
38
  result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
39
  return result[1]
40
  except Exception as e:
41
+ print(f"Error upscaling image: {e}")
42
  return None
43
 
44
+ def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
45
  model = enable_lora(lora_model, basemodel) if process_lora else basemodel
46
 
47
+ image, seed = generate_image(prompt, model, "", width, height, scales, steps, seed)
48
  if image is None:
49
  return [None, None]
50
 
 
89
 
90
  btn = gr.Button("Generate")
91
  btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res,)
92
+ demo.launch()