seawolf2357 committed on
Commit
7ed38a8
1 Parent(s): 7b436d6

Update app.py

Files changed (1)
  1. app.py +16 -51
app.py CHANGED
@@ -6,6 +6,7 @@ import torch
 import time
 from diffusers import DiffusionPipeline, AutoencoderTiny
 from custom_pipeline import FluxWithCFGPipeline
+from transformers import pipeline

 # Constants
 MAX_SEED = np.iinfo(np.int32).max
@@ -14,6 +15,9 @@ DEFAULT_WIDTH = 1024
 DEFAULT_HEIGHT = 768
 DEFAULT_INFERENCE_STEPS = 4

+# Initialize translator
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
 # Device and model setup
 dtype = torch.float16
 pipe = FluxWithCFGPipeline.from_pretrained(
@@ -23,15 +27,25 @@ pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtyp
 pipe.to("cuda")
 torch.cuda.empty_cache()

+# Translation function
+def translate_to_english(text):
+    if any(ord('가') <= ord(char) <= ord('힣') for char in text):
+        translated = translator(text)[0]['translation_text']
+        return translated
+    return text
+
 # Inference function
 @spaces.GPU(duration=25)
 def generate_image(prompt, seed=24, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, progress=gr.Progress(track_tqdm=True)):
+    # Translate prompt if Korean
+    english_prompt = translate_to_english(prompt)
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(int(float(seed)))

     img = pipe.generate_images(
-        prompt=prompt,
+        prompt=english_prompt,
         width=width,
         height=height,
         num_inference_steps=DEFAULT_INFERENCE_STEPS,
@@ -70,53 +84,4 @@ footer {visibility: hidden;}
 .generate-box .row {display: flex; align-items: center; margin-bottom: 10px;}
 .generate-box .row > * {margin-right: 10px;}
 .generate-box .row > *:last-child {margin-right: 0;}
-.advanced-options {background-color: #e0e0e0; border-radius: 10px; padding: 20px; margin-top: 20px;}
-.examples-gallery {margin-top: 30px;}
-"""
-
-# --- Gradio UI ---
-with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
-    with gr.Column(elem_id="container"):
-        gr.Markdown("# Open FLUX 1.1 Pro")
-        gr.Markdown("Flux Schnell-based with no commercial restrictions, 4-step fast image generation with quality enhancement, and improved memory efficiency (VAE).")
-
-        with gr.Row():
-            with gr.Column(scale=2):
-                result = gr.Image(label="Generated Image", show_label=False, interactive=False, elem_classes="image-box")
-            with gr.Column(scale=1):
-                with gr.Column(elem_classes="generate-box"):
-                    prompt = gr.Text(
-                        label="Prompt",
-                        placeholder="sexy woman & man , under wear, full body, sunday",
-                        lines=3,
-                    )
-                    generateBtn = gr.Button("Generate Image", variant="primary")
-
-                with gr.Column(elem_classes="advanced-options"):
-                    with gr.Row():
-                        seed = gr.Number(label="Seed", value=42)
-                        randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                    with gr.Row():
-                        width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
-                        height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
-
-        with gr.Column(elem_classes="examples-gallery"):
-            gr.Markdown("### Gallery")
-            gr.Examples(
-                examples=examples,
-                fn=generate_image,
-                inputs=[prompt],
-                outputs=[result, seed],
-                cache_examples="lazy"
-            )
-
-    generateBtn.click(
-        fn=generate_image,
-        inputs=[prompt, seed, width, height, randomize_seed],
-        outputs=[result, seed],
-        show_progress="full",
-        api_name="GenerateImage",
-    )
-
-# Launch the app
-demo.launch()
+.advanced-options {background-color: #e0e0e0; border-radius
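For reference, the check in translate_to_english tests each character against the Hangul syllables block (가 is U+AC00, 힣 is U+D7A3), so any prompt containing at least one Hangul syllable is run through the Helsinki-NLP/opus-mt-ko-en translation pipeline before it reaches the image pipeline; everything else is returned unchanged. Below is a minimal, self-contained sketch of that translation path on its own, outside the Space and without the FLUX pipeline; the example prompts are illustrative and not taken from the repository.

from transformers import pipeline

# Same checkpoint app.py loads at startup; downloaded on first use.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def translate_to_english(text: str) -> str:
    # U+AC00 ('가') .. U+D7A3 ('힣') covers precomposed Hangul syllables.
    if any(ord('가') <= ord(ch) <= ord('힣') for ch in text):
        return translator(text)[0]['translation_text']
    return text

if __name__ == "__main__":
    print(translate_to_english("노을 지는 해변"))    # Korean prompt (hypothetical) -> translated to English
    print(translate_to_english("a sunset beach"))    # already English -> passed through unchanged

Note that the range check only matches precomposed syllables, so text written purely in standalone Jamo would not be translated; that mirrors the code in the commit rather than being a behavior change.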