import os
import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
from typing import Iterable

from diffusers import FluxKontextPipeline
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download
from aura_sr import AuraSR
from gradio_imageslider import ImageSlider

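# Prefer a visible GPU; fall back to CPU so the script still imports for local debugging.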
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# --- Device and CUDA Setup Check ---
print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("torch.version.cuda =", torch.version.cuda)
print("cuda available:", torch.cuda.is_available())
print("cuda device count:", torch.cuda.device_count())
if torch.cuda.is_available():
    print("current device:", torch.cuda.current_device())
    print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))

print("Using device:", device)

# --- Main Model Initialization ---
MAX_SEED = np.iinfo(np.int32).max
pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")

# --- Load All Adapters ---
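# Each LoRA is registered under its own adapter_name so it can be activated per request via pipe.set_adapters().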
pipe.load_lora_weights("prithivMLmods/PhotoCleanser-i2i", weight_name="PhotoCleanser-i2i.safetensors", adapter_name="cleanser")
pipe.load_lora_weights("prithivMLmods/Photo-Restore-i2i", weight_name="Photo-Restore-i2i.safetensors", adapter_name="restorer")
pipe.load_lora_weights("prithivMLmods/Polaroid-Warm-i2i", weight_name="Polaroid-Warm-i2i.safetensors", adapter_name="polaroid")
pipe.load_lora_weights("prithivMLmods/Monochrome-Pencil", weight_name="Monochrome-Pencil-i2i.safetensors", adapter_name="pencil")
pipe.load_lora_weights("prithivMLmods/LZO-1-Preview", weight_name="LZO-1-Preview.safetensors", adapter_name="lzo")
pipe.load_lora_weights("prithivMLmods/Kontext-Watermark-Remover", weight_name="Kontext-Watermark-Remover.safetensors", adapter_name="watermark-remover")

# --- Upscaler Model Initialization ---
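# AuraSR-v2 performs 4x GAN-based super-resolution; it runs only when the upscale checkbox is ticked.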
aura_sr = AuraSR.from_pretrained("fal/AuraSR-v2")

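# @spaces.GPU requests GPU time for each call when the app runs on Hugging Face Spaces (ZeroGPU).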
@spaces.GPU
def infer(input_image, prompt, lora_adapter, upscale_image, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
    """
    Perform image editing and optional upscaling, returning a pair for the ImageSlider.
    """
    if input_image is None:
        raise gr.Error("Please upload an image for editing.")

    # Map the UI selection to the matching loaded adapter and activate it.
    adapter_map = {
        "PhotoCleanser": "cleanser",
        "PhotoRestorer": "restorer",
        "PolaroidWarm": "polaroid",
        "MonochromePencil": "pencil",
        "LZO-Zoom": "lzo",
        "Kontext-Watermark-Remover": "watermark-remover",
    }
    if lora_adapter in adapter_map:
        pipe.set_adapters([adapter_map[lora_adapter]], adapter_weights=[1.0])
        
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    
    original_image = input_image.copy().convert("RGB")
    
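    # Generate at the input's native resolution with a fixed-seed generator for reproducibility.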
    image = pipe(
        image=original_image, 
        prompt=prompt,
        guidance_scale=guidance_scale,
        width=original_image.size[0],
        height=original_image.size[1],
        num_inference_steps=steps,
        generator=torch.Generator().manual_seed(seed),
    ).images[0]

    if upscale_image:
        progress(0.8, desc="Upscaling image...")
        image = aura_sr.upscale_4x(image)

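    # Return the (before, after) pair for the ImageSlider and reveal the hidden "reuse" button.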
    return (original_image, image), seed, gr.Button(visible=True)

@spaces.GPU
def infer_example(input_image, prompt, lora_adapter):
    """
    Wrapper function for gr.Examples to call the main infer logic for the slider.
    """
    (original_image, generated_image), seed, _ = infer(input_image, prompt, lora_adapter, upscale_image=False)
    return (original_image, generated_image), seed

css = """
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
#main-title h1 {font-size: 2.1em !important;}
"""

with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **Photo-Mate-i2i**", elem_id="main-title")
        gr.Markdown("Image manipulation with FLUX.1 Kontext adapters. [How to Use](https://huggingface.co/spaces/prithivMLmods/Photo-Mate-i2i/discussions/2)")
        
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload Image", type="pil", height=300)
                with gr.Row():
                    prompt = gr.Text(
                        label="Edit Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt for editing (e.g., 'Remove glasses')",
                        container=False,
                    )
                    run_button = gr.Button("Run", variant="primary", scale=0)
                with gr.Accordion("Advanced Settings", open=False):
                    
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=2.5,
                    )       
                    
                    steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=30,
                        value=28,
                        step=1
                    )
                    
            with gr.Column():
                output_slider = ImageSlider(label="Before / After", show_label=False, interactive=False)
                reuse_button = gr.Button("Reuse this image", visible=False)
                
                with gr.Row():
                    lora_adapter = gr.Dropdown(
                        label="Chosen LoRA",
                        choices=["PhotoCleanser", "PhotoRestorer", "PolaroidWarm", "MonochromePencil", "LZO-Zoom", "Kontext-Watermark-Remover"],
                        value="PhotoCleanser"
                    )
                    
                with gr.Row():
                    upscale_checkbox = gr.Checkbox(label="Upscale the final image", value=False)

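        # Example outputs are cached lazily, i.e. computed on first request instead of at startup.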
        gr.Examples(
            examples=[
                ["photocleanser/2.png", "[photo content], remove the cat from the image while preserving the background and remaining elements, maintaining realism and original details.", "PhotoCleanser"],
                ["photocleanser/1.png", "[photo content], remove the football from the image while preserving the background and remaining elements, maintaining realism and original details.", "PhotoCleanser"],
                ["watermark/12.jpeg", "[photo content], remove any watermark text or logos from the image while preserving the background, texture, lighting, and overall realism. Ensure the edited areas blend seamlessly with surrounding details, leaving no visible traces of watermark removal.", "Kontext-Watermark-Remover"],
                ["photorestore/1.png", "[photo content], restore and enhance the image by repairing any damage, scratches, or fading. Colorize the photo naturally while preserving authentic textures and details, maintaining a realistic and historically accurate look.", "PhotoRestorer"],
                ["lzo/1.jpg", "[photo content], zoom in on the specified [face close-up], enhancing resolution and detail while preserving sharpness, realism, and original context. Maintain natural proportions and background continuity around the zoomed area.", "LZO-Zoom"],
                ["photorestore/2.png", "[photo content], restore and enhance the image by repairing any damage, scratches, or fading. Colorize the photo naturally while preserving authentic textures and details, maintaining a realistic and historically accurate look.", "PhotoRestorer"],
                ["polaroid/1.png", "[photo content], in the style of a vintage Polaroid, with warm, faded tones, and a white border.", "PolaroidWarm"],
                ["pencil/1.png", "[photo content], replicate the image as a pencil illustration, black and white, with sketch-like detailing.", "MonochromePencil"],
            ],
            inputs=[input_image, prompt, lora_adapter],
            outputs=[output_slider, seed],
            fn=infer_example,
            cache_examples="lazy",
            label="Examples"
        )
            
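    # Both the Run button and pressing Enter in the prompt box trigger inference.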
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, lora_adapter, upscale_checkbox, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_slider, seed, reuse_button]
    )
    
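    # The slider holds an (original, edited) pair; feed the edited image back as the next input.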
    reuse_button.click(
        fn=lambda images: images[1] if isinstance(images, (list, tuple)) and len(images) > 1 else images,
        inputs=[output_slider],
        outputs=[input_image]
    )

demo.launch(mcp_server=True, ssr_mode=False, show_error=True)