File size: 3,116 Bytes
3c4f472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d9e73df
 
 
 
 
3c4f472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c7942f0
d9e73df
b68e5b2
c7942f0
b68e5b2
 
1467618
d9e73df
 
b68e5b2
 
 
 
 
1467618
 
 
 
 
 
 
 
 
 
 
 
 
 
3c4f472
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import gradio as gr
import torch
#from torch import autocast  # only for GPU; this demo runs on CPU

from PIL import Image
import numpy as np
from io import BytesIO
import os

# Hugging Face auth token, injected via the Space's secret env var.
MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')

#from diffusers import StableDiffusionPipeline
from diffusers import StableDiffusionImg2ImgPipeline

# Startup marker so the Space logs show the script actually began loading.
print("hello sylvain")

YOUR_TOKEN=MY_SECRET_TOKEN

# CPU-only inference (no CUDA on this host); expect multi-minute runs.
device="cpu"

#prompt_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
#prompt_pipe.to(device)

# Img2img pipeline; loading happens once at import time (slow, large download).
img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    use_auth_token=YOUR_TOKEN,
    safety_checker=None,             # ← disable safety checker
)
img_pipe.to(device)

# Gradio components built ahead of the Interface; `type="filepath"` means
# infer() receives a path string, not a PIL image.
# NOTE(review): `source=` and `.style()` are legacy gradio 3.x APIs — confirm
# the pinned gradio version before upgrading.
source_img = gr.Image(source="upload", type="filepath", label="init_img | 512*512 px")
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[1], height="auto")

def resize(value, img):
    """Open the image at path *img* and resize it to a value×value square.

    Args:
        value: Target edge length in pixels. The output is square — the
            original aspect ratio is NOT preserved.
        img: Filesystem path to the source image.

    Returns:
        A PIL.Image.Image resized with Lanczos resampling.
    """
    image = Image.open(img)
    # Square resize matches the 512x512 input the SD pipeline expects.
    return image.resize((value, value), Image.Resampling.LANCZOS)


def infer(source_img, prompt, guide, steps, seed, strength):
    """Run img2img Stable Diffusion on an uploaded image.

    Args:
        source_img: Filesystem path to the init image (gr.Image, type="filepath").
        prompt: Text prompt guiding the generation.
        guide: Classifier-free guidance scale.
        steps: Number of denoising iterations.
        seed: Integer seed for reproducible generation.
        strength: How strongly to transform the init image (0.0–1.0).

    Returns:
        The list of generated PIL images (rendered by the gr.Gallery output).
    """
    # Seed a CPU generator so identical inputs reproduce identical outputs.
    generator = torch.Generator("cpu").manual_seed(seed)

    # Normalize the init image: RGB, 512x512 (the model's native resolution).
    source_image = Image.open(source_img).convert("RGB")
    source_image = source_image.resize((512, 512), Image.Resampling.LANCZOS)

    result = img_pipe(
        [prompt],
        image=source_image,
        strength=strength,
        guidance_scale=guide,
        num_inference_steps=steps,
        generator=generator,
    )
    output_images = result["images"]

    # Persist each generated image for later inspection; create the output
    # directory once, outside the loop (was previously re-created per image).
    os.makedirs("outputs", exist_ok=True)
    for idx, img in enumerate(output_images):
        save_path = os.path.join("outputs", f"output_{seed}_{idx}.png")
        img.save(save_path)
        print(f"Saved image to: {save_path}")

    return output_images

# Startup marker: everything above (model load included) completed.
print("Great sylvain ! Everything is working fine !")

title = "Img2Img Stable Diffusion CPU"
# The safety checker is disabled at pipeline construction (safety_checker=None),
# so the description must not claim the NSFW filter is enabled.
description = "<p style='text-align: center;'>Img2Img Stable Diffusion example using CPU and HF token. <br />Warning: Slow process... ~5/10 min inference time. <b>NSFW filter disabled. <br /> <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.stable-diffusion-img2img' style='display: inline-block'/></b></p>"

# Inputs are positional and must match infer()'s signature:
# (source_img, prompt, guide, steps, seed, strength).
gr.Interface(
    fn=infer,
    inputs=[
        source_img,
        "text",
        gr.Slider(2, 15, value=7, label='Guidance Scale'),  # label typo fixed ("Guidence")
        gr.Slider(10, 50, value=25, step=1, label='Number of Iterations'),
        gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True),
        gr.Slider(label='Strength', minimum=0, maximum=1, step=.05, value=.75),
    ],
    outputs=gallery,
    title=title,
    description=description,
    allow_flagging="manual",   # legacy gradio 3.x flagging API
    flagging_dir="flagged",
).queue(max_size=100).launch(enable_queue=True)