# EditAnything/tools/sam2image_ori_version.py
# Edit Anything trained with Stable Diffusion + ControlNet + SAM + BLIP2
import os
import random

import cv2
import einops
import gradio as gr
import numpy as np
import torch
from PIL import Image
from pytorch_lightning import seed_everything
from torchvision.utils import save_image

from share import *
import config
from annotator.util import resize_image, HWC3
from cldm.ddim_hacked import DDIMSampler
from cldm.model import create_model, load_state_dict
device = "cuda" if torch.cuda.is_available() else "cpu"
use_blip = True
use_gradio = False
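# use_blip: caption the input image with BLIP2 and use/prepend the caption as the prompt.
# use_gradio: launch the web demo; otherwise run the offline test at the bottom of this file.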

# Diffusion init.
model = create_model('./models/cldm_v21.yaml').cpu()
model.load_state_dict(load_state_dict(
    'models/edit-anything-ckpt-v0-1.ckpt', location=device))
model.to(device=device)
ddim_sampler = DDIMSampler(model)
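# DDIMSampler is ControlNet's hacked DDIM implementation (cldm.ddim_hacked);
# it consumes the c_concat/c_crossattn conditioning dicts built in process() below.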
# Segment-Anything init.
# pip install git+https://github.com/facebookresearch/segment-anything.git
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
sam_checkpoint = "models/sam_vit_h_4b8939.pth"
model_type = "default"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
mask_generator = SamAutomaticMaskGenerator(sam)
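# SamAutomaticMaskGenerator prompts SAM with a grid of points over the image and
# returns one dict per mask; show_anns() below uses its 'segmentation' and 'area' fields.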

# BLIP2 init.
if use_blip:
    # need the latest transformers
    # pip install git+https://github.com/huggingface/transformers.git
    from transformers import AutoProcessor, Blip2ForConditionalGeneration

    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
    blip_model = Blip2ForConditionalGeneration.from_pretrained(
        "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
    blip_model.to(device)


def get_blip2_text(image):
    inputs = processor(image, return_tensors="pt").to(device, torch.float16)
    generated_ids = blip_model.generate(**inputs, max_new_tokens=50)
    generated_text = processor.batch_decode(
        generated_ids, skip_special_tokens=True)[0].strip()
    return generated_text
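# Illustrative call (the returned caption text here is hypothetical):
#   get_blip2_text(np.array(Image.open("images/sa_309398.jpg")))
#   -> e.g. "a city street filled with lots of traffic"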


def show_anns(anns):
    if len(anns) == 0:
        return
    sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
    full_img = None
    for i in range(len(sorted_anns)):
        ann = sorted_anns[i]
        m = ann['segmentation']
        if full_img is None:
            full_img = np.zeros((m.shape[0], m.shape[1], 3))
            map = np.zeros((m.shape[0], m.shape[1]), dtype=np.uint16)
        map[m != 0] = i + 1
        color_mask = np.random.random((1, 3)).tolist()[0]
        full_img[m != 0] = color_mask
    full_img = (full_img * 255).astype(np.uint8)
    # anno encoding from https://github.com/LUSSeg/ImageNet-S
    res = np.zeros((map.shape[0], map.shape[1], 3))
    res[:, :, 0] = map % 256
    res[:, :, 1] = map // 256
    res = res.astype(np.float32)
    return full_img, res
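

# A minimal sketch (hypothetical helper, not used elsewhere in this file)
# showing how the per-pixel mask ids can be recovered from the
# ImageNet-S-style encoding produced by show_anns(), where id = R + 256 * G:
def decode_ann_map(res):
    # res: (H, W, 3) array from show_anns(); channel 2 is unused, 0 means "no mask".
    return (res[:, :, 0] + 256 * res[:, :, 1]).astype(np.uint16)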


def get_sam_control(image):
    masks = mask_generator.generate(image)
    full_img, res = show_anns(masks)
    return full_img, res
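# get_sam_control() returns the colored mask visualization (for display) and the
# id-encoded map, which process() resizes and feeds to ControlNet as the control signal.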


def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution,
            detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    with torch.no_grad():
        if use_blip:
            print("Generating text:")
            blip2_prompt = get_blip2_text(input_image)
            print("Generated text:", blip2_prompt)
            if len(prompt) > 0:
                prompt = blip2_prompt + ',' + prompt
            else:
                prompt = blip2_prompt
            print("All text:", prompt)

        input_image = HWC3(input_image)
        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        print("Generating SAM seg:")
        # the default SAM model is trained with a 1024 input size.
        full_segmask, detected_map = get_sam_control(
            resize_image(input_image, detect_resolution))
        detected_map = HWC3(detected_map.astype(np.uint8))
        detected_map = cv2.resize(
            detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        control = torch.from_numpy(
            detected_map.copy()).float().to(device)
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [
            model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [
            model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        # Magic-number schedule from the ControlNet reference code:
        # 0.825**12 < 0.01 but 0.826**12 > 0.01.
        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
            [strength] * 13)
        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c')
                     * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
    return [full_segmask] + results
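# process() returns [segmentation visualization] + num_samples generated images,
# all HWC uint8 arrays, consumed by the gradio gallery or the offline grid below.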


# Run an offline test when the gradio GUI is disabled.
if not use_gradio:
    image_path = "images/sa_309398.jpg"
    input_image = Image.open(image_path)
    input_image = np.array(input_image, dtype=np.uint8)
    prompt = ""
    a_prompt = 'best quality, extremely detailed'
    n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
    num_samples = 5
    image_resolution = 512
    detect_resolution = 512
    ddim_steps = 100
    guess_mode = False
    strength = 1.0
    scale = 9.0
    seed = 10086
    eta = 0.0
    outputs = process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution,
                      detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta)
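
    # Build a preview grid: resized input image, SAM segmentation
    # visualization, then the generated samples.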
    image_list = []
    input_image = resize_image(input_image, 512)
    image_list.append(torch.tensor(input_image))
    for i in range(len(outputs)):
        each = outputs[i]
        each = resize_image(each, 512)
        print(i, each.shape)
        image_list.append(torch.tensor(each))
    image_list = torch.stack(image_list).permute(0, 3, 1, 2)
    save_image(image_list, "sample.jpg", nrow=4,
               normalize=True, value_range=(0, 255))
else:
    block = gr.Blocks().queue()
    with block:
        with gr.Row():
            gr.Markdown(
                "## Edit Anything powered by ControlNet+SAM+BLIP2+Stable Diffusion")
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type="numpy")
                prompt = gr.Textbox(label="Prompt")
                run_button = gr.Button(label="Run")
                with gr.Accordion("Advanced options", open=False):
                    num_samples = gr.Slider(
                        label="Images", minimum=1, maximum=12, value=1, step=1)
                    image_resolution = gr.Slider(
                        label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                    strength = gr.Slider(
                        label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                    guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                    detect_resolution = gr.Slider(
                        label="SAM Resolution", minimum=128, maximum=2048, value=1024, step=1)
                    ddim_steps = gr.Slider(
                        label="Steps", minimum=1, maximum=100, value=20, step=1)
                    scale = gr.Slider(
                        label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                    seed = gr.Slider(label="Seed", minimum=-1,
                                     maximum=2147483647, step=1, randomize=True)
                    eta = gr.Number(label="eta (DDIM)", value=0.0)
                    a_prompt = gr.Textbox(
                        label="Added Prompt", value='best quality, extremely detailed')
                    n_prompt = gr.Textbox(label="Negative Prompt",
                                          value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
            with gr.Column():
                result_gallery = gr.Gallery(
                    label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
        ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution,
               detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
        run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
    block.launch(server_name='0.0.0.0')
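# Assumes the script is launched from the repository root so the relative
# 'models/' and 'images/' paths resolve, e.g.:
#   python tools/sam2image_ori_version.py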