akhaliq (HF staff) committed
Commit 3e0e236
1 Parent(s): 11b229f

add runway inpainting model with diffusers

Files changed (2)
  1. app.py +4 -15
  2. requirements.txt +1 -1
app.py CHANGED
@@ -11,23 +11,12 @@ import torch
 from torch import autocast
 import cv2
 from matplotlib import pyplot as plt
-from inpainting import StableDiffusionInpaintingPipeline
 from torchvision import transforms
-
-auth_token = os.environ.get("API_TOKEN") or True
-
-def download_image(url):
-    response = requests.get(url)
-    return PIL.Image.open(BytesIO(response.content)).convert("RGB")
+from diffusers import DiffusionPipeline
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-pipe = StableDiffusionInpaintingPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4",
-    revision="fp16",
-    torch_dtype=torch.float16,
-    use_auth_token=auth_token,
-).to(device)
 
+pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", dtype=torch.float16, revision="fp16")
 
 transform = transforms.Compose([
     transforms.ToTensor(),
@@ -39,8 +28,8 @@ def predict(dict, prompt=""):
     with autocast("cuda"):
         init_image = dict["image"].convert("RGB").resize((512, 512))
         mask = dict["mask"].convert("RGB").resize((512, 512))
-        images = pipe(prompt = prompt, init_image=init_image, mask_image=mask, strength=0.8)["sample"]
-        return images[0]
+        output = pipe(prompt = prompt, image=init_image, mask_image=mask, strength=0.8,num_inference_steps=20)
+        return output.images[0]
 
 examples = [[dict(image="init_image.png", mask="mask_image.png"), "A panda sitting on a bench"]]
 
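For context, a minimal standalone sketch of driving the new runwayml/stable-diffusion-inpainting checkpoint with diffusers, roughly mirroring what the updated predict() does. It is an illustration only, not part of the commit: the file names init_image.png and mask_image.png are simply the example assets already referenced in app.py, torch_dtype is the keyword documented by diffusers (the commit passes dtype), and the explicit .to(device) call is added here because the updated app.py computes device but no longer moves the pipeline onto it.

# Sketch only (not part of the commit): one inpainting call against the
# runwayml/stable-diffusion-inpainting checkpoint that app.py now loads.
import torch
import PIL.Image
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,  # documented kwarg; the commit writes dtype=torch.float16
    revision="fp16",            # fp16 weights assume a CUDA runtime, as in the Space
).to(device)  # the updated app.py defines `device` but never moves the pipe; added for the sketch

# Assumed paths: the example files already listed in app.py's `examples`.
init_image = PIL.Image.open("init_image.png").convert("RGB").resize((512, 512))
mask_image = PIL.Image.open("mask_image.png").convert("RGB").resize((512, 512))

output = pipe(
    prompt="A panda sitting on a bench",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=20,
)
output.images[0].save("inpainted.png")

Installing diffusers from the GitHub main branch (the requirements.txt change below) is presumably what lets DiffusionPipeline resolve this checkpoint to the new inpainting pipeline; that support may not yet have been in a tagged release at the time of the commit.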
requirements.txt CHANGED
@@ -1,7 +1,7 @@
 --extra-index-url https://download.pytorch.org/whl/cu113
 torch
 torchvision
-diffusers
+git+https://github.com/huggingface/diffusers.git
 transformers
 ftfy
 numpy