AlanB committed on
Commit
e186a0a
1 Parent(s): c978a88

Changed PIL Image importing method to fix error

Browse files
Files changed (1) hide show
  1. pipeline.py +5 -5
pipeline.py CHANGED
@@ -1,7 +1,7 @@
1
  from typing import Union, Callable, Optional
2
 
3
  import torch
4
-
5
  from diffusers import (
6
  AutoencoderKL,
7
  DDIMScheduler,
@@ -10,7 +10,7 @@ from diffusers import (
10
  PNDMScheduler,
11
  UNet2DConditionModel,
12
  )
13
- from PIL import Image
14
  from torchvision import transforms as tfms
15
  from tqdm.auto import tqdm
16
  from transformers import CLIPTextModel, CLIPTokenizer
@@ -44,7 +44,7 @@ class MagicMixPipeline(DiffusionPipeline):
44
  img = (img / 2 + 0.5).clamp(0, 1)
45
  img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
46
  img = (img * 255).round().astype("uint8")
47
- return Image.fromarray(img[0])
48
 
49
  # convert prompt into text embeddings, also unconditional embeddings
50
  def prep_text(self, prompt):
@@ -72,7 +72,7 @@ class MagicMixPipeline(DiffusionPipeline):
72
 
73
  def __call__(
74
  self,
75
- img: Image.Image,
76
  prompt: str,
77
  kmin: float = 0.3,
78
  kmax: float = 0.6,
@@ -82,7 +82,7 @@ class MagicMixPipeline(DiffusionPipeline):
82
  guidance_scale: float = 7.5,
83
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
84
  callback_steps: Optional[int] = 1,
85
- ) -> Image.Image:
86
  tmin = steps - int(kmin * steps)
87
  tmax = steps - int(kmax * steps)
88
 
 
1
  from typing import Union, Callable, Optional
2
 
3
  import torch
4
+ import PIL
5
  from diffusers import (
6
  AutoencoderKL,
7
  DDIMScheduler,
 
10
  PNDMScheduler,
11
  UNet2DConditionModel,
12
  )
13
+ #from PIL import Image
14
  from torchvision import transforms as tfms
15
  from tqdm.auto import tqdm
16
  from transformers import CLIPTextModel, CLIPTokenizer
 
44
  img = (img / 2 + 0.5).clamp(0, 1)
45
  img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
46
  img = (img * 255).round().astype("uint8")
47
+ return PIL.Image.fromarray(img[0])
48
 
49
  # convert prompt into text embeddings, also unconditional embeddings
50
  def prep_text(self, prompt):
 
72
 
73
  def __call__(
74
  self,
75
+ img: PIL.Image.Image,
76
  prompt: str,
77
  kmin: float = 0.3,
78
  kmax: float = 0.6,
 
82
  guidance_scale: float = 7.5,
83
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
84
  callback_steps: Optional[int] = 1,
85
+ ) -> PIL.Image.Image:
86
  tmin = steps - int(kmin * steps)
87
  tmax = steps - int(kmax * steps)
88