Diffusers Bot committed
Commit 91f4894 (1 parent: bdbb08e)

Upload folder using huggingface_hub
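The only substantive change in this commit, applied uniformly to the README and twelve community pipeline files below, is replacing the deprecated transformers class `CLIPFeatureExtractor` with its successor, `CLIPImageProcessor`. A minimal sketch of the migration, assuming a recent transformers release in which `CLIPFeatureExtractor` survives only as a deprecated alias of `CLIPImageProcessor` (the checkpoint id is taken from the README hunks below):

from transformers import CLIPImageProcessor

# Replaces the deprecated:
#   CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
# Both classes read the same preprocessor_config.json, so no checkpoint changes.
feature_extractor = CLIPImageProcessor.from_pretrained(
    "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
)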
main/README.md CHANGED
@@ -1435,9 +1435,9 @@ import requests
 import torch
 from diffusers import DiffusionPipeline
 from PIL import Image
-from transformers import CLIPFeatureExtractor, CLIPModel
+from transformers import CLIPImageProcessor, CLIPModel
 
-feature_extractor = CLIPFeatureExtractor.from_pretrained(
+feature_extractor = CLIPImageProcessor.from_pretrained(
     "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
 )
 clip_model = CLIPModel.from_pretrained(
@@ -2122,7 +2122,7 @@ import torch
 import open_clip
 from open_clip import SimpleTokenizer
 from diffusers import DiffusionPipeline
-from transformers import CLIPFeatureExtractor, CLIPModel
+from transformers import CLIPImageProcessor, CLIPModel
 
 
 def download_image(url):
@@ -2130,7 +2130,7 @@ def download_image(url):
     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
 
 # Loading additional models
-feature_extractor = CLIPFeatureExtractor.from_pretrained(
+feature_extractor = CLIPImageProcessor.from_pretrained(
     "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
 )
 clip_model = CLIPModel.from_pretrained(
main/clip_guided_images_mixing_stable_diffusion.py CHANGED
@@ -7,7 +7,7 @@ import PIL.Image
 import torch
 from torch.nn import functional as F
 from torchvision import transforms
-from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
 
 from diffusers import (
     AutoencoderKL,
@@ -86,7 +86,7 @@ class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
         tokenizer: CLIPTokenizer,
         unet: UNet2DConditionModel,
         scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         coca_model=None,
         coca_tokenizer=None,
         coca_transform=None,
main/clip_guided_stable_diffusion_img2img.py CHANGED
@@ -7,7 +7,7 @@ import torch
 from torch import nn
 from torch.nn import functional as F
 from torchvision import transforms
-from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
 
 from diffusers import (
     AutoencoderKL,
@@ -32,9 +32,9 @@ EXAMPLE_DOC_STRING = """
 import torch
 from diffusers import DiffusionPipeline
 from PIL import Image
-from transformers import CLIPFeatureExtractor, CLIPModel
+from transformers import CLIPImageProcessor, CLIPModel
 
-feature_extractor = CLIPFeatureExtractor.from_pretrained(
+feature_extractor = CLIPImageProcessor.from_pretrained(
     "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
 )
 clip_model = CLIPModel.from_pretrained(
@@ -139,7 +139,7 @@ class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
         tokenizer: CLIPTokenizer,
         unet: UNet2DConditionModel,
         scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
     ):
         super().__init__()
         self.register_modules(
main/mixture_canvas.py CHANGED
@@ -9,7 +9,7 @@ import torch
 from numpy import exp, pi, sqrt
 from torchvision.transforms.functional import resize
 from tqdm.auto import tqdm
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
@@ -275,7 +275,7 @@ class StableDiffusionCanvasPipeline(DiffusionPipeline, StableDiffusionMixin):
         unet: UNet2DConditionModel,
         scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
     ):
         super().__init__()
         self.register_modules(
main/mixture_tiling.py CHANGED
@@ -15,7 +15,7 @@ from diffusers.utils import logging
 
 try:
     from ligo.segments import segment
-    from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+    from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 except ImportError:
     raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")
 
@@ -144,7 +144,7 @@ class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
         unet: UNet2DConditionModel,
         scheduler: Union[DDIMScheduler, PNDMScheduler],
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
     ):
         super().__init__()
         self.register_modules(
main/pipeline_stable_diffusion_xl_controlnet_adapter.py CHANGED
@@ -189,7 +189,7 @@ class StableDiffusionXLControlNetAdapterPipeline(
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py CHANGED
@@ -332,7 +332,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
         requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
             Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config
main/pipeline_zero1to3.py CHANGED
@@ -9,7 +9,7 @@ import numpy as np
 import PIL.Image
 import torch
 from packaging import version
-from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection
+from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
 
 # from ...configuration_utils import FrozenDict
 # from ...models import AutoencoderKL, UNet2DConditionModel
@@ -87,7 +87,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
         cc_projection ([`CCProjection`]):
             Projection layer to project the concated CLIP features and pose embeddings to the original CLIP feature size.
@@ -102,7 +102,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
         unet: UNet2DConditionModel,
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         cc_projection: CCProjection,
         requires_safety_checker: bool = True,
     ):
main/regional_prompting_stable_diffusion.py CHANGED
@@ -3,7 +3,7 @@ from typing import Dict, Optional
 
 import torch
 import torchvision.transforms.functional as FF
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 
 from diffusers import StableDiffusionPipeline
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
@@ -69,7 +69,7 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
         unet: UNet2DConditionModel,
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         requires_safety_checker: bool = True,
     ):
         super().__init__(
main/stable_diffusion_ipex.py CHANGED
@@ -18,7 +18,7 @@ from typing import Any, Callable, Dict, List, Optional, Union
 import intel_extension_for_pytorch as ipex
 import torch
 from packaging import version
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 
 from diffusers.configuration_utils import FrozenDict
 from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
@@ -86,7 +86,7 @@ class StableDiffusionIPEXPipeline(
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
@@ -100,7 +100,7 @@ class StableDiffusionIPEXPipeline(
         unet: UNet2DConditionModel,
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
main/stable_diffusion_tensorrt_img2img.py CHANGED
@@ -42,7 +42,7 @@ from polygraphy.backend.trt import (
     network_from_onnx_path,
     save_engine,
 )
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from diffusers import DiffusionPipeline
 from diffusers.configuration_utils import FrozenDict, deprecate
@@ -679,7 +679,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
@@ -693,7 +693,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
         unet: UNet2DConditionModel,
         scheduler: DDIMScheduler,
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         image_encoder: CLIPVisionModelWithProjection = None,
         requires_safety_checker: bool = True,
         stages=["clip", "unet", "vae", "vae_encoder"],
main/stable_diffusion_tensorrt_inpaint.py CHANGED
@@ -42,7 +42,7 @@ from polygraphy.backend.trt import (
     network_from_onnx_path,
     save_engine,
 )
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from diffusers import DiffusionPipeline
 from diffusers.configuration_utils import FrozenDict, deprecate
@@ -683,7 +683,7 @@ class TensorRTStableDiffusionInpaintPipeline(DiffusionPipeline):
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
@@ -697,7 +697,7 @@ class TensorRTStableDiffusionInpaintPipeline(DiffusionPipeline):
         unet: UNet2DConditionModel,
         scheduler: DDIMScheduler,
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         image_encoder: CLIPVisionModelWithProjection = None,
         requires_safety_checker: bool = True,
         stages=["clip", "unet", "vae", "vae_encoder"],
main/stable_diffusion_tensorrt_txt2img.py CHANGED
@@ -42,7 +42,7 @@ from polygraphy.backend.trt import (
     network_from_onnx_path,
     save_engine,
 )
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from diffusers import DiffusionPipeline
 from diffusers.configuration_utils import FrozenDict, deprecate
@@ -595,7 +595,7 @@ class TensorRTStableDiffusionPipeline(DiffusionPipeline):
         safety_checker ([`StableDiffusionSafetyChecker`]):
             Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-        feature_extractor ([`CLIPFeatureExtractor`]):
+        feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
 
@@ -609,7 +609,7 @@ class TensorRTStableDiffusionPipeline(DiffusionPipeline):
         unet: UNet2DConditionModel,
         scheduler: DDIMScheduler,
         safety_checker: StableDiffusionSafetyChecker,
-        feature_extractor: CLIPFeatureExtractor,
+        feature_extractor: CLIPImageProcessor,
         image_encoder: CLIPVisionModelWithProjection = None,
         requires_safety_checker: bool = True,
         stages=["clip", "unet", "vae"],