helenai commited on
Commit
dec394f
1 Parent(s): a8331b4

commit files to HF hub

Browse files
README.md ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ tags:
4
+ - stable-diffusion
5
+ - text-to-image
6
+ - openvino
7
+ inference: true
8
+
9
+
10
+ ---
11
+
12
+ # OpenVINO Stable Diffusion
13
+
14
+ ## Lykon/DreamShaper
15
+
16
+ This repository contains the models from [Lykon/DreamShaper](https://huggingface.co/Lykon/DreamShaper) converted to
17
+ OpenVINO, for accelerated inference on CPU or Intel GPU with OpenVINO's integration into Optimum:
18
+ [optimum-intel](https://github.com/huggingface/optimum-intel#openvino). The model weights are stored with FP16
19
+ precision, which reduces the size of the model by half.
20
+
21
+ Please check out the [source model repository](https://huggingface.co/Lykon/DreamShaper) for more information about the model and its license.
22
+
23
+ To install the requirements for this demo, do `pip install optimum[openvino]`. This installs all the necessary dependencies,
24
+ including Transformers and OpenVINO. For more detailed steps, please see this [installation guide](https://github.com/helena-intel/optimum-intel/wiki/OpenVINO-Integration-Installation-Guide).
25
+
26
+ The simplest way to generate an image with stable diffusion takes only two lines of code, as shown below. The first line downloads the
27
+ model from the Hugging Face hub (if it has not been downloaded before) and loads it; the second line generates an image.
28
+
29
+ ```python
30
+ from optimum.intel.openvino import OVStableDiffusionPipeline
31
+
32
+ stable_diffusion = OVStableDiffusionPipeline.from_pretrained("helenai/Lykon-DreamShaper-ov")
33
+ images = stable_diffusion("a random image").images
34
+ ```
35
+
36
+ The following example code uses static shapes for even faster inference. Using larger image sizes will
37
+ require more memory and take longer to generate.
38
+
39
+ If you have an 11th generation or later Intel Core processor, you can use the integrated GPU for inference, and if you have an Intel
40
+ discrete GPU, you can use that. Add the line `stable_diffusion.to("GPU")` before `stable_diffusion.compile()` in the example below.
41
+ Model loading will take some time the first time, but will be faster afterwards because the model will be cached. On GPU, only static
42
+ shapes are supported for stable diffusion at the moment.
43
+
44
+
45
+ ```python
46
+ from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline
47
+
48
+ batch_size = 1
49
+ num_images_per_prompt = 1
50
+ height = 256
51
+ width = 256
52
+
53
+ # load the model and reshape to static shapes for faster inference
54
+ model_id = "helenai/Lykon-DreamShaper-ov"
55
+ stable_diffusion = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False)
56
+ stable_diffusion.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images_per_prompt)
57
+ stable_diffusion.compile()
58
+
59
+ # generate image!
60
+ prompt = "a random image"
61
+ images = stable_diffusion(prompt, height=height, width=width, num_images_per_prompt=num_images_per_prompt).images
62
+ images[0].save("result.png")
63
+ ```
64
+
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 224,
4
+ "width": 224
5
+ },
6
+ "do_center_crop": true,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "feature_extractor_type": "CLIPFeatureExtractor",
12
+ "image_mean": [
13
+ 0.48145466,
14
+ 0.4578275,
15
+ 0.40821073
16
+ ],
17
+ "image_processor_type": "CLIPFeatureExtractor",
18
+ "image_std": [
19
+ 0.26862954,
20
+ 0.26130258,
21
+ 0.27577711
22
+ ],
23
+ "resample": 3,
24
+ "rescale_factor": 0.00392156862745098,
25
+ "size": {
26
+ "shortest_edge": 224
27
+ }
28
+ }
inference.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline
2
+
3
+ batch_size = 1
4
+ num_images_per_prompt = 1
5
+ height = 256
6
+ width = 256
7
+
8
+ # load the model and reshape to static shapes for faster inference
9
+ model_id = "helenai/Lykon-DreamShaper-ov"
10
+ stable_diffusion = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False)
11
+ stable_diffusion.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images_per_prompt)
12
+ stable_diffusion.compile()
13
+
14
+ # generate image!
15
+ prompt = "a random image"
16
+ images = stable_diffusion(prompt, height=height, width=width, num_images_per_prompt=num_images_per_prompt).images
17
+ images[0].save("result.png")
18
+
model_index.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "OVStableDiffusionPipeline",
3
+ "_diffusers_version": "0.13.1",
4
+ "feature_extractor": [
5
+ "transformers",
6
+ "CLIPFeatureExtractor"
7
+ ],
8
+ "requires_safety_checker": true,
9
+ "safety_checker": [
10
+ "stable_diffusion",
11
+ "StableDiffusionSafetyChecker"
12
+ ],
13
+ "scheduler": [
14
+ "diffusers",
15
+ "PNDMScheduler"
16
+ ],
17
+ "text_encoder": [
18
+ "optimum",
19
+ "OVModelTextEncoder"
20
+ ],
21
+ "tokenizer": [
22
+ "transformers",
23
+ "CLIPTokenizer"
24
+ ],
25
+ "unet": [
26
+ "optimum",
27
+ "OVModelUnet"
28
+ ],
29
+ "vae_decoder": [
30
+ "optimum",
31
+ "OVModelVaeDecoder"
32
+ ]
33
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "PNDMScheduler",
3
+ "_diffusers_version": "0.13.1",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "num_train_timesteps": 1000,
9
+ "prediction_type": "epsilon",
10
+ "set_alpha_to_one": false,
11
+ "skip_prk_steps": true,
12
+ "steps_offset": 1,
13
+ "trained_betas": null
14
+ }
text_encoder/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a89453c0883fde55b631e01f8dcf20533a44c0af7e28918053e15cb5157a09cf
3
+ size 246121704
text_encoder/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<|endoftext|>",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<|startoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "do_lower_case": true,
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "model_max_length": 77,
22
+ "name_or_path": "models/Lykon-DreamShaper-ov/tokenizer",
23
+ "pad_token": "<|endoftext|>",
24
+ "special_tokens_map_file": "./special_tokens_map.json",
25
+ "tokenizer_class": "CLIPTokenizer",
26
+ "unk_token": {
27
+ "__type": "AddedToken",
28
+ "content": "<|endoftext|>",
29
+ "lstrip": false,
30
+ "normalized": true,
31
+ "rstrip": false,
32
+ "single_word": false
33
+ }
34
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68f6828abf84ff1dd6ecdc68f6397f99bbc02aa5bba2774857ab3a7e82627638
3
+ size 1719042636
unet/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff
 
vae_decoder/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53f4ac660fa91cfebad1febd5aac65ce76d9a0f537b4593838613c9d93e64270
3
+ size 98980700
vae_decoder/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff