Doron Adler committed on
Commit 5294eb3
1 Parent(s): a9d1952

Stable Diffusion 2.0 based Simpsons model

README.md CHANGED
@@ -1,3 +1,71 @@
  ---
  license: creativeml-openrail-m
+ language:
+ - en
+ thumbnail: "https://huggingface.co/Norod78/sd2-simpsons-blip/raw/main/sd2-simpsons-blip-sample_tile.jpg"
+ tags:
+ - stable-diffusion
+ - stable-diffusion-diffusers
+ - text-to-image
+ datasets:
+ - Norod78/simpsons-blip-captions
+ inference: true
  ---
+
+ # Simpsons diffusion v2.0
+ *Stable Diffusion v2.0 fine-tuned on images related to "The Simpsons"*
+
+ If you want more details on how to generate your own BLIP-captioned dataset, see this [colab](https://colab.research.google.com/gist/Norod/ee6ee3c4bf11c2d2be531d728ec30824/buildimagedatasetwithblipcaptionsanduploadtohf.ipynb)
+
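As a rough sketch (not necessarily what the linked colab does), captioning a single image with BLIP via the transformers library could look like this; the `Salesforce/blip-image-captioning-base` checkpoint and the `simpsons_frame.jpg` path are placeholders, not taken from this repo:

```py
# Hedged sketch: caption one image with a BLIP model from transformers.
# Checkpoint and image path are placeholders; the colab may use a different setup.
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

image = Image.open("simpsons_frame.jpg").convert("RGB")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=32)
caption = processor.decode(output_ids[0], skip_special_tokens=True)
print(caption)
```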
+ Training was done using a slightly modified version of Hugging Face's text-to-image training [example script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py)
+
+ ## About
+
+ Enter a text prompt to generate cartoony, Simpsons-style images.
+
+ ## AUTOMATIC1111 webui checkpoint
+
+ The [main](https://huggingface.co/Norod78/sd2-simpsons-blip/tree/main) folder contains a .ckpt and a .yaml file. Put them in the "stable-diffusion-webui/models/Stable-diffusion" folder of [stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) and use them to generate images.
+
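As a rough sketch, the two files can be fetched with `huggingface_hub` and copied into that folder; the local webui path below is an assumption about where stable-diffusion-webui is checked out:

```py
# Hedged sketch: download the webui checkpoint and config from the Hub,
# then copy them into the webui models folder. Adjust `dest` to your install.
import shutil
from huggingface_hub import hf_hub_download

repo_id = "Norod78/sd2-simpsons-blip"
dest = "stable-diffusion-webui/models/Stable-diffusion"  # placeholder path

for filename in ["sd2-simpsons-blip.ckpt", "sd2-simpsons-blip.yaml"]:
    cached_path = hf_hub_download(repo_id=repo_id, filename=filename)
    shutil.copy(cached_path, f"{dest}/{filename}")
```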
+ ## Sample code
+
+ ```py
+ from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
+ import torch
+
+ # this will substitute the default PNDM scheduler for K-LMS
+ lms = LMSDiscreteScheduler(
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear"
+ )
+
+ guidance_scale=8.5
+ seed=777
+ steps=50
+
+ cartoon_model_path = "Norod78/sd2-simpsons-blip"
+ cartoon_pipe = StableDiffusionPipeline.from_pretrained(cartoon_model_path, scheduler=lms, torch_dtype=torch.float16)
+ cartoon_pipe.to("cuda")
+
+ def generate(prompt, file_prefix, samples):
+     torch.manual_seed(seed)
+     prompt += ", Very detailed, clean, high quality, sharp image"
+     cartoon_images = cartoon_pipe([prompt] * samples, num_inference_steps=steps, guidance_scale=guidance_scale)["images"]
+     for idx, image in enumerate(cartoon_images):
+         image.save(f"{file_prefix}-{idx}-{seed}-sd2-simpsons-blip.jpg")
+
+ generate("An oil painting of Snoop Dogg as a simpsons character", "01_SnoopDog", 4)
+ generate("Gal Gadot, cartoon", "02_GalGadot", 4)
+ generate("A cartoony Simpsons town", "03_SimpsonsTown", 4)
+ generate("Pikachu with the Simpsons, Eric Wallis", "04_PikachuSimpsons", 4)
+ ```
+
+ ![Images generated by this sample code](https://huggingface.co/Norod78/sd2-simpsons-blip/raw/main/sd2-simpsons-blip-sample_tile.jpg)
+
+ ## Dataset and Training
+
+ Fine-tuned from [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) for 10,000 iterations on [BLIP-captioned Simpsons images](https://huggingface.co/datasets/Norod78/simpsons-blip-captions), using a single A5000 GPU in my home desktop computer.
+
+ Trained by [@Norod78](https://twitter.com/Norod78)
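As a rough sketch, the training dataset can be inspected with the `datasets` library; the `image` and `text` column names below are assumptions, not verified against the dataset card:

```py
# Hedged sketch: load and peek at the BLIP-captioned Simpsons dataset.
# Column names "image"/"text" are assumed, matching what the diffusers
# text-to-image example script expects by default.
from datasets import load_dataset

dataset = load_dataset("Norod78/simpsons-blip-captions", split="train")
print(dataset)                    # row count and column names
example = dataset[0]
print(example["text"])            # one BLIP caption (assumed column name)
example["image"].save("dataset_sample.jpg")  # PIL image (assumed column name)
```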
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
logs/text2image-fine-tune/1669736281.2270982/events.out.tfevents.1669736281.DESKTOP-7NAF1LL.21388.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7e3507a0073eff1e2a6ac3ba12afd3dafc762944b75bcd0760e84db45e7879e
+ size 1775
logs/text2image-fine-tune/events.out.tfevents.1669736281.DESKTOP-7NAF1LL.21388.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:845ef093d4c3d8c48c7841faabe6526be1aed1601496e27f7367d5ab8b775878
+ size 489961
model_index.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_class_name": "StableDiffusionPipeline",
+   "_diffusers_version": "0.9.0",
+   "feature_extractor": [
+     "transformers",
+     "CLIPImageProcessor"
+   ],
+   "requires_safety_checker": false,
+   "scheduler": [
+     "diffusers",
+     "PNDMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_class_name": "PNDMScheduler",
+   "_diffusers_version": "0.9.0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "num_train_timesteps": 1000,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "trained_betas": null
+ }
sd2-simpsons-blip-example.py ADDED
@@ -0,0 +1,33 @@
+ from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
+ import torch
+
+ # this will substitute the default PNDM scheduler for K-LMS
+ lms = LMSDiscreteScheduler(
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear"
+ )
+
+ guidance_scale=8.5
+ seed=777
+ steps=50
+
+ cartoon_model_path = "Norod78/sd2-simpsons-blip"
+ cartoon_pipe = StableDiffusionPipeline.from_pretrained(cartoon_model_path, scheduler=lms, torch_dtype=torch.float16)
+ cartoon_pipe.to("cuda")
+
+ def generate(prompt, file_prefix, samples):
+     torch.manual_seed(seed)
+     prompt += ", Very detailed, clean, high quality, sharp image"
+     cartoon_images = cartoon_pipe([prompt] * samples, num_inference_steps=steps, guidance_scale=guidance_scale)["images"]
+     for idx, image in enumerate(cartoon_images):
+         image.save(f"{file_prefix}-{idx}-{seed}-sd2-simpsons-blip.jpg")
+
+ generate("An oil painting of Snoop Dogg as a simpsons character", "01_SnoopDog", 4)
+ generate("Gal Gadot, cartoon", "02_GalGadot", 4)
+ generate("A cartoony Simpsons town", "03_SimpsonsTown", 4)
+ generate("Pikachu with the Simpsons, Eric Wallis", "04_PikachuSimpsons", 4)
+
+
+
+
sd2-simpsons-blip.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a364cd64836bfa608c96c2a72fbc27e0003fb3f56ea1d47b65fe2db4d3c17f33
+ size 2580353150
sd2-simpsons-blip.yaml ADDED
@@ -0,0 +1,67 @@
+ model:
+   base_learning_rate: 1.0e-4
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False # we set this to false because this is an inference only config
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_checkpoint: True
+         use_fp16: True
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_head_channels: 64 # need to fix for flash-attn
+         use_spatial_transformer: True
+         use_linear_in_transformer: True
+         transformer_depth: 1
+         context_dim: 1024
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           #attn_type: "vanilla-xformers"
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+       params:
+         freeze: True
+         layer: "penultimate"
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "stabilityai/stable-diffusion-2-base",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1024,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 23,
+   "pad_token_id": 1,
+   "projection_dim": 512,
+   "torch_dtype": "float16",
+   "transformers_version": "4.25.0.dev0",
+   "vocab_size": 49408
+ }
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f988248177fc727cf066267b3ccb62e7a74b7e3c5b3efa8a701d563e0f0ea037
+ size 680901463
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "name_or_path": "stabilityai/stable-diffusion-2-base",
+   "pad_token": "<|endoftext|>",
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.9.0",
+   "_name_or_path": "stabilityai/stable-diffusion-2-base",
+   "act_fn": "silu",
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "cross_attention_dim": 1024,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_scale_factor": 1,
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "sample_size": 64,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "use_linear_projection": true
+ }
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36186345a450f4dcf3221d402e0f73e70828bfaa5a9d72a301a2ec5edce2a324
+ size 3463923045
vae/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.9.0",
+   "_name_or_path": "stabilityai/stable-diffusion-2-base",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 512,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb128b1f37e0c381c440128b217d29613b3e08b9e4ea7f20466424145ba538b0
+ size 167402961