AlexWortega committed on
Commit ebede6b
1 Parent(s): 216c305
README.md DELETED
@@ -1,78 +0,0 @@
- ---
- language:
- - en
- tags:
- - stable-diffusion
- - text-to-image
- license: creativeml-openrail-m
- inference: true
-
- ---
-
- ![image](photo_2023-01-06_00-30-33.jpg)
- <sub>pink hair guy in glasses, photograph, sporty body, cinematic lighting, clear eyes, perfect face, blush, beautiful nose, beautiful eyes, detailed eyes</sub>
-
- # Anime Diffusion
-
- ## Model Description
-
- Anime Diffusion is a latent text-to-image diffusion model fine-tuned with BLIP-generated captions on a Danbooru image set, Demon Slayer stills, and artwork collected from 4chan.
-
- ## License
-
- This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
- The CreativeML OpenRAIL License specifies:
-
- 1. You can't use the model to deliberately produce or share illegal or harmful outputs or content.
- 2. The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in the license.
- 3. You may redistribute the weights and use the model commercially and/or as a service. If you do, please be aware that you have to include the same use restrictions as in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully).
-
- [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
-
- ## Downstream Uses
-
- This model can be used for entertainment purposes and as a generative art assistant.
-
- ## Example Code
-
- ```python
- import torch
- from diffusers import StableDiffusionPipeline
-
- # Load the fine-tuned weights and move the pipeline to the GPU
- pipe = StableDiffusionPipeline.from_pretrained(
-     'AlexWortega/AnimeDiffuion',
-     torch_dtype=torch.float32,
- ).to('cuda')
-
- negative_prompt = "low-res, duplicate, poorly drawn face, ugly, undetailed face"
- prompt = "pink hair guy in glasses, photograph, sporty body, cinematic lighting, clear eyes, perfect face, blush, beautiful nose, beautiful eyes, detailed eyes"
- num_samples = 1
-
- # Run generation without tracking gradients
- with torch.inference_mode():
-     images = pipe(
-         [prompt] * num_samples,
-         negative_prompt=[negative_prompt] * num_samples,
-         height=512, width=512,
-         num_inference_steps=50,
-         guidance_scale=8,
-     ).images
-
- images[0].save("test.png")
- ```
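-
- If GPU memory is tight, the same pipeline can be loaded in half precision with attention slicing; this is a minimal sketch, and whether fp16 helps depends on your GPU:
-
- ```python
- import torch
- from diffusers import StableDiffusionPipeline
-
- # Half-precision weights roughly halve VRAM use; attention slicing trades speed for memory
- pipe = StableDiffusionPipeline.from_pretrained(
-     'AlexWortega/AnimeDiffuion',
-     torch_dtype=torch.float16,
- ).to('cuda')
- pipe.enable_attention_slicing()
-
- image = pipe("pink hair guy in glasses, cinematic lighting").images[0]
- image.save("test_fp16.png")
- ```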
-
- ## Team Member and Acknowledgements
-
- This project would not have been possible without the incredible work by the [CompVis Researchers](https://ommer-lab.com/).
-
- - [Alex Wortega](https://github.com/AlexWortega)
- <sub>alexwortega@yandex.ru</sub>
-
- To reach me, here is my Telegram channel:
-
- [My tg channel](https://t.me/lovedeathtransformers)
photo_2023-01-06_00-30-33.jpg DELETED
Binary file (20.8 kB)
 
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "runwayml/stable-diffusion-v1-5",
+  "_name_or_path": "22h/vintedois-diffusion-v0-1",
   "architectures": [
     "CLIPTextModel"
   ],
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5dd9b0358a4e55636562b889a35463f7e39c85d9eefc5ce4819d39e5d038e8cc
-size 492308087
+oid sha256:a20d724dacc3f94e73753462567b578a6395b725e17b4ddaeddf0dd3bc148729
+size 492309793
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
   },
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "runwayml/stable-diffusion-v1-5",
+  "name_or_path": "22h/vintedois-diffusion-v0-1",
   "pad_token": "<|endoftext|>",
   "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
   "_diffusers_version": "0.9.0",
-  "_name_or_path": "runwayml/stable-diffusion-v1-5",
+  "_name_or_path": "22h/vintedois-diffusion-v0-1",
   "act_fn": "silu",
   "attention_head_dim": 8,
   "block_out_channels": [
@@ -11,6 +11,7 @@
     1280
   ],
   "center_input_sample": false,
+  "class_embed_type": null,
   "cross_attention_dim": 768,
   "down_block_types": [
     "CrossAttnDownBlock2D",
@@ -25,11 +26,13 @@
   "in_channels": 4,
   "layers_per_block": 2,
   "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
   "norm_eps": 1e-05,
   "norm_num_groups": 32,
   "num_class_embeds": null,
   "only_cross_attention": false,
   "out_channels": 4,
+  "resnet_time_scale_shift": "default",
   "sample_size": 64,
   "up_block_types": [
     "UpBlock2D",
@@ -37,5 +40,6 @@
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D"
   ],
+  "upcast_attention": false,
   "use_linear_projection": false
 }
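
The keys added on the right (`class_embed_type`, `mid_block_type`, `resnet_time_scale_shift`, `upcast_attention`) match the defaults that newer diffusers releases serialize explicitly. A minimal sketch (assuming diffusers >= 0.9 and access to the Hub) to confirm the updated UNet config loads:

```python
from diffusers import UNet2DConditionModel

# Load only the unet/ subfolder of the repo to check the new config keys
unet = UNet2DConditionModel.from_pretrained(
    'AlexWortega/AnimeDiffuion', subfolder='unet'
)
print(unet.config.mid_block_type)    # "UNetMidBlock2DCrossAttn"
print(unet.config.upcast_attention)  # False
```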
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0268e5aa74ef2b5834e9a15fa1e7fee49d5e50cc04a309a4ceb4619a4620439b
-size 3438364325
+oid sha256:16c91b5245454e5c90bc32617c502f610adcb1efe008af7ce2749d3229f01fa0
+size 3438375973
vae/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.9.0",
-  "_name_or_path": "runwayml/stable-diffusion-v1-5",
+  "_name_or_path": "22h/vintedois-diffusion-v0-1",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -20,7 +20,7 @@
   "layers_per_block": 2,
   "norm_num_groups": 32,
   "out_channels": 3,
-  "sample_size": 512,
+  "sample_size": 256,
   "up_block_types": [
     "UpDecoderBlock2D",
     "UpDecoderBlock2D",
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cce6424c70219abf32af8aa0bdc3e03cccfb0be848533f95e29abcecfd51088f
-size 334710673
+oid sha256:a4302e1efa25f3a47ceb7536bc335715ad9d1f203e90c2d25507600d74006e89
+size 334715313
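
Each weight entry above is a Git LFS pointer: `oid sha256` is the hash of the real file and `size` its length in bytes. A small sketch for verifying a downloaded file against its pointer (the local path is an assumption about where the repo was cloned):

```python
import hashlib

def lfs_checksum(path: str, chunk: int = 1 << 20):
    """Stream a file and return (sha256 hex digest, byte size) to compare with its LFS pointer."""
    h, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
            size += len(block)
    return h.hexdigest(), size

digest, size = lfs_checksum("vae/diffusion_pytorch_model.bin")
assert digest == "a4302e1efa25f3a47ceb7536bc335715ad9d1f203e90c2d25507600d74006e89"
assert size == 334715313
```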