goattheman committed on
Commit
fbbf9b1
·
verified ·
1 Parent(s): 6a26ca0

Upload 3 files

Browse files

controlnet-sd21-depth-diffusers

Files changed (3) hide show
  1. README.md +25 -2
  2. config.json +47 -0
  3. diffusion_pytorch_model.bin +3 -0
README.md CHANGED
@@ -1,5 +1,28 @@
1
  ---
2
  license: other
3
- license_name: other
4
- license_link: LICENSE
 
 
 
 
 
5
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: other
3
+ language:
4
+ - en
5
+ tags:
6
+ - art
7
+ - diffusers
8
+ - stable diffusion
9
+ - controlnet
10
  ---
11
+ Here's the first version of ControlNet for Stable Diffusion 2.1 for diffusers
12
+ Trained on a subset of laion/laion-art
13
+
14
+ License: refers to the licenses of the respective preprocessors.
15
+
16
+
17
+ ### Depth:
18
+ ![<depth> 0](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_depth.png)
19
+
20
+
21
+ ### Misuse, Malicious Use, and Out-of-Scope Use
22
+
23
+ The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
24
+
25
+
26
+ Thanks
27
+ - https://huggingface.co/lllyasviel/ControlNet for the implementation and the release of 1.5 models.
28
+ - https://huggingface.co/thepowefuldeez for the conversion script to diffusers
config.json ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.15.0.dev0",
4
+ "_name_or_path": "./depth-sd21",
5
+ "act_fn": "silu",
6
+ "attention_head_dim": [
7
+ 5,
8
+ 10,
9
+ 20,
10
+ 20
11
+ ],
12
+ "block_out_channels": [
13
+ 320,
14
+ 640,
15
+ 1280,
16
+ 1280
17
+ ],
18
+ "class_embed_type": null,
19
+ "conditioning_embedding_out_channels": [
20
+ 16,
21
+ 32,
22
+ 96,
23
+ 256
24
+ ],
25
+ "controlnet_conditioning_channel_order": "rgb",
26
+ "cross_attention_dim": 1024,
27
+ "down_block_types": [
28
+ "CrossAttnDownBlock2D",
29
+ "CrossAttnDownBlock2D",
30
+ "CrossAttnDownBlock2D",
31
+ "DownBlock2D"
32
+ ],
33
+ "downsample_padding": 1,
34
+ "flip_sin_to_cos": true,
35
+ "freq_shift": 0,
36
+ "in_channels": 4,
37
+ "layers_per_block": 2,
38
+ "mid_block_scale_factor": 1,
39
+ "norm_eps": 1e-05,
40
+ "norm_num_groups": 32,
41
+ "num_class_embeds": null,
42
+ "only_cross_attention": false,
43
+ "projection_class_embeddings_input_dim": null,
44
+ "resnet_time_scale_shift": "default",
45
+ "upcast_attention": true,
46
+ "use_linear_projection": true
47
+ }
diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:454116d07f006f8e7660dd3f965c9fafdb31dc58a2d13100897b5f40acd9cd23
3
+ size 728594745