shichen1231
committed on
Commit
•
00edff5
1
Parent(s):
c007838
Upload 2 files
Browse files- README.md +70 -0
- config.json +13 -1
README.md
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: creativeml-openrail-m
|
3 |
+
datasets:
|
4 |
+
- ioclab/grayscale_image_aesthetic_3M
|
5 |
+
language:
|
6 |
+
- en
|
7 |
+
---
|
8 |
+
|
9 |
+
# Model Card for ioclab/ioc-controlnet
|
10 |
+
|
11 |
+
This model brings brightness control to Stable Diffusion, allowing users to colorize grayscale images or recolor generated images.
|
12 |
+
|
13 |
+
## Model Details
|
14 |
+
|
15 |
+
- **Developed by:** [@shichen](https://github.com/chenbaiyujason)
|
16 |
+
- **Shared by [optional]:** [More Information Needed]
|
17 |
+
- **Model type:** Stable Diffusion ControlNet model for [web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
|
18 |
+
- **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
|
19 |
+
|
20 |
+
|
21 |
+
## Uses
|
22 |
+
|
23 |
+
Recommended Weight: **0.4-0.9**
|
24 |
+
|
25 |
+
Recommended Exit Timing: **0.4-0.9**
|
26 |
+
|
27 |
+
This model is still being trained on additional datasets, which is expected to take 2-4 days. Until then, adjust the weight flexibly based on your scenario and the specific results you get. If you have generated good images or encountered any problems, feel free to discuss them on Hugging Face.
|
28 |
+
|
29 |
+
![1](./image/s33.png)
|
30 |
+
|
31 |
+
![1](./image/s9.jpg)
|
32 |
+
|
33 |
+
![1](./image/s10.jpg)
|
34 |
+
|
35 |
+
![1](./image/s11.jpg)
|
36 |
+
|
37 |
+
![1](./image/s34.png)
|
38 |
+
|
39 |
+
|
40 |
+
![1](./image/s26.jpg)
|
41 |
+
|
42 |
+
|
43 |
+
For more information, please refer to the document link at the bottom.
|
44 |
+
|
45 |
+
Please note that the model is still undergoing training iterations. Check back every 3 days to see whether the latest updated version has been released.
|
46 |
+
|
47 |
+
|
48 |
+
|
49 |
+
### HuggingFace Space Demo
|
50 |
+
|
51 |
+
Waiting for upload
|
52 |
+
|
53 |
+
<!-- [huggingface.co/spaces/ioclab/brightness-controlnet](https://huggingface.co/spaces/ioclab/brightness-controlnet) -->
|
54 |
+
|
55 |
+
### Direct Use
|
56 |
+
|
57 |
+
[More Information Needed]
|
58 |
+
|
59 |
+
### Out-of-Scope Use
|
60 |
+
|
61 |
+
[More Information Needed]
|
62 |
+
|
63 |
+
## Bias, Risks, and Limitations
|
64 |
+
|
65 |
+
[More Information Needed]
|
66 |
+
|
67 |
+
|
68 |
+
## More Info
|
69 |
+
|
70 |
+
[illumination ControlNet 使用教程](https://aigc.ioclab.com/sd-showcase/light_controlnet.html) (Chinese)
|
config.json
CHANGED
@@ -1,6 +1,8 @@
|
|
1 |
{
|
2 |
-
"_class_name": "
|
3 |
"_diffusers_version": "0.16.0.dev0",
|
|
|
|
|
4 |
"attention_head_dim": 8,
|
5 |
"block_out_channels": [
|
6 |
320,
|
@@ -8,6 +10,7 @@
|
|
8 |
1280,
|
9 |
1280
|
10 |
],
|
|
|
11 |
"conditioning_embedding_out_channels": [
|
12 |
16,
|
13 |
32,
|
@@ -22,12 +25,21 @@
|
|
22 |
"CrossAttnDownBlock2D",
|
23 |
"DownBlock2D"
|
24 |
],
|
|
|
25 |
"dropout": 0.0,
|
26 |
"flip_sin_to_cos": true,
|
27 |
"freq_shift": 0,
|
|
|
28 |
"in_channels": 4,
|
29 |
"layers_per_block": 2,
|
|
|
|
|
|
|
|
|
30 |
"only_cross_attention": false,
|
|
|
|
|
31 |
"sample_size": 32,
|
|
|
32 |
"use_linear_projection": false
|
33 |
}
|
|
|
1 |
{
|
2 |
+
"_class_name": "ControlNetModel",
|
3 |
"_diffusers_version": "0.16.0.dev0",
|
4 |
+
"_name_or_path": "./controlnet_flax",
|
5 |
+
"act_fn": "silu",
|
6 |
"attention_head_dim": 8,
|
7 |
"block_out_channels": [
|
8 |
320,
|
|
|
10 |
1280,
|
11 |
1280
|
12 |
],
|
13 |
+
"class_embed_type": null,
|
14 |
"conditioning_embedding_out_channels": [
|
15 |
16,
|
16 |
32,
|
|
|
25 |
"CrossAttnDownBlock2D",
|
26 |
"DownBlock2D"
|
27 |
],
|
28 |
+
"downsample_padding": 1,
|
29 |
"dropout": 0.0,
|
30 |
"flip_sin_to_cos": true,
|
31 |
"freq_shift": 0,
|
32 |
+
"global_pool_conditions": false,
|
33 |
"in_channels": 4,
|
34 |
"layers_per_block": 2,
|
35 |
+
"mid_block_scale_factor": 1,
|
36 |
+
"norm_eps": 1e-05,
|
37 |
+
"norm_num_groups": 32,
|
38 |
+
"num_class_embeds": null,
|
39 |
"only_cross_attention": false,
|
40 |
+
"projection_class_embeddings_input_dim": null,
|
41 |
+
"resnet_time_scale_shift": "default",
|
42 |
"sample_size": 32,
|
43 |
+
"upcast_attention": false,
|
44 |
"use_linear_projection": false
|
45 |
}
|