Upload folder using huggingface_hub
- .gitattributes +19 -0
- README.md +134 -0
- README_from_modelscope.md +141 -0
- assets/image_1_0_0.png +3 -0
- assets/image_1_1_0.png +3 -0
- assets/image_1_2_0.png +3 -0
- assets/image_1_3_0.png +3 -0
- assets/image_1_4_0.png +3 -0
- assets/image_1_5_0.png +3 -0
- assets/image_1_6_0.png +3 -0
- assets/image_1_7_0.png +3 -0
- assets/image_1_input.png +3 -0
- assets/image_2_0_0.png +3 -0
- assets/image_2_1_0.png +3 -0
- assets/image_2_2_0.png +3 -0
- assets/image_2_3_0.png +3 -0
- assets/image_2_input.png +3 -0
- assets/image_3_0_0.png +3 -0
- assets/image_3_1_0.png +3 -0
- assets/image_3_2_0.png +3 -0
- assets/image_3_3_0.png +3 -0
- assets/image_3_input.png +3 -0
- configuration.json +1 -0
- transformer/config.json +20 -0
- transformer/diffusion_pytorch_model-00001-of-00005.safetensors +3 -0
- transformer/diffusion_pytorch_model-00002-of-00005.safetensors +3 -0
- transformer/diffusion_pytorch_model-00003-of-00005.safetensors +3 -0
- transformer/diffusion_pytorch_model-00004-of-00005.safetensors +3 -0
- transformer/diffusion_pytorch_model-00005-of-00005.safetensors +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,22 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/image_1_0_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_1_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_2_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_3_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_4_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_5_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_6_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_7_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_1_input.png filter=lfs diff=lfs merge=lfs -text
+assets/image_2_0_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_2_1_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_2_2_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_2_3_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_2_input.png filter=lfs diff=lfs merge=lfs -text
+assets/image_3_0_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_3_1_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_3_2_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_3_3_0.png filter=lfs diff=lfs merge=lfs -text
+assets/image_3_input.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
---
license: apache-2.0
---
# Qwen-Image-Layered

## Model Introduction

This model was trained from [Qwen/Qwen-Image-Layered](https://modelscope.cn/models/Qwen/Qwen-Image-Layered) on the dataset [artplus/PrismLayersPro](https://modelscope.cn/datasets/artplus/PrismLayersPro), enabling text-controlled extraction of individual image layers.

## Usage Tips

* The model architecture has been modified from multi-image output to single-image output, producing only the layer relevant to the textual description.
* The model was trained exclusively on English text but inherits Chinese language understanding capabilities from the base model.
* The native training resolution is 1024x1024; however, inference at other resolutions is supported.
* The model struggles to separate multiple overlapping entities (e.g., the cartoon skeleton and hat in the examples).
* The model excels at decomposing poster-like images but performs poorly on photographic images, especially those involving complex lighting and shadows.
* Negative prompts are supported; use them to specify content you want excluded from the output.

## Demo Examples

**Some images contain white text on light backgrounds. Users of the ModelScope community should click the "☀︎" icon at the top-right corner to switch to dark mode for better visibility.**

### Example 1

<div style="display: flex; justify-content: space-between;">

<div style="width: 30%;">

|Input Image|
|-|
||

</div>

<div style="width: 66%;">

|Prompt|Output Image|Prompt|Output Image|
|-|-|-|-|
|A solid, uniform color with no distinguishable features or objects||Text 'TRICK'||
|Cloud||Text 'TRICK OR TREAT'||
|A cartoon skeleton character wearing a purple hat and holding a gift box||Text 'TRICK OR'||
|A purple hat and a head||A gift box||

</div>

</div>

### Example 2

<div style="display: flex; justify-content: space-between;">

<div style="width: 30%;">

|Input Image|
|-|
||

</div>

<div style="width: 66%;">

|Prompt|Output Image|Prompt|Output Image|
|-|-|-|-|
|蓝天,白云,一片花园,花园里有五颜六色的花||五彩的精致花环||
|少女、花环、小猫||少女、小猫||

</div>

</div>

### Example 3

<div style="display: flex; justify-content: space-between;">

<div style="width: 30%;">

|Input Image|
|-|
||

</div>

<div style="width: 66%;">

|Prompt|Output Image|Prompt|Output Image|
|-|-|-|-|
|一片湛蓝的天空和波涛汹涌的大海||文字“向往的生活”||
|一只海鸥||文字“生活”||

</div>

</div>

## Inference Code

Install DiffSynth-Studio:

```
git clone https://github.com/modelscope/DiffSynth-Studio.git
cd DiffSynth-Studio
pip install -e .
```

Model Inference:

```python
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from PIL import Image
import torch, requests

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Layered-Control", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
)
prompt = "A cartoon skeleton character wearing a purple hat and holding a gift box"
input_image = requests.get("https://modelscope.oss-cn-beijing.aliyuncs.com/resource/images/trick_or_treat.png", stream=True).raw
input_image = Image.open(input_image).convert("RGBA").resize((1024, 1024))
input_image.save("image_input.png")
images = pipe(
    prompt,
    seed=0,
    num_inference_steps=30, cfg_scale=4,
    height=1024, width=1024,
    layer_input_image=input_image,
    layer_num=0,
)
images[0].save("image.png")
```
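The usage tips above mention negative prompt support, which the example does not exercise. Below is a minimal sketch of how that could look, continuing from the snippet above; the `negative_prompt` argument name is assumed to behave as in other DiffSynth-Studio pipelines and is not taken from this card.

```python
# Minimal sketch, not from the original card: extract only the hat layer while
# steering the overlapping skeleton out of the result via a negative prompt.
# Assumes `pipe` and `input_image` from the snippet above, and that the call
# accepts `negative_prompt` as in other DiffSynth-Studio pipelines.
images = pipe(
    prompt="A purple hat",
    negative_prompt="cartoon skeleton character, gift box, text",
    seed=0,
    num_inference_steps=30, cfg_scale=4,
    height=1024, width=1024,
    layer_input_image=input_image,
    layer_num=0,
)
images[0].save("image_hat_only.png")
```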
README_from_modelscope.md
ADDED
---
frameworks: PyTorch
license: Apache License 2.0
tags: []
tasks:
- text-to-image-synthesis
base_model:
- Qwen/Qwen-Image-Layered
base_model_relation: finetune
---
# Qwen-Image-Layered

## Model Introduction

This model was trained from [Qwen/Qwen-Image-Layered](https://modelscope.cn/models/Qwen/Qwen-Image-Layered) on the dataset [artplus/PrismLayersPro](https://modelscope.cn/datasets/artplus/PrismLayersPro); the extracted layer content is controlled through text.

## Usage Tips

* The model architecture was changed from multi-image output to single-image output, so only the layer relevant to the text description is produced.
* The model was trained on English text only, but it inherits Chinese understanding from the base model.
* The native training resolution is 1024x1024; inference at other resolutions is supported.
* The model has difficulty separating multiple entities that occlude each other, such as the cartoon skeleton and the hat in the examples.
* The model is good at decomposing poster-style layers but poor at photographic images, especially photos with pronounced lighting and shadows.
* Negative prompts are supported; use them to describe content that should not appear in the result.

## Demo Examples

**Some of the images consist of pure white text. ModelScope community users should click the "☀︎" icon at the top-right of the page to switch to dark mode.**

### Example 1

<div style="display: flex; justify-content: space-between;">

<div style="width: 30%;">

|Input Image|
|-|
||

</div>

<div style="width: 66%;">

|Prompt|Output Image|Prompt|Output Image|
|-|-|-|-|
|A solid, uniform color with no distinguishable features or objects||Text 'TRICK'||
|Cloud||Text 'TRICK OR TREAT'||
|A cartoon skeleton character wearing a purple hat and holding a gift box||Text 'TRICK OR'||
|A purple hat and a head||A gift box||

</div>

</div>

### Example 2

<div style="display: flex; justify-content: space-between;">

<div style="width: 30%;">

|Input Image|
|-|
||

</div>

<div style="width: 66%;">

|Prompt|Output Image|Prompt|Output Image|
|-|-|-|-|
|蓝天,白云,一片花园,花园里有五颜六色的花||五彩的精致花环||
|少女、花环、小猫||少女、小猫||

</div>

</div>

### Example 3

<div style="display: flex; justify-content: space-between;">

<div style="width: 30%;">

|Input Image|
|-|
||

</div>

<div style="width: 66%;">

|Prompt|Output Image|Prompt|Output Image|
|-|-|-|-|
|一片湛蓝的天空和波涛汹涌的大海||文字“向往的生活”||
|一只海鸥||文字“生活”||

</div>

</div>

## Inference Code

Install DiffSynth-Studio:

```
git clone https://github.com/modelscope/DiffSynth-Studio.git
cd DiffSynth-Studio
pip install -e .
```

Model Inference:

```python
from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from PIL import Image
import torch, requests

pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Layered-Control", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image-Layered", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
)
prompt = "A cartoon skeleton character wearing a purple hat and holding a gift box"
input_image = requests.get("https://modelscope.oss-cn-beijing.aliyuncs.com/resource/images/trick_or_treat.png", stream=True).raw
input_image = Image.open(input_image).convert("RGBA").resize((1024, 1024))
input_image.save("image_input.png")
images = pipe(
    prompt,
    seed=0,
    num_inference_steps=30, cfg_scale=4,
    height=1024, width=1024,
    layer_input_image=input_image,
    layer_num=0,
)
images[0].save("image.png")
```
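To split one input into several layers, the same pipeline can simply be called once per textual description. A small sketch follows, reusing `pipe` and `input_image` from the snippet above; the prompt list is taken from Example 1, while the loop itself is illustrative and not part of the original card.

```python
# Illustrative sketch: one pipeline call per layer description.
# Reuses `pipe` and `input_image` from the inference snippet above.
layer_prompts = [
    "A solid, uniform color with no distinguishable features or objects",  # background
    "Cloud",
    "A cartoon skeleton character wearing a purple hat and holding a gift box",
    "Text 'TRICK OR TREAT'",
]
for i, layer_prompt in enumerate(layer_prompts):
    images = pipe(
        layer_prompt,
        seed=0,
        num_inference_steps=30, cfg_scale=4,
        height=1024, width=1024,
        layer_input_image=input_image,
        layer_num=0,
    )
    images[0].save(f"layer_{i}.png")  # RGBA layer for this description
```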
assets/image_1_0_0.png ADDED (Git LFS)
assets/image_1_1_0.png ADDED (Git LFS)
assets/image_1_2_0.png ADDED (Git LFS)
assets/image_1_3_0.png ADDED (Git LFS)
assets/image_1_4_0.png ADDED (Git LFS)
assets/image_1_5_0.png ADDED (Git LFS)
assets/image_1_6_0.png ADDED (Git LFS)
assets/image_1_7_0.png ADDED (Git LFS)
assets/image_1_input.png ADDED (Git LFS)
assets/image_2_0_0.png ADDED (Git LFS)
assets/image_2_1_0.png ADDED (Git LFS)
assets/image_2_2_0.png ADDED (Git LFS)
assets/image_2_3_0.png ADDED (Git LFS)
assets/image_2_input.png ADDED (Git LFS)
assets/image_3_0_0.png ADDED (Git LFS)
assets/image_3_1_0.png ADDED (Git LFS)
assets/image_3_2_0.png ADDED (Git LFS)
assets/image_3_3_0.png ADDED (Git LFS)
assets/image_3_input.png ADDED (Git LFS)
configuration.json
ADDED
{"framework":"Pytorch","task":"text-to-image-synthesis"}
transformer/config.json
ADDED
{
  "_class_name": "QwenImageTransformer2DModel",
  "_diffusers_version": "0.36.0.dev0",
  "use_additional_t_cond": true,
  "attention_head_dim": 128,
  "axes_dims_rope": [
    16,
    56,
    56
  ],
  "guidance_embeds": false,
  "in_channels": 64,
  "joint_attention_dim": 3584,
  "num_attention_heads": 24,
  "num_layers": 60,
  "out_channels": 16,
  "patch_size": 2,
  "use_layer3d_rope": true,
  "zero_cond_t": false
}
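The config above declares a `QwenImageTransformer2DModel` saved by diffusers 0.36.0.dev0, with its weights sharded across the five safetensors files below. As a rough sketch, and assuming a diffusers build recent enough to recognize this layered variant's extra flags such as `use_additional_t_cond` and `use_layer3d_rope`, the transformer could be loaded on its own:

```python
# Sketch only: load the sharded transformer weights via diffusers.
# Assumes a diffusers version that knows this layered variant's config flags
# (the config was written by 0.36.0.dev0); otherwise use the DiffSynth-Studio
# pipeline shown in the README instead.
import torch
from diffusers import QwenImageTransformer2DModel

transformer = QwenImageTransformer2DModel.from_pretrained(
    "DiffSynth-Studio/Qwen-Image-Layered-Control",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)
print(transformer.config.num_layers)  # 60, matching the config above
```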
transformer/diffusion_pytorch_model-00001-of-00005.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:e5353f1dbff8445840012bd2aff2fd209034aa42d0ce623a55f3f542036244a2
size 9973590960

transformer/diffusion_pytorch_model-00002-of-00005.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:957d266a7ccdcc9d3f225c82b0afa831ba5084c851b86934b9e4e9f10163b985
size 9987326040

transformer/diffusion_pytorch_model-00003-of-00005.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1f0e2bec2869de66f02b53bda77bc11618aba229453be56170209a654ddff0c0
size 9987307408

transformer/diffusion_pytorch_model-00004-of-00005.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:5244cf56dd45667fc8f373d43550bc187909bc48489f380fa3dcbb02901e7dcf
size 9930685680

transformer/diffusion_pytorch_model-00005-of-00005.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:45ecb944aad539ceaae9e3ba99dc9f2d650ba034cf4b305b0e83ebce0bb7b55c
size 982130448