Commit d206bc1
Parent(s): b59dd07

initial commit (#1)

- initial commit (15c62e41e9c63868998839b43d674216a8d05a33)

Co-authored-by: Shiwei Zhang <StevenZhang@users.noreply.huggingface.co>
- README.md +54 -0
- assets/generated_logo.jpg +0 -0
- model/Edgen_1024px_v1.pth +3 -0
- model/sd-vae-ft-ema/config.json +29 -0
- model/sd-vae-ft-ema/diffusion_pytorch_model.bin +3 -0
- model/t5-v1_1-xxl/config.json +31 -0
- model/t5-v1_1-xxl/pytorch_model-00001-of-00002.bin +3 -0
- model/t5-v1_1-xxl/pytorch_model-00002-of-00002.bin +3 -0
- model/t5-v1_1-xxl/pytorch_model.bin.index.json +227 -0
- model/t5-v1_1-xxl/special_tokens_map.json +1 -0
- model/t5-v1_1-xxl/spiece.model +3 -0
- model/t5-v1_1-xxl/tokenizer_config.json +1 -0
- requirements.txt +22 -0
README.md
ADDED
@@ -0,0 +1,54 @@
<p align="center">
    <img src="assets/generated_logo.jpg" height=120>
</p>

### <div align="center">EvolveDirector: Approaching Advanced Text-to-Image Generation with Large Vision-Language Models</div>

---

These are the official weights of the "Edgen" model trained by EvolveDirector. For more details, please refer to our paper and code repository.


## Setup

### Requirements

1. Build a virtual environment for EvolveDirector
```shell
# create virtual environment for EvolveDirector
conda create -n evolvedirector python=3.9
conda activate evolvedirector

# cd to the path of this repo

# install packages
pip install --upgrade pip
pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121
pip install -r requirements.txt
pip install -U transformers accelerate diffusers SentencePiece ftfy beautifulsoup4
```

## Usage

1. Inference
```shell
# put your text prompts in "text_prompts.txt"
python Inference/inference.py --image_size=1024 \
    --t5_path "./model" \
    --tokenizer_path "./model/sd-vae-ft-ema" \
    --txt_file "text_prompts.txt" \
    --model_path "model/Edgen_1024px_v1.pth" \
    --save_folder "output/test_model"
```


## Citation

```bibtex

```

## Shoutouts

- This code builds heavily on [PixArt-$\alpha$](https://github.com/PixArt-alpha/PixArt-alpha/). Thanks for open-sourcing!
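As a quick sanity check before running the inference command above, the following sketch (editorial, not part of the repo; the file list is taken from this commit) verifies that the weights are present under `./model` and were actually fetched through Git LFS rather than left as pointer stubs:

```python
# Hypothetical helper, not part of this repo: checks that the files added in this
# commit exist locally and are larger than a bare Git LFS pointer (~130 bytes).
import os

REQUIRED = [
    "model/Edgen_1024px_v1.pth",
    "model/sd-vae-ft-ema/config.json",
    "model/sd-vae-ft-ema/diffusion_pytorch_model.bin",
    "model/t5-v1_1-xxl/config.json",
    "model/t5-v1_1-xxl/pytorch_model-00001-of-00002.bin",
    "model/t5-v1_1-xxl/pytorch_model-00002-of-00002.bin",
    "model/t5-v1_1-xxl/pytorch_model.bin.index.json",
    "model/t5-v1_1-xxl/special_tokens_map.json",
    "model/t5-v1_1-xxl/spiece.model",
    "model/t5-v1_1-xxl/tokenizer_config.json",
]

for path in REQUIRED:
    if not os.path.exists(path):
        print(f"MISSING  {path}")
    elif path.endswith((".bin", ".pth")) and os.path.getsize(path) < 1_000_000:
        print(f"POINTER? {path} is tiny; run `git lfs pull` to fetch the real weights")
    else:
        print(f"OK       {path}")
```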
assets/generated_logo.jpg
ADDED
model/Edgen_1024px_v1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e752b457f7b7bb84a77d0c77c61c85c2a312f9fd1b48725363373c8f3f3f423
size 2466408960
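This Git LFS pointer records the checksum and size of the full checkpoint. A minimal sketch (editorial, not repo code) to confirm a downloaded `Edgen_1024px_v1.pth` matches the pointer above:

```python
# Verify the downloaded checkpoint against the LFS pointer shown above.
import hashlib
import os

PATH = "model/Edgen_1024px_v1.pth"
EXPECTED_OID = "2e752b457f7b7bb84a77d0c77c61c85c2a312f9fd1b48725363373c8f3f3f423"
EXPECTED_SIZE = 2466408960

h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch: LFS file not fully fetched?"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch: corrupted download"
print("checkpoint matches the LFS pointer")
```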
model/sd-vae-ft-ema/config.json
ADDED
@@ -0,0 +1,29 @@
{
  "_class_name": "AutoencoderKL",
  "_diffusers_version": "0.4.2",
  "act_fn": "silu",
  "block_out_channels": [
    128,
    256,
    512,
    512
  ],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "in_channels": 3,
  "latent_channels": 4,
  "layers_per_block": 2,
  "norm_num_groups": 32,
  "out_channels": 3,
  "sample_size": 256,
  "up_block_types": [
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D"
  ]
}
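This is a standard `diffusers` AutoencoderKL configuration; the matching sd-vae-ft-ema weights sit next to it in `model/sd-vae-ft-ema`. A minimal loading sketch, assuming the packages from `requirements.txt` are installed and the LFS weights are fetched; the random tensor is only a placeholder input:

```python
# Editorial sketch: load the VAE described by the config above and check the latent
# geometry it implies. Not part of the repo's inference code.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("./model/sd-vae-ft-ema").eval()

# Four block_out_channels -> three downsampling stages -> spatial factor 2**3 = 8,
# so a 1024x1024 RGB image maps to a 4-channel 128x128 latent (latent_channels = 4).
x = torch.randn(1, 3, 1024, 1024)
with torch.no_grad():
    latents = vae.encode(x).latent_dist.sample()
print(latents.shape)  # torch.Size([1, 4, 128, 128])
```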
model/sd-vae-ft-ema/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c98ebcd7ca5cb69d47b2ae287feba0695689fbf2c8fead2fab05fd3e0c28303
size 334707217
model/t5-v1_1-xxl/config.json
ADDED
@@ -0,0 +1,31 @@
{
  "_name_or_path": "google/t5-v1_1-xxl",
  "architectures": [
    "T5EncoderModel"
  ],
  "d_ff": 10240,
  "d_kv": 64,
  "d_model": 4096,
  "decoder_start_token_id": 0,
  "dense_act_fn": "gelu_new",
  "dropout_rate": 0.1,
  "eos_token_id": 1,
  "feed_forward_proj": "gated-gelu",
  "initializer_factor": 1.0,
  "is_encoder_decoder": true,
  "is_gated_act": true,
  "layer_norm_epsilon": 1e-06,
  "model_type": "t5",
  "num_decoder_layers": 24,
  "num_heads": 64,
  "num_layers": 24,
  "output_past": true,
  "pad_token_id": 0,
  "relative_attention_max_distance": 128,
  "relative_attention_num_buckets": 32,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.21.1",
  "use_cache": true,
  "vocab_size": 32128
}
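The text encoder is the stock `google/t5-v1_1-xxl` encoder (24 layers, `d_model` 4096) used as a frozen prompt encoder. A minimal loading sketch with `transformers` (editorial; the fp16 cast and the example prompt are assumptions, not repo settings):

```python
# Editorial sketch: encode a prompt with the T5 encoder shipped in model/t5-v1_1-xxl.
import torch
from transformers import T5Tokenizer, T5EncoderModel

path = "./model/t5-v1_1-xxl"
tokenizer = T5Tokenizer.from_pretrained(path)  # needs sentencepiece (see requirements.txt)
text_encoder = T5EncoderModel.from_pretrained(path, torch_dtype=torch.float16).eval()

tokens = tokenizer("a photo of an astronaut riding a horse", return_tensors="pt")
with torch.no_grad():
    embeddings = text_encoder(**tokens).last_hidden_state
print(embeddings.shape)  # (1, num_tokens, 4096) -- matches "d_model": 4096 above
```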
model/t5-v1_1-xxl/pytorch_model-00001-of-00002.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f71ad0624095dae788b1023081dda1b4040bd24f7244a5b5b46eebc09825839
size 9452285635
model/t5-v1_1-xxl/pytorch_model-00002-of-00002.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f68f80678299ac59f69b3550ebd47b966571920d8f9e71f42ab61fabaaed868
size 9597031749
model/t5-v1_1-xxl/pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,227 @@
{
  "metadata": {
    "total_size": 19575627776
  },
  "weight_map": {
    "encoder.block.0.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.0.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.1.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.10.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.11.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.11.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.11.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.12.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.13.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.14.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.15.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.16.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.17.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.18.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.19.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.2.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.2.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.20.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.20.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.21.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.22.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.1.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.23.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "encoder.block.3.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.3.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.4.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.5.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.6.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.7.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.8.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.1.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.1.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.1.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.block.9.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
    "encoder.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    "shared.weight": "pytorch_model-00001-of-00002.bin"
  }
}
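Since the encoder weights are sharded across two files, this index maps every tensor name to the shard that stores it; `transformers` resolves it automatically when `from_pretrained` is pointed at the folder. A small editorial sketch for inspecting it directly:

```python
# Editorial sketch: inspect the shard index above.
import json
from collections import Counter

with open("model/t5-v1_1-xxl/pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # 19575627776 bytes in total
print(Counter(index["weight_map"].values()))  # number of tensors stored in each shard
print(index["weight_map"]["shared.weight"])   # -> pytorch_model-00001-of-00002.bin
```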
model/t5-v1_1-xxl/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
model/t5-v1_1-xxl/spiece.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
size 791656
model/t5-v1_1-xxl/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "model_max_length": 512, "name_or_path": "t5-small"}
requirements.txt
ADDED
@@ -0,0 +1,22 @@
torch==2.1.1
torchaudio==2.1.1
torchvision==0.16.1
mmcv==1.7.0
git+https://github.com/huggingface/diffusers
timm==0.6.12
accelerate
tensorboard
tensorboardX
transformers
sentencepiece~=0.1.99
ftfy
beautifulsoup4
protobuf==3.20.2
gradio==4.1.1
yapf==0.40.1
opencv-python
bs4
einops
xformers
optimum
peft==0.6.2