Upload 37 files
- models/blip/blip-image-captioning-base/.gitattributes +34 -0
- models/blip/blip-image-captioning-base/README.md +152 -0
- models/blip/blip-image-captioning-base/config.json +169 -0
- models/blip/blip-image-captioning-base/preprocessor_config.json +17 -0
- models/blip/blip-image-captioning-base/pytorch_model.bin +3 -0
- models/blip/blip-image-captioning-base/special_tokens_map.json +7 -0
- models/blip/blip-image-captioning-base/tokenizer.json +0 -0
- models/blip/blip-image-captioning-base/tokenizer_config.json +21 -0
- models/blip/blip-image-captioning-base/vocab.txt +0 -0
- models/i3d/i3d_pretrained_400.pt +3 -0
- models/inpaint_blocks/config.json +42 -0
- models/inpaint_blocks/control_net_inpaint.py +55 -0
- models/inpaint_blocks/diffusion_pytorch_model.bin +3 -0
- models/sd_blocks/feature_extractor/preprocessor_config.json +20 -0
- models/sd_blocks/model_index.json +32 -0
- models/sd_blocks/safety_checker/config.json +175 -0
- models/sd_blocks/safety_checker/model.safetensors +3 -0
- models/sd_blocks/safety_checker/pytorch_model.bin +3 -0
- models/sd_blocks/scheduler/scheduler_config.json +13 -0
- models/sd_blocks/text_encoder/config.json +25 -0
- models/sd_blocks/text_encoder/model.safetensors +3 -0
- models/sd_blocks/text_encoder/pytorch_model.bin +3 -0
- models/sd_blocks/tokenizer/merges.txt +0 -0
- models/sd_blocks/tokenizer/special_tokens_map.json +24 -0
- models/sd_blocks/tokenizer/tokenizer_config.json +34 -0
- models/sd_blocks/tokenizer/vocab.json +0 -0
- models/sd_blocks/unet/config.json +36 -0
- models/sd_blocks/unet/diffusion_pytorch_model.bin +3 -0
- models/sd_blocks/unet/diffusion_pytorch_model.safetensors +3 -0
- models/sd_blocks/vae/config.json +29 -0
- models/sd_blocks/vae/diffusion_pytorch_model.bin +3 -0
- models/sd_blocks/vae/diffusion_pytorch_model.fp16.bin +3 -0
- models/sd_blocks/vae/diffusion_pytorch_model.fp16.safetensors +3 -0
- models/sd_blocks/vae/diffusion_pytorch_model.safetensors +3 -0
- models/sd_vae_ft-mse/config.json +29 -0
- models/sd_vae_ft-mse/diffusion_pytorch_model.bin +3 -0
- models/temporal_blocks/mm_sd_v15_v2.ckpt +3 -0
models/blip/blip-image-captioning-base/.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
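Note: the `.gitattributes` rules above route all large binary formats in this commit (`*.bin`, `*.pt`, `*.ckpt`, `*.safetensors`, ...) through Git LFS, which is why the weight files further down appear as three-line pointer stubs (`version` / `oid sha256:...` / `size`) rather than the binaries themselves.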
models/blip/blip-image-captioning-base/README.md
ADDED
@@ -0,0 +1,152 @@
+---
+pipeline_tag: image-to-text
+tags:
+- image-captioning
+languages:
+- en
+license: bsd-3-clause
+---
+
+# BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
+
+Model card for image captioning pretrained on COCO dataset - base architecture (with ViT base backbone).
+
+| ![BLIP.gif](https://cdn-uploads.huggingface.co/production/uploads/1670928184033-62441d1d9fdefb55a0b7d12c.gif) |
+|:--:|
+| <b> Pull figure from BLIP official repo | Image source: https://github.com/salesforce/BLIP </b>|
+
+## TL;DR
+
+Authors from the [paper](https://arxiv.org/abs/2201.12086) write in the abstract:
+
+*Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to video-language tasks in a zero-shot manner. Code, models, and datasets are released.*
+
+## Usage
+
+You can use this model for conditional and unconditional image captioning.
+
+### Using the PyTorch model
+
+#### Running the model on CPU
+
+<details>
+<summary> Click to expand </summary>
+
+```python
+import requests
+from PIL import Image
+from transformers import BlipProcessor, BlipForConditionalGeneration
+
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
+raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
+
+# conditional image captioning
+text = "a photography of"
+inputs = processor(raw_image, text, return_tensors="pt")
+
+out = model.generate(**inputs)
+print(processor.decode(out[0], skip_special_tokens=True))
+# >>> a photography of a woman and her dog
+
+# unconditional image captioning
+inputs = processor(raw_image, return_tensors="pt")
+
+out = model.generate(**inputs)
+print(processor.decode(out[0], skip_special_tokens=True))
+# >>> a woman sitting on the beach with her dog
+```
+</details>
+
+#### Running the model on GPU
+
+##### In full precision
+
+<details>
+<summary> Click to expand </summary>
+
+```python
+import requests
+from PIL import Image
+from transformers import BlipProcessor, BlipForConditionalGeneration
+
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cuda")
+
+img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
+raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
+
+# conditional image captioning
+text = "a photography of"
+inputs = processor(raw_image, text, return_tensors="pt").to("cuda")
+
+out = model.generate(**inputs)
+print(processor.decode(out[0], skip_special_tokens=True))
+# >>> a photography of a woman and her dog
+
+# unconditional image captioning
+inputs = processor(raw_image, return_tensors="pt").to("cuda")
+
+out = model.generate(**inputs)
+print(processor.decode(out[0], skip_special_tokens=True))
+# >>> a woman sitting on the beach with her dog
+```
+</details>
+
+##### In half precision (`float16`)
+
+<details>
+<summary> Click to expand </summary>
+
+```python
+import torch
+import requests
+from PIL import Image
+from transformers import BlipProcessor, BlipForConditionalGeneration
+
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16).to("cuda")
+
+img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
+raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
+
+# conditional image captioning
+text = "a photography of"
+inputs = processor(raw_image, text, return_tensors="pt").to("cuda", torch.float16)
+
+out = model.generate(**inputs)
+print(processor.decode(out[0], skip_special_tokens=True))
+# >>> a photography of a woman and her dog
+
+# unconditional image captioning
+inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
+
+out = model.generate(**inputs)
+print(processor.decode(out[0], skip_special_tokens=True))
+# >>> a woman sitting on the beach with her dog
+```
+</details>
+
+## BibTex and citation info
+
+```
+@misc{https://doi.org/10.48550/arxiv.2201.12086,
+  doi = {10.48550/ARXIV.2201.12086},
+  url = {https://arxiv.org/abs/2201.12086},
+  author = {Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
+  keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
+  title = {BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation},
+  publisher = {arXiv},
+  year = {2022},
+  copyright = {Creative Commons Attribution 4.0 International}
+}
+```
models/blip/blip-image-captioning-base/config.json
ADDED
@@ -0,0 +1,169 @@
+{
+  "_commit_hash": null,
+  "architectures": [
+    "BlipForConditionalGeneration"
+  ],
+  "image_text_hidden_size": 256,
+  "initializer_factor": 1.0,
+  "logit_scale_init_value": 2.6592,
+  "model_type": "blip",
+  "projection_dim": 512,
+  "text_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_probs_dropout_prob": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 30522,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.0,
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": true,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-12,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 512,
+    "min_length": 0,
+    "model_type": "blip_text_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 0,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 768,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": 102,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.26.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 30524
+  },
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "vision_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 384,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "blip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 16,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.26.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  }
+}
models/blip/blip-image-captioning-base/preprocessor_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "do_normalize": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "BlipImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "BlipProcessor",
+  "size": 384
+}
models/blip/blip-image-captioning-base/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6638651a5526cc2ede56f2b5104d6851b0755816d220e5e046870430180c767
+size 989820849
models/blip/blip-image-captioning-base/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
models/blip/blip-image-captioning-base/tokenizer.json
ADDED
The diff for this file is too large to render.
models/blip/blip-image-captioning-base/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "name_or_path": "bert-base-uncased",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "processor_class": "BlipProcessor",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]",
+  "model_input_names": [
+    "input_ids",
+    "attention_mask"
+  ]
+}
models/blip/blip-image-captioning-base/vocab.txt
ADDED
The diff for this file is too large to render.
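The files above give a self-contained local copy of the BLIP captioning checkpoint (model config, image processor settings, tokenizer files, and the LFS-tracked weights). The README snippets load the model from the Hub; a minimal sketch that instead points `transformers` at this repo's directory (the relative path assumes the repo root as working directory, and `example.jpg` is any local RGB image):

```python
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

local_dir = "models/blip/blip-image-captioning-base"  # directory added in this commit
processor = BlipProcessor.from_pretrained(local_dir)
model = BlipForConditionalGeneration.from_pretrained(local_dir)

raw_image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
inputs = processor(raw_image, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```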
models/i3d/i3d_pretrained_400.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55095f049e706479d48e221adcdb145b2b9dc930ba28b081ed72367ffaa32343
+size 50939526
models/inpaint_blocks/config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "_class_name": "ControlNetModel",
+  "_diffusers_version": "0.16.0.dev0",
+  "_name_or_path": "/home/patrick/controlnet_v1_1/control_v11p_sd15_inpaint",
+  "act_fn": "silu",
+  "attention_head_dim": 8,
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "class_embed_type": null,
+  "conditioning_embedding_out_channels": [
+    16,
+    32,
+    96,
+    256
+  ],
+  "controlnet_conditioning_channel_order": "rgb",
+  "cross_attention_dim": 768,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_time_scale_shift": "default",
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
models/inpaint_blocks/control_net_inpaint.py
ADDED
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from PIL import Image
+import numpy as np
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    DDIMScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+
+# pre-process image and mask
+image = load_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png").convert('RGB')
+mask_image = load_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png").convert("L")
+
+# convert to float32
+image = np.asarray(image, dtype=np.float32)
+mask_image = np.asarray(mask_image, dtype=np.float32)
+
+image[mask_image > 127] = -255.0
+image = torch.from_numpy(image)[None].permute(0, 3, 1, 2) / 255.0
+
+prompt = "A blue cat sitting on a park bench"
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(0)
+out_image = pipe(prompt, num_inference_steps=20, generator=generator, image=image, guidance_scale=9.0).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
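The script above reads the ControlNet checkpoint path from `sys.argv[1]`, so under this repo's layout it would presumably be pointed at `models/inpaint_blocks` (the directory holding the `config.json` and `diffusion_pytorch_model.bin` added in this commit; that pairing is an assumption, not stated in the script). A hedged sketch of the equivalent direct loading, reusing the same `diffusers` calls:

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Assumption: the inpainting ControlNet weights live in models/inpaint_blocks.
controlnet = ControlNetModel.from_pretrained("models/inpaint_blocks", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
```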
models/inpaint_blocks/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:208d550a521a9d503c4c1c2fc6cef8341449b0181ddc0027798e7808e0ffc667
+size 1445254969
models/sd_blocks/feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "crop_size": 224,
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "size": 224
+}
models/sd_blocks/model_index.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.6.0",
+  "feature_extractor": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "safety_checker": [
+    "stable_diffusion",
+    "StableDiffusionSafetyChecker"
+  ],
+  "scheduler": [
+    "diffusers",
+    "PNDMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
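`model_index.json` is what lets `diffusers` assemble the full Stable Diffusion pipeline from the component subfolders in `models/sd_blocks` (feature_extractor, safety_checker, scheduler, text_encoder, tokenizer, unet, vae). A minimal sketch, assuming the repo root as the working directory:

```python
import torch
from diffusers import StableDiffusionPipeline

# Each component listed in model_index.json is loaded from its subfolder.
pipe = StableDiffusionPipeline.from_pretrained("models/sd_blocks", torch_dtype=torch.float16)
```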
models/sd_blocks/safety_checker/config.json
ADDED
@@ -0,0 +1,175 @@
+{
+  "_commit_hash": "4bb648a606ef040e7685bde262611766a5fdd67b",
+  "_name_or_path": "CompVis/stable-diffusion-safety-checker",
+  "architectures": [
+    "StableDiffusionSafetyChecker"
+  ],
+  "initializer_factor": 1.0,
+  "logit_scale_init_value": 2.6592,
+  "model_type": "clip",
+  "projection_dim": 768,
+  "text_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "bos_token_id": 0,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 77,
+    "min_length": 0,
+    "model_type": "clip_text_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.22.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "vocab_size": 49408
+  },
+  "text_config_dict": {
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "num_attention_heads": 12,
+    "num_hidden_layers": 12
+  },
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "vision_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 224,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 16,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.22.0.dev0",
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  },
+  "vision_config_dict": {
+    "hidden_size": 1024,
+    "intermediate_size": 4096,
+    "num_attention_heads": 16,
+    "num_hidden_layers": 24,
+    "patch_size": 14
+  }
+}
models/sd_blocks/safety_checker/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d6a233ff6fd5ccb9f76fd99618d73369c52dd3d8222376384d0e601911089e8
+size 1215981830
models/sd_blocks/safety_checker/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:193490b58ef62739077262e833bf091c66c29488058681ac25cf7df3d8190974
+size 1216061799
models/sd_blocks/scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "_class_name": "PNDMScheduler",
+  "_diffusers_version": "0.6.0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "num_train_timesteps": 1000,
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "trained_betas": null,
+  "clip_sample": false
+}
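The scheduler config above is the usual PNDM setup for Stable Diffusion v1 (scaled-linear betas over 1000 training timesteps). If a component needs to be instantiated on its own rather than through the pipeline, a sketch along these lines should work (the `subfolder` path follows this repo's layout and is an assumption about how the blocks are meant to be consumed):

```python
from diffusers import PNDMScheduler

# Load only the scheduler from the sd_blocks directory added in this commit.
scheduler = PNDMScheduler.from_pretrained("models/sd_blocks", subfolder="scheduler")
```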
models/sd_blocks/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "openai/clip-vit-large-patch14",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.22.0.dev0",
+  "vocab_size": 49408
+}
models/sd_blocks/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334
+size 492265874
models/sd_blocks/text_encoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:770a47a9ffdcfda0b05506a7888ed714d06131d60267e6cf52765d61cf59fd67
+size 492305335
models/sd_blocks/tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
models/sd_blocks/tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
models/sd_blocks/tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "name_or_path": "openai/clip-vit-large-patch14",
+  "pad_token": "<|endoftext|>",
+  "special_tokens_map_file": "./special_tokens_map.json",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
models/sd_blocks/tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
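The `text_encoder` and `tokenizer` folders above mirror the `openai/clip-vit-large-patch14` text stack, so they can also be loaded individually with `transformers` when only the text side is needed (for example, prompt-embedding experiments). A sketch assuming the repo-relative paths used elsewhere in this commit:

```python
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("models/sd_blocks/tokenizer")
text_encoder = CLIPTextModel.from_pretrained("models/sd_blocks/text_encoder")

tokens = tokenizer("a blue cat sitting on a park bench", return_tensors="pt")
prompt_embeds = text_encoder(**tokens).last_hidden_state  # shape (1, seq_len, 768)
```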
models/sd_blocks/unet/config.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.6.0",
+  "act_fn": "silu",
+  "attention_head_dim": 8,
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "cross_attention_dim": 768,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "out_channels": 4,
+  "sample_size": 64,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ]
+}
models/sd_blocks/unet/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7da0e21ba7ea50637bee26e81c220844defdf01aafca02b2c42ecdadb813de4
+size 3438354725
models/sd_blocks/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1
+size 3438167540
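Like the other `sd_blocks` components, the UNet can be instantiated on its own from its subfolder, which is occasionally useful when inspecting or swapping a single block. A sketch under the same repo-relative path assumption:

```python
from diffusers import UNet2DConditionModel

# Load only the UNet from the sd_blocks directory added in this commit.
unet = UNet2DConditionModel.from_pretrained("models/sd_blocks", subfolder="unet")
print(unet.config.sample_size, unet.config.cross_attention_dim)  # 64, 768 per the config above
```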
models/sd_blocks/vae/config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.6.0",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
models/sd_blocks/vae/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b134cded8eb78b184aefb8805b6b572f36fa77b255c483665dda931fa0130c5
+size 334707217
models/sd_blocks/vae/diffusion_pytorch_model.fp16.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7643b3e40b9f128eda5fe174fea73c3ef3903562651fb344a79439709c2e503
+size 167405651
models/sd_blocks/vae/diffusion_pytorch_model.fp16.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c
+size 167335342
models/sd_blocks/vae/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c
+size 334643276
models/sd_vae_ft-mse/config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.4.2",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 256,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
models/sd_vae_ft-mse/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b4889b6b1d4ce7ae320a02dedaeff1780ad77d415ea0d744b476155c6377ddc
+size 334707217
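Judging by its config (identical AutoencoderKL architecture to `sd_blocks/vae`), `sd_vae_ft-mse` appears to be a drop-in replacement VAE with different weights. A hedged sketch of swapping it into the pipeline built from `models/sd_blocks` (the intent of keeping both VAEs is an assumption on my part):

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline

# Load the ft-MSE VAE added here and pass it to the pipeline in place of the default one.
vae = AutoencoderKL.from_pretrained("models/sd_vae_ft-mse", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    "models/sd_blocks", vae=vae, torch_dtype=torch.float16
)
```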
models/temporal_blocks/mm_sd_v15_v2.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69ed0f5fef82b110aca51bcab73b21104242bc65d6ab4b8b2a2a94d31cad1bf0
+size 1817888431