eatdianatoday committed on
Commit f3b4e44
1 Parent(s): d53f935

Upload 3 files

Files changed (3)
  1. README.md +64 -1
  2. gitattributes.txt +32 -0
  3. model_index.json +32 -0
README.md CHANGED
@@ -1,3 +1,66 @@
  ---
- license: creativeml-openrail-m
+ language:
+ - en
+ tags:
+ - stable-diffusion
+ - text-to-image
+ license: unknown
+ inference: false
  ---
+
+ # Novelai-Diffusion
+
+ Novelai-Diffusion is a latent diffusion model that can create high-quality anime images.
+
+ This is the Diffusers version of the model, provided to make Novelai-Diffusion easier for everyone to use.
+
+ # Gradio & Colab Demo
+
+ There is a [Gradio](https://github.com/gradio-app/gradio) web UI and a Colab notebook that use Diffusers to run Novelai Diffusion:
+
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1fNscA4Xqga8DZVPYZo17OUyzk7tzOZEw)
+
+ Run Novelai Diffusion on TPU (beta):
+
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1r_ouUxFIFQzTJstnSJT0jGYNObc6UGkA?usp=sharing)
+
+ ## Example Code
+
+ ### pytorch
+
+ ```python
+ from diffusers import DiffusionPipeline
+ import torch
+
+ # Load the model in fp16 with the long-prompt-weighting custom pipeline
+ pipe = DiffusionPipeline.from_pretrained(
+     "animelover/novelai-diffusion",
+     custom_pipeline="waifu-research-department/long-prompt-weighting-pipeline",
+     torch_dtype=torch.float16,
+ )
+ pipe.safety_checker = None  # the safety checker is not needed; add unsafe terms to the negative prompt instead
+ pipe = pipe.to("cuda")
+
+ prompt = "best quality, masterpiece, 1girl, cute, looking at viewer, smiling, open mouth, white hair, red eyes, white kimono, sakura petal"
+ neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"
+ # autocast is not needed here; it would only slow inference down
+ image = pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=768, max_embeddings_multiples=5, guidance_scale=12).images[0]
+ image.save("test.png")
+ ```
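For reproducible results, the snippet below is a minimal sketch that fixes the random seed, reusing `pipe`, `prompt`, and `neg_prompt` from the example above. It assumes this custom pipeline forwards the standard `generator` argument the way the stock `StableDiffusionPipeline` does, which this README does not confirm.

```python
import torch

# Fix the seed so the same prompt reproduces the same image.
# Whether this custom pipeline accepts `generator` is an assumption;
# the call mirrors the standard diffusers pipeline signature.
generator = torch.Generator(device="cuda").manual_seed(1234)
image = pipe.text2img(
    prompt,
    negative_prompt=neg_prompt,
    width=512,
    height=768,
    max_embeddings_multiples=5,
    guidance_scale=12,
    generator=generator,
).images[0]
image.save("test_seeded.png")
```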
+
+ ### onnxruntime
+
+ ```python
+ from diffusers import DiffusionPipeline
+
+ # Load the fp16 ONNX weights (revision "onnx16") with the ONNX long-prompt-weighting custom pipeline
+ pipe = DiffusionPipeline.from_pretrained(
+     "animelover/novelai-diffusion",
+     revision="onnx16",
+     custom_pipeline="waifu-research-department/onnx-long-prompt-weighting-pipeline",
+     provider="CUDAExecutionProvider",
+ )
+ pipe.safety_checker = None  # the safety checker is not needed; add unsafe terms to the negative prompt instead
+
+ prompt = "best quality, masterpiece, 1girl, cute, looking at viewer, smiling, open mouth, white hair, red eyes, white kimono, sakura petal"
+ neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"
+ image = pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=768, max_embeddings_multiples=5, guidance_scale=12).images[0]
+ image.save("test.png")
+ ```
+
+ Note: the "waifu-research-department/long-prompt-weighting-pipeline" custom pipeline lets you use long prompts and adjust token weighting. It requires diffusers>=0.4.0.
+
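As a rough illustration of the weighting mentioned above: community long-prompt-weighting pipelines commonly accept parenthesized per-phrase weights such as `(phrase:1.2)`. Whether this exact syntax applies to this particular custom pipeline is an assumption, so check its documentation. Continuing from the PyTorch example:

```python
# Hypothetical weighting syntax: "(phrase:weight)" emphasizes (>1.0) or
# de-emphasizes (<1.0) a phrase. This follows the common long-prompt-weighting
# convention and is not confirmed by this README.
weighted_prompt = "best quality, masterpiece, 1girl, (white kimono:1.3), (sakura petal:1.1), smiling, looking at viewer"
weighted_neg = "(worst quality:1.4), (low quality:1.4), lowres, bad anatomy, bad hands, blurry"
image = pipe.text2img(weighted_prompt, negative_prompt=weighted_neg, width=512, height=768, max_embeddings_multiples=5, guidance_scale=12).images[0]
image.save("test_weighted.png")
```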
+ ## Acknowledgements
+
+ Thanks to [novelai](https://novelai.net/) for this awesome model. Support them if you can.
gitattributes.txt ADDED
@@ -0,0 +1,32 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
model_index.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "StableDiffusionPipeline",
+   "_diffusers_version": "0.5.1",
+   "feature_extractor": [
+     "transformers",
+     "CLIPFeatureExtractor"
+   ],
+   "safety_checker": [
+     "stable_diffusion",
+     "StableDiffusionSafetyChecker"
+   ],
+   "scheduler": [
+     "diffusers",
+     "PNDMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
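The index above maps each pipeline component to the library and class that Diffusers should instantiate from the corresponding subfolder. A minimal sketch of how it is consumed is shown below; the scheduler swap uses standard Diffusers APIs and is only an illustration, not something this repository prescribes.

```python
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

# from_pretrained reads model_index.json and builds each listed component
# (vae, unet, text_encoder, tokenizer, scheduler, ...) from its subfolder.
pipe = DiffusionPipeline.from_pretrained("animelover/novelai-diffusion")
print(type(pipe.scheduler).__name__)  # PNDMScheduler, per the index above

# Components can be swapped after loading, e.g. a different scheduler
# (illustrative; requires a diffusers version that ships this scheduler).
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
```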