
Model URI doesn't work (#4)

opened by Trotter
Files changed (1)
  1. README.md +8 -8
README.md CHANGED
```diff
@@ -16,8 +16,8 @@ tags:
 
 | Model | Params |
 |-------|--------|
-| [amused-256](https://huggingface.co/huggingface/amused-256) | 603M |
-| [amused-512](https://huggingface.co/huggingface/amused-512) | 608M |
+| [amused-256](https://huggingface.co/amused/amused-256) | 603M |
+| [amused-512](https://huggingface.co/amused/amused-512) | 608M |
 
 Amused is a lightweight text to image model based off of the [muse](https://arxiv.org/pdf/2301.00704.pdf) architecture. Amused is particularly useful in applications that require a lightweight and fast model such as generating many images quickly at once.
 
@@ -34,7 +34,7 @@ import torch
 from diffusers import AmusedPipeline
 
 pipe = AmusedPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 pipe.vqvae.to(torch.float32) # vqvae is producing nans in fp16
 pipe = pipe.to("cuda")
@@ -53,7 +53,7 @@ import torch
 from diffusers import AmusedPipeline
 
 pipe = AmusedPipeline.from_pretrained(
-    "huggingface/amused-512", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
 )
 pipe.vqvae.to(torch.float32) # vqvae is producing nans n fp16
 pipe = pipe.to("cuda")
@@ -103,7 +103,7 @@ from diffusers import AmusedImg2ImgPipeline
 from diffusers.utils import load_image
 
 pipe = AmusedImg2ImgPipeline.from_pretrained(
-    "huggingface/amused-512", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
 )
 pipe.vqvae.to(torch.float32) # vqvae is producing nans in fp16
 pipe = pipe.to("cuda")
@@ -134,7 +134,7 @@ from diffusers.utils import load_image
 from PIL import Image
 
 pipe = AmusedInpaintPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 pipe.vqvae.to(torch.float32) # vqvae is producing nans in fp16
 pipe = pipe.to("cuda")
@@ -171,7 +171,7 @@ from diffusers import AmusedInpaintPipeline
 from diffusers.utils import load_image
 
 pipe = AmusedInpaintPipeline.from_pretrained(
-    "huggingface/amused-512", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
 )
 pipe.vqvae.to(torch.float32) # vqvae is producing nans in fp16
 pipe = pipe.to("cuda")
@@ -239,7 +239,7 @@ import torch
 from diffusers import AmusedPipeline
 
 pipe = AmusedPipeline.from_pretrained(
-    "huggingface/amused-256", variant="fp16", torch_dtype=torch.float16
+    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
 )
 
 # HERE use torch.compile
```
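
For anyone who wants to verify the fix locally, here is a minimal text-to-image sketch using the corrected repo id from this diff (`amused/amused-256`); the prompt and seed are placeholders for a quick smoke test, not lines from the README.

```python
import torch
from diffusers import AmusedPipeline

# Corrected repo id from this PR: "amused/amused-256" (was "huggingface/amused-256")
pipe = AmusedPipeline.from_pretrained(
    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
)
pipe.vqvae.to(torch.float32)  # vqvae is producing nans in fp16
pipe = pipe.to("cuda")

# Placeholder prompt and seed, just to confirm the checkpoint resolves and runs
prompt = "a photo of an astronaut riding a horse"
image = pipe(prompt, generator=torch.Generator("cuda").manual_seed(0)).images[0]
image.save("amused_256_test.png")
```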
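
The img2img and inpainting hunks change the same string. A sketch of the img2img case under the corrected `amused/amused-512` id is below; the input image path, prompt, and strength value are illustrative assumptions, and the inpainting pipelines follow the same pattern except that `AmusedInpaintPipeline` also takes a `mask_image`.

```python
import torch
from diffusers import AmusedImg2ImgPipeline
from diffusers.utils import load_image

pipe = AmusedImg2ImgPipeline.from_pretrained(
    "amused/amused-512", variant="fp16", torch_dtype=torch.float16
)
pipe.vqvae.to(torch.float32)  # vqvae is producing nans in fp16
pipe = pipe.to("cuda")

# Placeholder input: replace with a real local path or URL to an RGB image
init_image = load_image("input.png").resize((512, 512))

prompt = "a watercolor painting"  # placeholder prompt
image = pipe(prompt, init_image, strength=0.7).images[0]  # strength chosen arbitrarily
image.save("amused_512_img2img_test.png")
```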
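
The last hunk ends at the `# HERE use torch.compile` marker. A plausible continuation is sketched below, assuming the transformer submodule is the compile target; the README itself may pass different compile flags.

```python
import torch
from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained(
    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
)
pipe.vqvae.to(torch.float32)  # vqvae is producing nans in fp16
pipe = pipe.to("cuda")

# Assumption: compile the transformer, which does the bulk of the sampling work
pipe.transformer = torch.compile(pipe.transformer)

prompt = "a photo of an astronaut riding a horse"  # placeholder prompt
image = pipe(prompt).images[0]  # first call triggers compilation; later calls are faster
```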