Commit · 7f158ff
Parent(s): 0312396

Update app.py (#6)

- Update app.py (3373014c5d6a6a49ca973739e58012fa3d5d8cde)

Co-authored-by: Saptarshi Mukherjee <zombie-596@users.noreply.huggingface.co>

app.py CHANGED
```diff
@@ -6,6 +6,21 @@ import numpy as np
 import tensorflow as tf
 import math
 
+
+
+def linear_beta_schedule(timesteps):
+    beta_start = 0.0001
+    beta_end = 0.02
+    return torch.linspace(beta_start, beta_end, timesteps)
+
+alphas = 1. - betas
+alphas_cumprod = torch.cumprod(alphas, axis=0)
+alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.0)
+sqrt_recip_alphas = torch.sqrt(1.0 / alphas)
+sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)
+sqrt_one_minus_alphas_cumprod = torch.sqrt(1. - alphas_cumprod)
+posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
+
 class Block(nn.Module):
     def __init__(self, in_ch, out_ch, time_emb_dim, up=False):
         super().__init__()
```
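As committed, this block reads `betas` without ever assigning it, so the module raises a NameError at import time: `linear_beta_schedule` is defined but its return value is never stored. A minimal runnable sketch of the presumably intended setup is below; the timestep count `T` and the `betas = linear_beta_schedule(T)` assignment are assumptions, not part of the diff:

```python
import torch
import torch.nn.functional as F

def linear_beta_schedule(timesteps):
    # Linearly spaced noise variances, as in the original DDPM setup.
    beta_start = 0.0001
    beta_end = 0.02
    return torch.linspace(beta_start, beta_end, timesteps)

T = 300                          # assumed number of diffusion steps
betas = linear_beta_schedule(T)  # assumed: the commit omits this assignment

# Closed-form quantities reused by the forward and reverse processes.
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.0)
sqrt_recip_alphas = torch.sqrt(1.0 / alphas)
sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)
sqrt_one_minus_alphas_cumprod = torch.sqrt(1. - alphas_cumprod)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
```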
```diff
@@ -153,26 +168,24 @@ def p_sample_loop(model, shape):
 def sample(model, image_size, batch_size=16, channels=3):
     return p_sample_loop(model, shape=(batch_size, channels, image_size, image_size))
 
-samples = sample(model, image_size=img_size, batch_size=64, channels=3)
-
 
-reverse_transforms = transforms.Compose([
-    transforms.Lambda(lambda t: (t + 1) / 2),
-    transforms.Lambda(lambda t: t.permute(1, 2, 0)), # CHW to HWC
-    transforms.Lambda(lambda t: t * 255.),
-    transforms.Lambda(lambda t: t.numpy().astype(np.uint8)),
-    transforms.ToPILImage(),
-])
 
-for i in range(10):
-    img = reverse_transforms(torch.Tensor((samples[-1][i].reshape(3, img_size, img_size))))
-    plt.imshow(img)
 model = SimpleUnet()
 
 st.title("Generatig images using a diffusion model")
 model.load_state_dict(torch.load("new_linear_model_1090.pt"))
 
-result = st.button("Click to generate image")
 
-if(
-    model
+if(st.button("Click to generate image")):
+    samples = sample(model, image_size=img_size, batch_size=64, channels=3)
+    for i in range(10):
+        reverse_transforms = transforms.Compose([
+            transforms.Lambda(lambda t: (t + 1) / 2),
+            transforms.Lambda(lambda t: t.permute(1, 2, 0)), # CHW to HWC
+            transforms.Lambda(lambda t: t * 255.),
+            transforms.Lambda(lambda t: t.numpy().astype(np.uint8)),
+            transforms.ToPILImage(),
+        ])
+        img = reverse_transforms(torch.Tensor((samples[-1][i].reshape(3, img_size, img_size))))
+
+        st.image(plt.imshow(img))
```
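Two problems in the new handler are worth flagging: `st.image(plt.imshow(img))` hands Streamlit a Matplotlib `AxesImage`, which `st.image` cannot display (it expects a PIL image, a numpy array, or a URL), and the `transforms.Compose` pipeline is rebuilt on every loop iteration. A corrected sketch, assuming `img_size`, `sample`, and the loaded model from the rest of app.py; the title-typo fix and the hoisted pipeline are editorial suggestions, not part of the commit:

```python
st.title("Generating images using a diffusion model")  # typo fixed: "Generatig"

# Build the tensor-to-PIL pipeline once, not once per image.
reverse_transforms = transforms.Compose([
    transforms.Lambda(lambda t: (t + 1) / 2),         # [-1, 1] -> [0, 1]
    transforms.Lambda(lambda t: t.permute(1, 2, 0)),  # CHW -> HWC
    transforms.Lambda(lambda t: t * 255.),
    transforms.Lambda(lambda t: t.numpy().astype(np.uint8)),
    transforms.ToPILImage(),
])

if st.button("Click to generate image"):
    samples = sample(model, image_size=img_size, batch_size=64, channels=3)
    for i in range(10):
        img = reverse_transforms(torch.Tensor(samples[-1][i].reshape(3, img_size, img_size)))
        st.image(img)  # pass the PIL image itself, not the return value of plt.imshow
```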
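For context, the `p_sample_loop` both hunks depend on lies outside the changed lines. A standard DDPM reverse-process loop consistent with the tensors precomputed above would look roughly like the sketch below; this is an illustration under those assumptions (including that the U-Net is called as `model(x, t)`), not the file's actual implementation:

```python
@torch.no_grad()
def p_sample(model, x, t):
    # Gather the per-timestep scalars and broadcast them over the batch.
    betas_t = betas[t].reshape(-1, 1, 1, 1)
    sqrt_one_minus_t = sqrt_one_minus_alphas_cumprod[t].reshape(-1, 1, 1, 1)
    sqrt_recip_t = sqrt_recip_alphas[t].reshape(-1, 1, 1, 1)

    # Mean of p(x_{t-1} | x_t), reconstructed from the model's noise estimate.
    mean = sqrt_recip_t * (x - betas_t * model(x, t) / sqrt_one_minus_t)
    if t[0] == 0:
        return mean  # final step: no noise is added
    noise = torch.randn_like(x)
    var_t = posterior_variance[t].reshape(-1, 1, 1, 1)
    return mean + torch.sqrt(var_t) * noise

@torch.no_grad()
def p_sample_loop(model, shape):
    # Start from pure noise and denoise step by step, keeping every frame,
    # which is why the app indexes samples[-1] for the fully denoised batch.
    img = torch.randn(shape)
    imgs = []
    for i in reversed(range(T)):
        t = torch.full((shape[0],), i, dtype=torch.long)
        img = p_sample(model, img, t)
        imgs.append(img.cpu())
    return imgs
```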