vikhyatk committed
Commit af5f989
1 parent: 5d82fdf

Upload Moondream

Files changed (4):
  1. generation_config.json +2 -0
  2. model.safetensors +2 -2
  3. moondream.py +2 -1
  4. vision_encoder.py +122 -25
generation_config.json CHANGED
@@ -1,4 +1,6 @@
 {
   "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
   "transformers_version": "4.38.2"
 }
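
With bos_token_id and eos_token_id declared in generation_config.json, transformers treats them as the model's generation defaults. A minimal sketch of how the file is consumed, assuming the weights are published under a repo id such as vikhyatk/moondream2 (the repo id is an assumption, not part of this commit):

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("vikhyatk/moondream2")  # repo id assumed
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # expected: 1 2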
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b645a86282dbc3ce41a45acc9ec98e54a4cc3939ef32ca84591ca56046b0fed1
-size 3715037856
+oid sha256:7840817a7015edf729fa3d60099c35f08fc30511a1dc8ea231acd0e9a6555bb8
+size 3733912224
moondream.py CHANGED
@@ -29,7 +29,8 @@ class Moondream(PreTrainedModel):
         return self.text_model.device
 
     def encode_image(self, image):
-        return self.vision_encoder(image)
+        with torch.no_grad():
+            return self.vision_encoder(image)
 
     def input_embeds(self, prompt, image_embeds, tokenizer):
         def _tokenize(txt):
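
encode_image now wraps the vision encoder call in torch.no_grad(), so image embeddings are computed without building an autograd graph. A minimal usage sketch; the repo id and trust_remote_code loading path are assumptions about how the model is typically loaded, not something this commit defines:

from PIL import Image
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "vikhyatk/moondream2", trust_remote_code=True  # repo id assumed
)
image_embeds = model.encode_image(Image.open("photo.jpg"))
print(image_embeds.requires_grad)  # False: no graph is retained during inference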
vision_encoder.py CHANGED
@@ -1,7 +1,11 @@
+from typing import Union
+
+import PIL.Image
 import torch
 import torch.nn.functional as F
 from torch import nn
 from einops import rearrange
+import PIL
 from torchvision.transforms.v2 import (
     Compose,
     Resize,
@@ -172,7 +176,7 @@ class VisionProjection(nn.Module):
         model_dim = 2048
         hidden_dim = model_dim * 4
 
-        self.mlp = MLP(image_embedding_dim, hidden_dim, model_dim)
+        self.mlp = MLP(image_embedding_dim * 2, hidden_dim, model_dim)
 
     @property
     def device(self):
@@ -182,6 +186,26 @@
         return self.mlp(x)
 
 
+def create_patches(image, patch_size=(378, 378)):
+    assert image.dim() == 3, "Image must be in CHW format"
+
+    _, height, width = image.shape  # Channels, Height, Width
+    patch_height, patch_width = patch_size
+
+    if height == patch_height and width == patch_width:
+        return []
+
+    # Iterate over the image and create patches
+    patches = []
+    for i in range(0, height, patch_height):
+        row_patches = []
+        for j in range(0, width, patch_width):
+            patch = image[:, i : i + patch_height, j : j + patch_width]
+            row_patches.append(patch)
+        patches.append(torch.stack(row_patches))
+    return patches
+
+
 class VisionEncoder(nn.Module):
 
     def __init__(self, use_flash_attn=False):
@@ -189,15 +213,7 @@
 
         self.encoder = EncoderWrapper(use_flash_attn)
         self.projection = VisionProjection()
-
-        self.preprocess = Compose(
-            [
-                Resize(size=(378, 378), interpolation=InterpolationMode.BICUBIC),
-                ToImage(),
-                ToDtype(torch.float32, scale=True),
-                Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-            ]
-        )
+        self.supported_sizes = [(378, 378), (378, 756), (756, 378), (756, 756)]
 
     @property
     def device(self):
@@ -207,22 +223,103 @@
     def dtype(self):
        return self.projection.mlp.fc1.weight.dtype
 
-    def __call__(self, images) -> torch.Tensor:
-        if not isinstance(images, list) and not isinstance(images, torch.Tensor):
-            images = [images]
-
-        with torch.no_grad():
-            # Skip preprocess if images are already tensors
-            if not isinstance(images, torch.Tensor) and not isinstance(
-                images[0], torch.Tensor
-            ):
-                images = [self.preprocess(image.convert("RGB")) for image in images]
-
-            if isinstance(images, list):
-                images = torch.stack(images)
-
-            x = images.to(self.device, dtype=self.dtype)
-            x = self.encoder(x)
-            x = self.projection(x)
-
-            return x
+    def preprocess(self, image: PIL.Image.Image):
+        width, height = image.size
+        max_dim = max(width, height)
+        if max_dim < 512:
+            im_size = (378, 378)
+        else:
+            aspect_ratio = width / height
+            im_size = min(
+                self.supported_sizes,
+                key=lambda size: (
+                    abs((size[1] / size[0]) - aspect_ratio),
+                    abs(size[0] - width) + abs(size[1] - height),
+                ),
+            )
+
+        return Compose(
+            [
+                Resize(size=im_size, interpolation=InterpolationMode.BICUBIC),
+                ToImage(),
+                ToDtype(torch.float32, scale=True),
+                Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+            ]
+        )(image)
+
+    def forward(
+        self, images: Union[PIL.Image.Image, list[PIL.Image.Image], torch.Tensor]
+    ) -> torch.Tensor:
+        im_list = None
+        if isinstance(images, torch.Tensor):
+            # Input must have dimensions (B, C, H, W)
+            assert (
+                len(images.shape) == 4
+            ), "Tensor input must have dimensions (B, C, H, W)"
+            im_list = list(images)
+        elif isinstance(images, PIL.Image.Image):
+            im_list = [images]
+        elif isinstance(images, list):
+            im_list = images
+        else:
+            raise ValueError(
+                "Input must be a PIL image, list of PIL images, or a tensor"
+            )
+
+        # Preprocess unless the images are already tensors (indicating that
+        # they have already been preprocessed)
+        if not isinstance(im_list[0], torch.Tensor):
+            im_list = [self.preprocess(im.convert("RGB")) for im in im_list]
+
+        patches = [create_patches(im) for im in im_list]
+        flat_patches = [patch for image_patches in patches for patch in image_patches]
+
+        # Images may be variable size, and need to be resized to a common size after
+        # creating patches.
+        resized_images = [
+            F.interpolate(im.unsqueeze(0), size=(378, 378), mode="bilinear")
+            for im in im_list
+        ]
+
+        combined_images = torch.cat([*resized_images, *flat_patches], dim=0)
+        combined_images = combined_images.to(self.device, dtype=self.dtype)
+
+        combined_features = self.encoder(combined_images)
+
+        full_img_features = combined_features[: len(im_list)]
+        patch_features = (
+            combined_features[len(im_list) :].transpose(1, 2).view(-1, 1152, 27, 27)
+        )
+
+        # Reshape patch features back to their original structure
+        reshaped_patch_features = []
+        patch_idx = 0
+        for i, patch_set in enumerate(patches):
+            if len(patch_set) == 0:
+                reshaped_patch_features.append(
+                    full_img_features[i].transpose(0, 1).view(1152, 27, 27)
+                )
+            else:
+                sample_features = []
+                for row_patches in patch_set:
+                    row_len = len(row_patches)
+                    row_features = patch_features[
+                        patch_idx : patch_idx + row_len
+                    ]  # row_len, T, C
+                    row_features = torch.cat(
+                        list(row_features), dim=2
+                    )  # T, C * row_len
+                    patch_idx += row_len
+                    sample_features.append(row_features)
+                sample_features = torch.cat(sample_features, dim=1)
+                sample_features = F.interpolate(
+                    sample_features.unsqueeze(0), size=(27, 27), mode="bilinear"
+                ).squeeze(0)
+                reshaped_patch_features.append(sample_features)
+        reshaped_patch_features = (
+            torch.stack(reshaped_patch_features).view(-1, 1152, 729).transpose(1, 2)
+        )
+
+        final_features = torch.cat([full_img_features, reshaped_patch_features], dim=2)
+
+        return self.projection(final_features)
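
Taken together, the new vision path resizes each image to the closest of the supported sizes, tiles anything larger than 378x378 into 378x378 crops, encodes the crops alongside a downscaled full image, and concatenates the two feature sets before projection (hence the doubled MLP input dimension). A rough sketch of the behavior, assuming vision_encoder is importable as a module and using a synthetic tensor; shapes are derived from the code above rather than measured:

import torch
import PIL.Image
from vision_encoder import VisionEncoder, create_patches  # import path assumed

# Tiling: a 756x756 CHW tensor is split into 2 rows of two 378x378 patches.
patches = create_patches(torch.rand(3, 756, 756))
print(len(patches), patches[0].shape)  # 2, torch.Size([2, 3, 378, 378])
# A 378x378 input returns [] and forward() reuses the full-image features instead.

# End to end: preprocess() picks the nearest aspect ratio from supported_sizes,
# and forward() returns one projected embedding per input image.
encoder = VisionEncoder()  # in practice loaded as part of the Moondream checkpoint
with torch.no_grad():
    embeds = encoder(PIL.Image.open("wide_photo.jpg"))  # file name illustrative
print(embeds.shape)  # expected (1, 729, 2048): 27x27 tokens, model_dim 2048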