HwwwH committed
Commit: 0f19d4b
Parent: 3ce0a28

adapt for transformers processing

Files changed (1):
  1. processing_minicpmv.py +2 -51
processing_minicpmv.py CHANGED
@@ -53,62 +53,13 @@ class MiniCPMVProcessor(ProcessorMixin):
         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
         images: ImageInput = None,
         padding: Union[bool, str, PaddingStrategy] = False,
-        truncation: Union[bool, str, TruncationStrategy] = None,
         max_length: Optional[int] = None,
         do_pad: Optional[bool] = True,
         return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
     ) -> MiniCPMVBatchFeature:
-        """
-        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
-        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
-        encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
-        LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the
-        docstring of the above two methods for more information.
-
-        Args:
-            text (`str`, `List[str]`, `List[List[str]]`):
-                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
-                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
-                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
-            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
-                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
-                tensor. Both channels-first and channels-last formats are supported.
-            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
-                Select a strategy to pad the returned sequences (according to the model's padding side and padding
-                index) among:
-                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
-                  sequence is provided).
-                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
-                  acceptable input length for the model if that argument is not provided.
-                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
-                  lengths).
-            max_length (`int`, *optional*):
-                Maximum length of the returned list and optionally padding length (see above).
-            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
-                Whether to pad the image. If `True`, will pad the images in the batch to the largest image in the batch
-                and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
-            truncation (`bool`, *optional*):
-                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
-            return_tensors (`str` or [`~utils.TensorType`], *optional*):
-                If set, will return tensors of a particular framework. Acceptable values are:
-
-                - `'tf'`: Return TensorFlow `tf.constant` objects.
-                - `'pt'`: Return PyTorch `torch.Tensor` objects.
-                - `'np'`: Return NumPy `np.ndarray` objects.
-                - `'jax'`: Return JAX `jnp.ndarray` objects.
-
-        Returns:
-            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
-
-            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
-            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
-              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
-              `None`).
-            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
-        """
         if images is not None:
             image_inputs = self.image_processor(images, do_pad=do_pad, return_tensors=return_tensors)
-        return self._convert_images_texts_to_inputs(image_inputs, text, max_length=max_length)
+        return self._convert_images_texts_to_inputs(image_inputs, text, max_length=max_length, return_tensors=return_tensors)
 
     # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
     def batch_decode(self, *args, **kwargs):
@@ -185,7 +136,7 @@ class MiniCPMVProcessor(ProcessorMixin):
             "pixel_values": images,
             "image_sizes": [image_sizes],
             "image_bounds": [image_bounds]
-        })
+        }, tensor_type=return_tensors)
 
     @property
     # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
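With this change, the `return_tensors` value requested by the caller is forwarded from `__call__` into `_convert_images_texts_to_inputs`, so the text features come back in the same framework as the image features instead of as plain Python lists. A minimal usage sketch, not part of the commit: the checkpoint ID and file name below are illustrative, assuming a repo that ships this processor via `trust_remote_code`.

from PIL import Image
from transformers import AutoProcessor

# Hypothetical checkpoint ID; any repo that bundles processing_minicpmv.py would do.
processor = AutoProcessor.from_pretrained("openbmb/MiniCPM-V-2", trust_remote_code=True)

image = Image.open("example.jpg").convert("RGB")
inputs = processor(text="Describe this image.", images=image, return_tensors="pt")

# After this commit the requested tensor type applies to the text side too,
# so input_ids comes back as a torch.Tensor rather than a list of ints.
print(type(inputs["input_ids"]))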
 
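The second hunk threads the same value through as `tensor_type` when constructing the `MiniCPMVBatchFeature`. In transformers, `BatchFeature` (which `MiniCPMVBatchFeature` presumably extends) converts its values to the requested framework's tensors at construction time, so passing `tensor_type=return_tensors` here is what triggers the conversion. A short sketch of the standard `BatchFeature` behaviour this relies on:

from transformers import BatchFeature

# tensor_type="pt" converts the raw lists to torch.Tensor on construction.
feature = BatchFeature({"input_ids": [[1, 2, 3]]}, tensor_type="pt")
print(type(feature["input_ids"]))  # <class 'torch.Tensor'>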