Karroyan committed on
Commit c5c6b91 · verified · 1 Parent(s): 7b0b3bf

Upload processing_keye.py with huggingface_hub

Files changed (1)
  1. processing_keye.py +299 -0
processing_keye.py ADDED
@@ -0,0 +1,299 @@
+ # coding=utf-8
+ # Copyright 2025 The Keye Team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import List, Union
+
+ import numpy as np
+ import torch
+
+ from transformers.feature_extraction_utils import BatchFeature
+ from transformers.processing_utils import (
+     ProcessingKwargs,
+     ProcessorMixin,
+     Unpack,
+     VideosKwargs,
+ )
+ from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
+
+
+ ImageInput = Union[
+     "PIL.Image.Image",
+     np.ndarray,
+     "torch.Tensor",
+     List["PIL.Image.Image"],
+     List[np.ndarray],
+     List["torch.Tensor"],
+ ]  # noqa
+
+
+ VideoInput = Union[
+     List["PIL.Image.Image"],
+     "np.ndarray",
+     "torch.Tensor",
+     List["np.ndarray"],
+     List["torch.Tensor"],
+     List[List["PIL.Image.Image"]],
+     List[List["np.ndarray"]],
+     List[List["torch.Tensor"]],
+ ]  # noqa
+
+
+ # Default values for processor kwargs, merged in by `ProcessorMixin._merge_kwargs`.
+ KEYE_PROCESSOR_DEFAULTS = {
+     "text_kwargs": {
+         "padding": False,
+     },
+     "videos_kwargs": {"fps": 2.0},
+ }
+
+
+ class KeyeVideosProcessorKwargs(VideosKwargs, total=False):
+     fps: Union[List[float], float]
+
+
+ class KeyeProcessorKwargs(ProcessingKwargs, total=False):
+     videos_kwargs: KeyeVideosProcessorKwargs
+     _defaults = KEYE_PROCESSOR_DEFAULTS
+
+
+ class KeyeProcessor(ProcessorMixin):
+     r"""
+     [`KeyeProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`Qwen2TokenizerFast`]. See the
+     [`~KeyeProcessor.__call__`] and [`~KeyeProcessor.decode`] for more information.
+
+     Args:
+         image_processor ([`SiglipImageProcessor`], *optional*):
+             The image processor is a required input.
+         tokenizer ([`Qwen2TokenizerFast`], *optional*):
+             The tokenizer is a required input.
+         chat_template (`str`, *optional*):
+             A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
+     """
+
+     attributes = ["image_processor", "tokenizer"]
+     valid_kwargs = [
+         "chat_template",
+         "image_std",
+         "min_pixels",
+         "image_mean",
+         "merge_size",
+         "image_processor_type",
+         "temporal_patch_size",
+         "patch_size",
+         "max_pixels",
+     ]
+
+     image_processor_class = "AutoImageProcessor"
+     tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
+
+     def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
+         # Fall back to the default vision pad tokens when the tokenizer does not define them.
+         self.image_token = (
+             "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
+         )
+         self.video_token = (
+             "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
+         )
+         super().__init__(image_processor, tokenizer, chat_template=chat_template)
+
+     def __call__(
+         self,
+         images: ImageInput = None,
+         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+         videos: VideoInput = None,
+         **kwargs: Unpack[KeyeProcessorKwargs],
+     ) -> BatchFeature:
+         """
+         Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+         and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to
+         encode the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments
+         to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `vision_infos` is not `None`.
+
+         Args:
+             images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                 The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                 tensor. Both channels-first and channels-last formats are supported.
+             text (`str`, `List[str]`, `List[List[str]]`):
+                 The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                 (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                 `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+             videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                 The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
+                 tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
+             return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                 If set, will return tensors of a particular framework. Acceptable values are:
+                 - `'tf'`: Return TensorFlow `tf.constant` objects.
+                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                 - `'np'`: Return NumPy `np.ndarray` objects.
+                 - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+         Returns:
+             [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+               `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is
+               not `None`).
+             - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+             - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
+             - **image_grid_thw** -- List of 3D grids (temporal, height, width) of the images in the LLM. Returned when `images` is not `None`.
+             - **video_grid_thw** -- List of 3D grids (temporal, height, width) of the videos in the LLM. Returned when `videos` is not `None`.
+             - **second_per_grid_ts** -- List of seconds covered by each temporal grid step of each video. Returned when `videos` is not `None`.
+         """
+         output_kwargs = self._merge_kwargs(
+             KeyeProcessorKwargs,
+             tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+             **kwargs,
+         )
+
+         if images is not None:
+             image_inputs = self.image_processor(images=images, return_tensors="pt")
+             image_grid_thw = image_inputs["image_grid_thw"]
+         else:
+             image_inputs = {}
+             image_grid_thw = None
+
+         if videos is not None:
+             videos_inputs = self.image_processor(
+                 images=None, videos=videos, **output_kwargs["images_kwargs"]
+             )
+             video_grid_thw = videos_inputs["video_grid_thw"]
+
+             # Each temporal grid step covers `temporal_patch_size` sampled frames, so at a
+             # sampling rate of `fps` frames per second it spans temporal_patch_size / fps seconds.
+             fps = output_kwargs["videos_kwargs"].pop("fps", 2.0)
+             if isinstance(fps, (int, float)):
+                 second_per_grid_ts = [
+                     self.image_processor.temporal_patch_size / fps
+                 ] * len(video_grid_thw)
+             elif hasattr(fps, "__len__") and len(fps) == len(video_grid_thw):
+                 second_per_grid_ts = [
+                     self.image_processor.temporal_patch_size / tmp for tmp in fps
+                 ]
+             else:
+                 raise ValueError(
+                     f"The length of fps ({len(fps) if hasattr(fps, '__len__') else fps}) must be equal to the "
+                     f"length of video_grid_thw ({len(video_grid_thw)}), or fps should be a single number."
+                 )
+             videos_inputs.update(
+                 {"second_per_grid_ts": torch.tensor(second_per_grid_ts)}
+             )
+         else:
+             videos_inputs = {}
+             video_grid_thw = None
+
+         if not isinstance(text, list):
+             text = [text]
+
+         if image_grid_thw is not None:
+             # Expand each image token into one pad token per merged vision patch: a grid of
+             # t * h * w patches collapses by a factor of merge_size**2 after spatial merging.
+             index = 0
+             for i in range(len(text)):
+                 while self.image_token in text[i]:
+                     text[i] = text[i].replace(
+                         self.image_token,
+                         "<|placeholder|>"
+                         * (
+                             image_grid_thw[index].prod()
+                             // self.image_processor.merge_size
+                             // self.image_processor.merge_size
+                         ),
+                         1,
+                     )
+                     index += 1
+                 text[i] = text[i].replace("<|placeholder|>", self.image_token)
+
+         if video_grid_thw is not None:
+             # Same expansion for video tokens, using the per-video grids.
+             index = 0
+             for i in range(len(text)):
+                 while self.video_token in text[i]:
+                     text[i] = text[i].replace(
+                         self.video_token,
+                         "<|placeholder|>"
+                         * (
+                             video_grid_thw[index].prod()
+                             // self.image_processor.merge_size
+                             // self.image_processor.merge_size
+                         ),
+                         1,
+                     )
+                     index += 1
+                 text[i] = text[i].replace("<|placeholder|>", self.video_token)
+
+         text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
+
+         return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs})
+
+     def batch_decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+         refer to the docstring of this method for more information.
+         """
+         return self.tokenizer.batch_decode(*args, **kwargs)
+
+     def decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
+         to the docstring of this method for more information.
+         """
+         return self.tokenizer.decode(*args, **kwargs)
+
+     def post_process_image_text_to_text(
+         self,
+         generated_outputs,
+         skip_special_tokens=True,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         """
+         Post-process the output of the model to decode the text.
+
+         Args:
+             generated_outputs (`torch.Tensor` or `np.ndarray`):
+                 The output of the model's `generate` function. The output is expected to be a tensor of shape
+                 `(batch_size, sequence_length)` or `(sequence_length,)`.
+             skip_special_tokens (`bool`, *optional*, defaults to `True`):
+                 Whether or not to remove special tokens in the output. Argument passed to the tokenizer's
+                 `batch_decode` method.
+             clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+                 Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's
+                 `batch_decode` method.
+             **kwargs:
+                 Additional arguments to be passed to the tokenizer's `batch_decode` method.
+
+         Returns:
+             `List[str]`: The decoded text.
+         """
+         return self.tokenizer.batch_decode(
+             generated_outputs,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     @property
+     def model_input_names(self):
+         tokenizer_input_names = self.tokenizer.model_input_names
+         image_processor_input_names = self.image_processor.model_input_names
+         names_from_processor = list(
+             dict.fromkeys(tokenizer_input_names + image_processor_input_names)
+         )
+         return names_from_processor + ["second_per_grid_ts"]
+
+
+ __all__ = ["KeyeProcessor"]
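
For reference, a minimal usage sketch of the processor defined above. It assumes this file is bundled in a model repository loadable through `AutoProcessor` with `trust_remote_code=True`; the repo id and image path below are placeholders, not names from this commit:

from PIL import Image
from transformers import AutoProcessor

# Placeholder repo id: any checkpoint that ships this processing_keye.py works
# when loaded with trust_remote_code=True.
processor = AutoProcessor.from_pretrained("<keye-model-repo>", trust_remote_code=True)

image = Image.open("example.jpg")  # placeholder local image
# One <|image_pad|> per image; __call__ expands it into one token per merged patch,
# i.e. prod(image_grid_thw) // merge_size**2 tokens.
inputs = processor(images=image, text="<|image_pad|>Describe this image.", return_tensors="pt")

print(inputs["input_ids"].shape)   # (1, sequence_length)
print(inputs["image_grid_thw"])    # one (t, h, w) grid per image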