Zhihui committed on
Commit 1a6ae74
1 Parent(s): e8c20f3

Upload tokenizer

qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
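Although the vocabulary itself is not rendered, its layout can be read off `_load_tiktoken_bpe` in `tokenization_qwen.py` below: each non-empty line holds a base64-encoded token and its integer BPE rank, separated by whitespace. A minimal sketch of reading the file directly (the file name comes from this commit; everything else is illustrative):

import base64

ranks = {}
with open("qwen.tiktoken", "rb") as f:
    for line in f.read().splitlines():
        if not line:
            continue
        token_b64, rank = line.split()
        ranks[base64.b64decode(token_b64)] = int(rank)
# `ranks` maps raw token bytes to merge ranks, which is exactly what
# QWenTokenizer passes to tiktoken.Encoding as `mergeable_ranks`.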
 
special_tokens_map.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>"
+ }
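Both entries map to the same surface form, so padded batches reuse the end-of-text token rather than introducing a new one. A quick sanity check, assuming the files from this commit sit in a local directory (the path is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./qwen-vl-tokenizer", trust_remote_code=True)
assert tok.eos_token == tok.pad_token == "<|endoftext|>"
assert tok.pad_token_id == tok.eos_token_id  # a single id serves both roles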
tokenization_qwen.py ADDED
@@ -0,0 +1,590 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Tokenization classes for QWen."""
+
+ import base64
+ import logging
+ import os
+ import requests
+ import unicodedata
+ from typing import Collection, Dict, List, Set, Tuple, Union, Any, Callable, Optional
+
+ import tiktoken
+ import numpy as np
+ from PIL import Image
+ from PIL import ImageFont
+ from PIL import ImageDraw
+ from transformers import PreTrainedTokenizer, AddedToken
+ from transformers.utils import try_to_load_from_cache
+
+ import matplotlib.colors as mcolors
+ from matplotlib.font_manager import FontProperties
+
+ logger = logging.getLogger(__name__)
+
+
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken", "ttf": "SimSun.ttf"}
+ FONT_PATH = try_to_load_from_cache("Qwen/Qwen-VL-Chat", "SimSun.ttf")
+ if FONT_PATH is None:
+     if not os.path.exists("SimSun.ttf"):
+         ttf = requests.get("https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/SimSun.ttf")
+         open("SimSun.ttf", "wb").write(ttf.content)
+     FONT_PATH = "SimSun.ttf"
+
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
+ ENDOFTEXT = "<|endoftext|>"
+ IMSTART = "<|im_start|>"
+ IMEND = "<|im_end|>"
+ # as the default behavior is changed to allow special tokens in
+ # regular texts, the surface forms of special tokens need to be
+ # as different as possible to minimize the impact
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
+ SPECIAL_TOKENS = (
+     ENDOFTEXT,
+     IMSTART,
+     IMEND,
+ ) + EXTRAS
+ IMG_TOKEN_SPAN = 256
+
+
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
+     with open(tiktoken_bpe_file, "rb") as f:
+         contents = f.read()
+     return {
+         base64.b64decode(token): int(rank)
+         for token, rank in (line.split() for line in contents.splitlines() if line)
+     }
+
+ def _list_find(
+     input_list: List[Any],
+     candidates: Tuple[Any],
+     start: int = 0,
+ ):
+     for i in range(start, len(input_list)):
+         if input_list[i] in candidates:
+             return i
+     return -1
+
+ def _replace_closed_tag(
+     input_tokens: List[Any],
+     start_tags: Union[Any, Tuple[Any]],
+     end_tags: Union[Any, Tuple[Any]],
+     inclusive_replace_func: Callable,
+     exclusive_replace_func: Callable = lambda x: x,
+ ):
+     if isinstance(start_tags, (str, int)):
+         start_tags = (start_tags,)
+     if isinstance(end_tags, (str, int)):
+         end_tags = (end_tags,)
+     assert len(start_tags) == len(end_tags)
+
+     output_tokens = []
+     end = 0
+     while True:
+         start = _list_find(input_tokens, start_tags, end)
+         if start == -1:
+             break
+         output_tokens.extend(exclusive_replace_func(input_tokens[end : start]))
+         tag_idx = start_tags.index(input_tokens[start])
+         end = _list_find(input_tokens, (end_tags[tag_idx],), start)
+         if end == -1:
+             raise ValueError("Unclosed image token")
+         output_tokens.extend(inclusive_replace_func(input_tokens[start : end + 1]))
+         end += 1
+     output_tokens.extend(exclusive_replace_func(input_tokens[end : ]))
+     return output_tokens
+
+ class QWenTokenizer(PreTrainedTokenizer):
+     """QWen tokenizer."""
+
+     vocab_files_names = VOCAB_FILES_NAMES
+
+     def __init__(
+         self,
+         vocab_file,
+         errors="replace",
+         image_start_tag='<img>',
+         image_end_tag='</img>',
+         image_pad_tag='<imgpad>',
+         ref_start_tag='<ref>',
+         ref_end_tag='</ref>',
+         box_start_tag='<box>',
+         box_end_tag='</box>',
+         quad_start_tag='<quad>',
+         quad_end_tag='</quad>',
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.image_start_tag = image_start_tag
+         self.image_end_tag = image_end_tag
+         self.image_pad_tag = image_pad_tag
+         self.ref_start_tag = ref_start_tag
+         self.ref_end_tag = ref_end_tag
+         self.box_start_tag = box_start_tag
+         self.box_end_tag = box_end_tag
+         self.quad_start_tag = quad_start_tag
+         self.quad_end_tag = quad_end_tag
+         self.IMAGE_ST = (
+             ref_start_tag, ref_end_tag,
+             box_start_tag, box_end_tag,
+             quad_start_tag, quad_end_tag,
+             image_start_tag, image_end_tag,
+             image_pad_tag
+         )
+
+         self.errors = errors  # how to handle errors in decoding
+
+         self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: dict[bytes, int]
+         self.special_tokens = {
+             token: index
+             for index, token in enumerate(
+                 SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
+             )
+         }
+         self.img_start_id = self.special_tokens[self.image_start_tag]
+         self.img_end_id = self.special_tokens[self.image_end_tag]
+         self.img_pad_id = self.special_tokens[self.image_pad_tag]
+         self.ref_start_id = self.special_tokens[self.ref_start_tag]
+         self.ref_end_id = self.special_tokens[self.ref_end_tag]
+         self.box_start_id = self.special_tokens[self.box_start_tag]
+         self.box_end_id = self.special_tokens[self.box_end_tag]
+         self.quad_start_id = self.special_tokens[self.quad_start_tag]
+         self.quad_end_id = self.special_tokens[self.quad_end_tag]
+
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         assert (
+             len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
+         ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
+
+         self.decoder = {
+             v: k for k, v in self.mergeable_ranks.items()
+         }  # type: dict[int, bytes|str]
+         self.decoder.update({v: k for k, v in self.special_tokens.items()})
+
+         self.tokenizer = enc  # type: tiktoken.Encoding
+
+         self.eod_id = self.tokenizer.eot_token
+         self.im_start_id = self.special_tokens[IMSTART]
+         self.im_end_id = self.special_tokens[IMEND]
+
+     def __getstate__(self):
+         # for pickle lovers
+         state = self.__dict__.copy()
+         del state['tokenizer']
+         return state
+
+     def __setstate__(self, state):
+         # tokenizer is not python native; don't pass it; rebuild it
+         self.__dict__.update(state)
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         self.tokenizer = enc
+
+
+     def __len__(self) -> int:
+         return self.tokenizer.n_vocab
+
+     def get_vocab(self) -> Dict[bytes, int]:
+         return self.mergeable_ranks
+
+     def convert_tokens_to_ids(
+         self, tokens: Union[bytes, str, List[Union[bytes, str]]]
+     ) -> List[int]:
+         ids = []
+         if isinstance(tokens, (str, bytes)):
+             if tokens in self.special_tokens:
+                 return self.special_tokens[tokens]
+             else:
+                 return self.mergeable_ranks.get(tokens)
+         for token in tokens:
+             if token in self.special_tokens:
+                 ids.append(self.special_tokens[token])
+             else:
+                 ids.append(self.mergeable_ranks.get(token))
+         return ids
+
+     def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
+         if not special_tokens and new_tokens:
+             raise ValueError('Adding regular tokens is not supported')
+         for token in new_tokens:
+             surface_form = token.content if isinstance(token, AddedToken) else token
+             if surface_form not in SPECIAL_TOKENS + self.IMAGE_ST:
+                 raise ValueError('Adding unknown special tokens is not supported')
+         return 0
+
+     def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
+         """
+         Save only the vocabulary of the tokenizer (the BPE vocabulary file).
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         file_path = os.path.join(save_directory, "qwen.tiktoken")
+         with open(file_path, "w", encoding="utf8") as w:
+             for k, v in self.mergeable_ranks.items():
+                 line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
+                 w.write(line)
+         return (file_path,)
+
+     def tokenize(
+         self,
+         text: str,
+         allowed_special: Union[Set, str] = "all",
+         disallowed_special: Union[Collection, str] = (),
+         **kwargs,
+     ) -> List[Union[bytes, str]]:
+         """
+         Converts a string into a sequence of tokens.
+
+         Args:
+             text (`str`):
+                 The sequence to be encoded.
+             allowed_special (`Literal["all"]` or `set`):
+                 The surface forms of the tokens to be encoded as special tokens in regular texts.
+                 Defaults to "all".
+             disallowed_special (`Literal["all"]` or `Collection`):
+                 The surface forms of the tokens that should not be in regular texts and trigger errors.
+                 Defaults to an empty tuple.
+
+             kwargs (additional keyword arguments, *optional*):
+                 Will be passed to the underlying model specific encode method.
+
+         Returns:
+             `List[bytes|str]`: The list of tokens.
+         """
+         tokens = []
+         text = unicodedata.normalize("NFC", text)
+
+         # this implementation takes a detour: text -> token id -> token surface forms
+         for t in self.tokenizer.encode(
+             text, allowed_special=allowed_special, disallowed_special=disallowed_special
+         ):
+             tokens.append(self.decoder[t])
+
+         def _encode_imgurl(img_tokens):
+             assert img_tokens[0] == self.image_start_tag and img_tokens[-1] == self.image_end_tag
+             img_tokens = img_tokens[1:-1]
+             img_url = b''.join(img_tokens)
+             out_img_tokens = list(map(self.decoder.get, img_url))
+             if len(out_img_tokens) > IMG_TOKEN_SPAN:
+                 raise ValueError("The content in {}..{} is too long".format(
+                     self.image_start_tag, self.image_end_tag))
+             out_img_tokens.extend([self.image_pad_tag] * (IMG_TOKEN_SPAN - len(out_img_tokens)))
+             out_img_tokens = [self.image_start_tag] + out_img_tokens + [self.image_end_tag]
+             return out_img_tokens
+
+         return _replace_closed_tag(tokens, self.image_start_tag, self.image_end_tag, _encode_imgurl)
+
+     def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
+         """
+         Converts a sequence of tokens into a single string.
+         """
+         text = ""
+         temp = b""
+         for t in tokens:
+             if isinstance(t, str):
+                 if temp:
+                     text += temp.decode("utf-8", errors=self.errors)
+                     temp = b""
+                 text += t
+             elif isinstance(t, bytes):
+                 temp += t
+             else:
+                 raise TypeError("token should only be of type bytes or str")
+         if temp:
+             text += temp.decode("utf-8", errors=self.errors)
+         return text
+
+     @property
+     def vocab_size(self):
+         return self.tokenizer.n_vocab
+
+     def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
+         """Converts an id to a token, special tokens included"""
+         if index in self.decoder:
+             return self.decoder[index]
+         raise ValueError("unknown ids")
+
+     def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
+         """Converts a token to an id using the vocab, special tokens included"""
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         if token in self.mergeable_ranks:
+             return self.mergeable_ranks[token]
+         raise ValueError("unknown token")
+
+     def _tokenize(self, text: str, **kwargs):
+         """
+         Converts a string into a sequence of tokens (string), using the tokenizer. Splits into words for word-based
+         vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
+
+         Do NOT take care of added tokens.
+         """
+         raise NotImplementedError
+
+     def _decode(
+         self,
+         token_ids: Union[int, List[int]],
+         skip_special_tokens: bool = False,
+         errors: str = None,
+         **kwargs,
+     ) -> str:
+         if isinstance(token_ids, int):
+             token_ids = [token_ids]
+
+         def _decode_imgurl(img_token_ids):
+             assert img_token_ids[0] == self.img_start_id and img_token_ids[-1] == self.img_end_id
+             img_token_ids = img_token_ids[1:-1]
+             img_token_ids = img_token_ids[ : img_token_ids.index(self.img_pad_id)]
+             img_url = bytes(img_token_ids).decode('utf-8')
+             return [self.img_start_id] + self.tokenizer.encode(img_url) + [self.img_end_id]
+
+         token_ids = _replace_closed_tag(token_ids, self.img_start_id, self.img_end_id, _decode_imgurl)
+
+         if skip_special_tokens:
+             token_ids = [i for i in token_ids if i < self.eod_id]
+         return self.tokenizer.decode(token_ids, errors=errors or self.errors)
+
+     def to_list_format(self, text: str):
+         text = unicodedata.normalize("NFC", text)
+         token_ids = self.tokenizer.encode(
+             text, allowed_special=set(self.IMAGE_ST + (ENDOFTEXT,)))
+
+         def _encode_vl_info(tokens):
+             if len(tokens) == 0:
+                 return []
+             if tokens[0] == self.img_start_id and tokens[-1] == self.img_end_id:
+                 key = 'image'
+             elif tokens[0] == self.ref_start_id and tokens[-1] == self.ref_end_id:
+                 key = 'ref'
+             elif tokens[0] == self.box_start_id and tokens[-1] == self.box_end_id:
+                 key = 'box'
+             elif tokens[0] == self.quad_start_id and tokens[-1] == self.quad_end_id:
+                 key = 'quad'
+             else:
+                 _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
+                 return [{'text': b''.join(map(_tobytes, map(self.decoder.get, tokens))).decode('utf-8')}]
+             _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
+             val = b''.join(map(_tobytes, map(self.decoder.get, tokens[1:-1]))).decode('utf-8')
+             return [{key: val}]
+
+         return _replace_closed_tag(
+             token_ids,
+             (self.img_start_id, self.ref_start_id, self.box_start_id, self.quad_start_id),
+             (self.img_end_id, self.ref_end_id, self.box_end_id, self.quad_end_id),
+             _encode_vl_info,
+             _encode_vl_info,
+         )
+
+     def from_list_format(self, list_format: List[Dict]):
+         text = ''
+         num_images = 0
+         for ele in list_format:
+             if 'image' in ele:
+                 num_images += 1
+                 text += f'Picture {num_images}: '
+                 text += self.image_start_tag + ele['image'] + self.image_end_tag
+                 text += '\n'
+             elif 'text' in ele:
+                 text += ele['text']
+             elif 'box' in ele:
+                 if 'ref' in ele:
+                     text += self.ref_start_tag + ele['ref'] + self.ref_end_tag
+                 for box in ele['box']:
+                     text += self.box_start_tag + '(%d,%d),(%d,%d)' % (box[0], box[1], box[2], box[3]) + self.box_end_tag
+             else:
+                 raise ValueError("Unsupported element: " + str(ele))
+         return text
+
+     def _fetch_latest_picture(self, response, history):
+         if history is None:
+             history = []
+         _history = history + [(response, None)]
+         for q, r in _history[::-1]:
+             for ele in self.to_list_format(q)[::-1]:
+                 if 'image' in ele:
+                     return ele['image']
+         return None
+
+     def _fetch_all_box_with_ref(self, text):
+         list_format = self.to_list_format(text)
+         output = []
+         for i, ele in enumerate(list_format):
+             if 'box' in ele:
+                 bbox = tuple(map(int, ele['box'].replace('(', '').replace(')', '').split(',')))
+                 assert len(bbox) == 4
+                 output.append({'box': bbox})
+                 if i > 0 and 'ref' in list_format[i-1]:
+                     output[-1]['ref'] = list_format[i-1]['ref'].strip()
+         return output
+
+     def draw_bbox_on_latest_picture(
+         self,
+         response,
+         history=None,
+     ) -> Optional[Image.Image]:
+         image = self._fetch_latest_picture(response, history)
+         if image is None:
+             return None
+         if image.startswith("http://") or image.startswith("https://"):
+             image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
+             h, w = image.height, image.width
+         else:
+             image = np.asarray(Image.open(image).convert("RGB"))
+             h, w = image.shape[0], image.shape[1]
+         visualizer = Visualizer(image)
+
+         boxes = self._fetch_all_box_with_ref(response)
+         if not boxes:
+             return None
+         color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()])  # init color
+         for box in boxes:
+             if 'ref' in box:  # random new color for new refexps
+                 color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()])
+             x1, y1, x2, y2 = box['box']
+             x1, y1, x2, y2 = (int(x1 / 1000 * w), int(y1 / 1000 * h), int(x2 / 1000 * w), int(y2 / 1000 * h))
+             visualizer.draw_box((x1, y1, x2, y2), alpha=1, edge_color=color)
+             if 'ref' in box:
+                 visualizer.draw_text(box['ref'], (x1, y1), color=color, horizontal_alignment="left")
+         return visualizer.output
+
+
+ import colorsys
+ import logging
+ import math
+ import numpy as np
+ import matplotlib as mpl
+ import matplotlib.colors as mplc
+ import matplotlib.figure as mplfigure
+ import torch
+ from matplotlib.backends.backend_agg import FigureCanvasAgg
+ from PIL import Image
+ import random
+
+ logger = logging.getLogger(__name__)
+
+
+ class VisImage:
+     def __init__(self, img, scale=1.0):
+         self.img = img
+         self.scale = scale
+         self.width, self.height = img.shape[1], img.shape[0]
+         self._setup_figure(img)
+
+     def _setup_figure(self, img):
+         fig = mplfigure.Figure(frameon=False)
+         self.dpi = fig.get_dpi()
+         # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
+         # (https://github.com/matplotlib/matplotlib/issues/15363)
+         fig.set_size_inches(
+             (self.width * self.scale + 1e-2) / self.dpi,
+             (self.height * self.scale + 1e-2) / self.dpi,
+         )
+         self.canvas = FigureCanvasAgg(fig)
+         # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
+         ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
+         ax.axis("off")
+         self.fig = fig
+         self.ax = ax
+         self.reset_image(img)
+
+     def reset_image(self, img):
+         img = img.astype("uint8")
+         self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
+
+     def save(self, filepath):
+         self.fig.savefig(filepath)
+
+     def get_image(self):
+         canvas = self.canvas
+         s, (width, height) = canvas.print_to_buffer()
+
+         buffer = np.frombuffer(s, dtype="uint8")
+
+         img_rgba = buffer.reshape(height, width, 4)
+         rgb, alpha = np.split(img_rgba, [3], axis=2)
+         return rgb.astype("uint8")
+
+
+ class Visualizer:
+     def __init__(self, img_rgb, metadata=None, scale=1.0):
+         self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
+         self.font_path = FONT_PATH
+         self.output = VisImage(self.img, scale=scale)
+         self.cpu_device = torch.device("cpu")
+
+         # too small texts are useless, therefore clamp the default font size
+         self._default_font_size = max(
+             np.sqrt(self.output.height * self.output.width) // 30, 15 // scale
+         )
+
+     def draw_text(
+         self,
+         text,
+         position,
+         *,
+         font_size=None,
+         color="g",
+         horizontal_alignment="center",
+         rotation=0,
+     ):
+         if not font_size:
+             font_size = self._default_font_size
+
+         # since the text background is dark, we don't want the text to be dark
+         color = np.maximum(list(mplc.to_rgb(color)), 0.2)
+         color[np.argmax(color)] = max(0.8, np.max(color))
+
+         x, y = position
+         self.output.ax.text(
+             x,
+             y,
+             text,
+             size=font_size * self.output.scale,
+             fontproperties=FontProperties(fname=self.font_path),
+             bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
+             verticalalignment="top",
+             horizontalalignment=horizontal_alignment,
+             color=color,
+             zorder=10,
+             rotation=rotation,
+         )
+         return self.output
+
+     def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
+
+         x0, y0, x1, y1 = box_coord
+         width = x1 - x0
+         height = y1 - y0
+
+         linewidth = max(self._default_font_size / 4, 1)
+
+         self.output.ax.add_patch(
+             mpl.patches.Rectangle(
+                 (x0, y0),
+                 width,
+                 height,
+                 fill=False,
+                 edgecolor=edge_color,
+                 linewidth=linewidth * self.output.scale,
+                 alpha=alpha,
+                 linestyle=line_style,
+             )
+         )
+         return self.output
+
+     def get_output(self):
+
+         return self.output
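The file above pairs a tiktoken BPE wrapper with vision-language helpers: `from_list_format` builds prompts, `tokenize` pads the span between `<img>` and `</img>` to IMG_TOKEN_SPAN positions, and `to_list_format` / `draw_bbox_on_latest_picture` parse and visualize `<ref>`/`<box>` tags. A usage sketch with a hypothetical image URL, query, and local path (none of which come from this commit):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./qwen-vl-tokenizer", trust_remote_code=True)

# Build a prompt from structured elements; the image URL is a placeholder.
query = tok.from_list_format([
    {"image": "https://example.com/demo.jpg"},
    {"text": "Where is the dog?"},
])
# -> "Picture 1: <img>https://example.com/demo.jpg</img>\nWhere is the dog?"

# tokenize() pads the <img>...</img> span with <imgpad> so every image
# occupies exactly IMG_TOKEN_SPAN (256) positions.
tokens = tok.tokenize(query)

# A response containing <ref>/<box> tags can be parsed and drawn; box
# coordinates are on a 0-1000 grid and are rescaled to the image size.
response = "<ref>the dog</ref><box>(120,200),(560,890)</box>"
image = tok.draw_bbox_on_latest_picture(response, history=[(query, response)])
if image is not None:
    image.save("dog_box.jpg")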
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_qwen.QWenTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 2048,
+   "padding_side": "right",
+   "tokenizer_class": "QWenTokenizer"
+ }
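The `auto_map` entry routes `AutoTokenizer` to the `QWenTokenizer` class shipped in `tokenization_qwen.py`, so loading requires trusting remote code. A minimal sketch; the repo id matches the one referenced by the font-loading code above, so substitute whichever repo actually hosts these files:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)
ids = tok.encode("Hello, Qwen-VL!")
print(tok.decode(ids))
print(tok.model_max_length)  # 2048, as set in this config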