infgrad committed on
Commit
9ec969b
1 Parent(s): fc73791

Upload 15 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|jasper_img_end|>": 151648,
+   "<|jasper_img_start|>": 151646,
+   "<|jasper_img_token|>": 151647
+ }
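
The three <|jasper_*|> additions above are the image placeholder tokens that config.json pairs with num_img_tokens = 300 reserved positions per image. A minimal sketch of how they would be laid out in an input sequence, assuming the tokenizer shipped in this commit and a hypothetical repo id:

    from transformers import AutoTokenizer

    # Hypothetical repo id; the token ids checked below come from added_tokens.json.
    tok = AutoTokenizer.from_pretrained("infgrad/jasper-vl", trust_remote_code=True)
    prompt = (
        "<|jasper_img_start|>"
        + "<|jasper_img_token|>" * 300   # num_img_tokens placeholders, later overwritten with vision features
        + "<|jasper_img_end|>"
        + "Describe the image."
    )
    ids = tok(prompt, return_tensors="pt").input_ids
    assert (ids == 151647).sum().item() == 300  # id of <|jasper_img_token|>
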
config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "architectures": [
+     "JasperVL"
+   ],
+   "auto_map": {
+     "AutoModel": "modeling_jasper_vl.JasperVL",
+     "AutoConfig": "configuration_jasper_vl.JasperVLConfig"
+   },
+   "img_end_token": "<|jasper_img_end|>",
+   "img_end_token_id": 151648,
+   "img_start_token": "<|jasper_img_start|>",
+   "img_start_token_id": 151646,
+   "img_token": "<|jasper_img_token|>",
+   "img_token_id": 151647,
+   "is_text_encoder": false,
+   "model_type": "jasper_vl",
+   "num_img_tokens": 300,
+   "text_config": {
+     "_attn_implementation_autoset": true,
+     "architectures": [
+       "JasperVL"
+     ],
+     "bos_token_id": 151643,
+     "eos_token_id": 151643,
+     "hidden_size": 1536,
+     "intermediate_size": 8960,
+     "max_position_embeddings": 131072,
+     "max_window_layers": 21,
+     "model_type": "qwen2",
+     "num_attention_heads": 12,
+     "num_hidden_layers": 28,
+     "num_key_value_heads": 2,
+     "rope_theta": 1000000.0,
+     "torch_dtype": "float32",
+     "vocab_size": 151649
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.1",
+   "vector_dim": 12288,
+   "vector_dropout_p": 0.2,
+   "vision_config": {
+     "_attn_implementation_autoset": true,
+     "hidden_size": 1152,
+     "image_size": 384,
+     "intermediate_size": 4304,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 27,
+     "patch_size": 14
+   }
+ }
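
Because auto_map points the Auto classes at the two Python files added in this commit, the model has to be loaded with trust_remote_code=True. A rough loading sketch; the repo id is assumed, not taken from this page:

    import torch
    from transformers import AutoConfig, AutoModel

    # Hypothetical repo id; auto_map routes these calls to
    # configuration_jasper_vl.JasperVLConfig and modeling_jasper_vl.JasperVL.
    cfg = AutoConfig.from_pretrained("infgrad/jasper-vl", trust_remote_code=True)
    print(cfg.vector_dim, cfg.num_img_tokens)  # 12288, 300 per this config.json
    model = AutoModel.from_pretrained(
        "infgrad/jasper-vl", trust_remote_code=True, torch_dtype=torch.bfloat16
    )
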
config_sentence_transformers.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "__version__": {
+     "sentence_transformers": "3.0.1",
+     "transformers": "4.42.3",
+     "pytorch": "2.3.1+cu121"
+   },
+   "prompts": {
+     "s2p_query": "Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: ",
+     "s2s_query": "Instruct: Retrieve semantically similar text.\nQuery: "
+   },
+   "default_prompt_name": null,
+   "similarity_fn_name": "cosine"
+ }
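
The prompts block defines named instruction prefixes that sentence-transformers can prepend at encode time; passages are encoded without a prompt and similarity is cosine. A usage sketch under the same assumption of a hypothetical repo id:

    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("infgrad/jasper-vl", trust_remote_code=True)
    # "s2p_query" comes from the prompts block above; documents get no prompt.
    q = model.encode(["what does git lfs do?"], prompt_name="s2p_query")
    p = model.encode(["Git LFS stores large files outside the normal git history."])
    print(model.similarity(q, p))  # cosine, per similarity_fn_name
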
configuration_jasper_vl.py ADDED
@@ -0,0 +1,72 @@
+ from transformers import Qwen2Config, PretrainedConfig, SiglipVisionConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class JasperVLConfig(PretrainedConfig):
+     model_type = "jasper_vl"
+
+     def __init__(
+         self,
+         is_text_encoder: bool = True,
+         vector_dim: int = 12288,
+         vector_dropout_p: float = 0.2,
+
+         num_img_tokens: int = 300,
+
+         img_start_token_id: int = 151646,
+         img_start_token: str = "<|jasper_img_start|>",
+
+         img_token_id: int = 151647,
+         img_token: str = "<|jasper_img_token|>",
+
+         img_end_token_id: int = 151648,
+         img_end_token: str = "<|jasper_img_end|>",
+
+         text_config=None,
+         vision_config=None,
+
+         **kwargs
+     ):
+         super().__init__(**kwargs)
+         if vector_dim not in (12288, 1024, 512, 256):
+             raise ValueError("vector_dim must be one of 12288, 1024, 512 or 256")
+         self.is_text_encoder = is_text_encoder
+         self.vector_dim = vector_dim
+         self.vector_dropout_p = vector_dropout_p
+
+         self.num_img_tokens = num_img_tokens
+
+         self.img_start_token_id = img_start_token_id
+         self.img_start_token = img_start_token
+
+         self.img_token_id = img_token_id
+         self.img_token = img_token
+
+         self.img_end_token_id = img_end_token_id
+         self.img_end_token = img_end_token
+
+         if text_config is None:
+             text_config = {}
+             logger.info("`text_config` is `None`. Initializing the `Qwen2Config` with default values.")
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info("`vision_config` is `None`. Initializing the `SiglipVisionConfig` with default values.")
+
+         self.text_config = Qwen2Config(**text_config)
+         self.vision_config = SiglipVisionConfig(**vision_config)
+
+     @classmethod
+     def from_text_vision_configs(cls, text_config: Qwen2Config, vision_config: SiglipVisionConfig, **kwargs):
+         r"""
+         Instantiate a [`JasperVLConfig`] (or a derived class) from a Qwen2 text model configuration and a SigLIP
+         vision model configuration.
+
+         Returns:
+             [`JasperVLConfig`]: An instance of a configuration object
+         """
+
+         return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
+
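
JasperVLConfig nests a Qwen2 text config and a SigLIP vision config and validates that vector_dim is one of the supported Matryoshka sizes. A small construction sketch; the sub-config values echo config.json above, everything else is illustrative:

    from transformers import Qwen2Config, SiglipVisionConfig
    from configuration_jasper_vl import JasperVLConfig  # the file added above, assumed to be on the path

    text_cfg = Qwen2Config(hidden_size=1536, num_hidden_layers=28, num_attention_heads=12,
                           num_key_value_heads=2, intermediate_size=8960, vocab_size=151649)
    vision_cfg = SiglipVisionConfig(hidden_size=1152, image_size=384, patch_size=14)
    cfg = JasperVLConfig.from_text_vision_configs(text_cfg, vision_cfg, vector_dim=1024)
    print(cfg.vector_dim, cfg.img_token_id)  # 1024 151647
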
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bc2ca2f4d5e9a843682d7f1875a707ae2f031aaa39e65026aedcbdcc60b5dc2
+ size 3986381144
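
The LFS pointer above records only the SHA-256 digest and byte size of the real weight file. A quick integrity check against those values, assuming the ~4 GB model.safetensors has already been downloaded to the working directory:

    import hashlib

    # Expected digest and size are taken from the pointer file; the local path is assumed.
    expected = "4bc2ca2f4d5e9a843682d7f1875a707ae2f031aaa39e65026aedcbdcc60b5dc2"
    h = hashlib.sha256()
    with open("model.safetensors", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    print(h.hexdigest() == expected)  # True for the full 3986381144-byte file
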
modeling_jasper_vl.py ADDED
@@ -0,0 +1,1213 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Qwen2 model."""
21
+ import os
22
+
23
+ from transformers import Qwen2Config
24
+ import random
25
+ import math
26
+ import warnings
27
+ from typing import List, Optional, Tuple, Union
28
+
29
+ import torch
30
+ import torch.nn.functional as F
31
+ import torch.utils.checkpoint
32
+ from torch import nn
33
+
34
+ from transformers.activations import ACT2FN
35
+ from transformers.cache_utils import Cache, DynamicCache
36
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, \
37
+ _prepare_4d_causal_attention_mask_for_sdpa, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
38
+ from transformers.modeling_outputs import BaseModelOutputWithPast
39
+ from transformers.modeling_utils import PreTrainedModel
40
+ from transformers.utils import (
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_flash_attn_greater_or_equal_2_10,
44
+ logging,
45
+ )
46
+
+ import inspect
+
+ from transformers.utils import is_flash_attn_2_available
+
+ # Note: `Qwen2FlashAttention2` below relies on flash-attention helpers (`flash_attn_func`,
+ # `flash_attn_varlen_func`, `index_first_axis`, `pad_input`, `unpad_input`,
+ # `_flash_supports_window_size`) that are not imported above; the conditional import here
+ # mirrors the upstream transformers Qwen2 implementation.
+ if is_flash_attn_2_available():
+     from flash_attn import flash_attn_func, flash_attn_varlen_func
+     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
+
+     _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
+ else:
+     _flash_supports_window_size = False
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
50
+ _CONFIG_FOR_DOC = "Qwen2Config"
51
+
52
+ QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
53
+ "Qwen/Qwen2-7B-beta",
54
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
55
+ ]
56
+
57
+
58
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
59
+ def _get_unpad_data(attention_mask):
60
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
61
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
62
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
63
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
64
+ return (
65
+ indices,
66
+ cu_seqlens,
67
+ max_seqlen_in_batch,
68
+ )
69
+
70
+
71
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
72
+ class Qwen2RMSNorm(nn.Module):
73
+ def __init__(self, hidden_size, eps=1e-6):
74
+ """
75
+ Qwen2RMSNorm is equivalent to T5LayerNorm
76
+ """
77
+ super().__init__()
78
+ self.weight = nn.Parameter(torch.ones(hidden_size))
79
+ self.variance_epsilon = eps
80
+
81
+ def forward(self, hidden_states):
82
+ input_dtype = hidden_states.dtype
83
+ hidden_states = hidden_states.to(torch.float32)
84
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
85
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
86
+ return self.weight * hidden_states.to(input_dtype)
87
+
88
+
89
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2
90
+ class Qwen2RotaryEmbedding(nn.Module):
91
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
92
+ super().__init__()
93
+
94
+ self.dim = dim
95
+ self.max_position_embeddings = max_position_embeddings
96
+ self.base = base
97
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
98
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
99
+
100
+ # Build here to make `torch.jit.trace` work.
101
+ self._set_cos_sin_cache(
102
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
103
+ )
104
+
105
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
106
+ self.max_seq_len_cached = seq_len
107
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
108
+
109
+ freqs = torch.outer(t, self.inv_freq)
110
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
111
+ emb = torch.cat((freqs, freqs), dim=-1)
112
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
113
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
114
+
115
+ def forward(self, x, seq_len=None):
116
+ # x: [bs, num_attention_heads, seq_len, head_size]
117
+ if seq_len > self.max_seq_len_cached:
118
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
119
+
120
+ return (
121
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
122
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
123
+ )
124
+
125
+
126
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
127
+ def rotate_half(x):
128
+ """Rotates half the hidden dims of the input."""
129
+ x1 = x[..., : x.shape[-1] // 2]
130
+ x2 = x[..., x.shape[-1] // 2:]
131
+ return torch.cat((-x2, x1), dim=-1)
132
+
133
+
134
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
135
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
136
+ """Applies Rotary Position Embedding to the query and key tensors.
137
+
138
+ Args:
139
+ q (`torch.Tensor`): The query tensor.
140
+ k (`torch.Tensor`): The key tensor.
141
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
142
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
143
+ position_ids (`torch.Tensor`):
144
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
145
+ used to pass offsetted position ids when working with a KV-cache.
146
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
147
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
148
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
149
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
150
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
151
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
152
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
153
+ Returns:
154
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
155
+ """
156
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
157
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
158
+ q_embed = (q * cos) + (rotate_half(q) * sin)
159
+ k_embed = (k * cos) + (rotate_half(k) * sin)
160
+ return q_embed, k_embed
161
+
162
+
163
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
164
+ class Qwen2MLP(nn.Module):
165
+ def __init__(self, config):
166
+ super().__init__()
167
+ self.config = config
168
+ self.hidden_size = config.hidden_size
169
+ self.intermediate_size = config.intermediate_size
170
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
171
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
172
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
173
+ self.act_fn = ACT2FN[config.hidden_act]
174
+
175
+ def forward(self, x):
176
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
177
+
178
+
179
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
180
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
181
+ """
182
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
183
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
184
+ """
185
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
186
+ if n_rep == 1:
187
+ return hidden_states
188
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
189
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
190
+
191
+
192
+ class Qwen2Attention(nn.Module):
193
+ """
194
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
195
+ and "Generating Long Sequences with Sparse Transformers".
196
+ """
197
+
198
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
199
+ super().__init__()
200
+ self.config = config
201
+ self.layer_idx = layer_idx
202
+ if layer_idx is None:
203
+ logger.warning_once(
204
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
205
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
206
+ "when creating this class."
207
+ )
208
+
209
+ self.hidden_size = config.hidden_size
210
+ self.num_heads = config.num_attention_heads
211
+ self.head_dim = self.hidden_size // self.num_heads
212
+ self.num_key_value_heads = config.num_key_value_heads
213
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
214
+ self.max_position_embeddings = config.max_position_embeddings
215
+ self.rope_theta = config.rope_theta
216
+ self.is_causal = True
217
+ self.attention_dropout = config.attention_dropout
218
+
219
+ if (self.head_dim * self.num_heads) != self.hidden_size:
220
+ raise ValueError(
221
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
222
+ f" and `num_heads`: {self.num_heads})."
223
+ )
224
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
225
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
226
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
227
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
228
+
229
+ self.rotary_emb = Qwen2RotaryEmbedding(
230
+ self.head_dim,
231
+ max_position_embeddings=self.max_position_embeddings,
232
+ base=self.rope_theta,
233
+ )
234
+
235
+ def forward(
236
+ self,
237
+ hidden_states: torch.Tensor,
238
+ attention_mask: Optional[torch.Tensor] = None,
239
+ position_ids: Optional[torch.LongTensor] = None,
240
+ past_key_value: Optional[Cache] = None,
241
+ output_attentions: bool = False,
242
+ use_cache: bool = False,
243
+ **kwargs,
244
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
245
+ if "padding_mask" in kwargs:
246
+ warnings.warn(
247
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
248
+ )
249
+ bsz, q_len, _ = hidden_states.size()
250
+
251
+ query_states = self.q_proj(hidden_states)
252
+ key_states = self.k_proj(hidden_states)
253
+ value_states = self.v_proj(hidden_states)
254
+
255
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
256
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
257
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
258
+
259
+ kv_seq_len = key_states.shape[-2]
260
+ if past_key_value is not None:
261
+ if self.layer_idx is None:
262
+ raise ValueError(
263
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
264
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
265
+ "with a layer index."
266
+ )
267
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
268
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
269
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
270
+
271
+ if past_key_value is not None:
272
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
273
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
274
+
275
+ # repeat k/v heads if n_kv_heads < n_heads
276
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
277
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
278
+
279
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
280
+
281
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
282
+ raise ValueError(
283
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
284
+ f" {attn_weights.size()}"
285
+ )
286
+
287
+ if attention_mask is not None:
288
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
289
+ raise ValueError(
290
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
291
+ )
292
+
293
+ attn_weights = attn_weights + attention_mask
294
+
295
+ # upcast attention to fp32
296
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
297
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
298
+ attn_output = torch.matmul(attn_weights, value_states)
299
+
300
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
301
+ raise ValueError(
302
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
303
+ f" {attn_output.size()}"
304
+ )
305
+
306
+ attn_output = attn_output.transpose(1, 2).contiguous()
307
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
308
+
309
+ attn_output = self.o_proj(attn_output)
310
+
311
+ if not output_attentions:
312
+ attn_weights = None
313
+
314
+ return attn_output, attn_weights, past_key_value
315
+
316
+
317
+ class Qwen2FlashAttention2(Qwen2Attention):
318
+ """
319
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
320
+ as the weights of the module stay untouched. The only required change is on the forward pass,
321
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
322
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
323
+ config.max_window_layers layers.
324
+ """
325
+
326
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
327
+ def __init__(self, *args, **kwargs):
328
+ super().__init__(*args, **kwargs)
329
+
330
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
331
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
332
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
333
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
334
+
335
+ def forward(
336
+ self,
337
+ hidden_states: torch.Tensor,
338
+ attention_mask: Optional[torch.Tensor] = None,
339
+ position_ids: Optional[torch.LongTensor] = None,
340
+ past_key_value: Optional[Cache] = None,
341
+ output_attentions: bool = False,
342
+ use_cache: bool = False,
343
+ is_causal: bool = False,
344
+ **kwargs,
345
+ ):
346
+ if "padding_mask" in kwargs:
347
+ warnings.warn(
348
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
349
+ )
350
+
351
+ # overwrite attention_mask with padding_mask
352
+ attention_mask = kwargs.pop("padding_mask")
353
+ bsz, q_len, _ = hidden_states.size()
354
+
355
+ query_states = self.q_proj(hidden_states)
356
+ key_states = self.k_proj(hidden_states)
357
+ value_states = self.v_proj(hidden_states)
358
+
359
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
360
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
361
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
362
+
363
+ kv_seq_len = key_states.shape[-2]
364
+ if past_key_value is not None:
365
+ if self.layer_idx is None:
366
+ raise ValueError(
367
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
368
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
369
+ "with a layer index."
370
+ )
371
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
372
+
373
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
374
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
375
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
376
+
377
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
378
+
379
+ use_sliding_windows = (
380
+ _flash_supports_window_size
381
+ and getattr(self.config, "sliding_window", None) is not None
382
+ and kv_seq_len > self.config.sliding_window
383
+ and self.config.use_sliding_window
384
+ )
385
+
386
+ if not _flash_supports_window_size:
387
+ logger.warning_once(
388
+ "The current flash attention version does not support sliding window attention; for a more memory-efficient implementation,"
389
+ " make sure to upgrade flash-attn library."
390
+ )
391
+
392
+ if past_key_value is not None:
393
+ # Activate slicing cache only if the config has a `sliding_window` attribute
394
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
395
+ if (
396
+ getattr(self.config, "sliding_window", None) is not None
397
+ and kv_seq_len > self.config.sliding_window
398
+ and cache_has_contents
399
+ ):
400
+ slicing_tokens = 1 - self.config.sliding_window
401
+
402
+ past_key = past_key_value[self.layer_idx][0]
403
+ past_value = past_key_value[self.layer_idx][1]
404
+
405
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
406
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
407
+
408
+ if past_key.shape[-2] != self.config.sliding_window - 1:
409
+ raise ValueError(
410
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
411
+ f" {past_key.shape}"
412
+ )
413
+
414
+ if attention_mask is not None:
415
+ attention_mask = attention_mask[:, slicing_tokens:]
416
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
417
+
418
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
419
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
420
+
421
+ # repeat k/v heads if n_kv_heads < n_heads
422
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
423
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
424
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
425
+
426
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
427
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
428
+ # cast them back to float16 just to be sure everything works as expected.
429
+ input_dtype = query_states.dtype
430
+ if input_dtype == torch.float32:
431
+ if torch.is_autocast_enabled():
432
+ target_dtype = torch.get_autocast_gpu_dtype()
433
+ # Handle the case where the model is quantized
434
+ elif hasattr(self.config, "_pre_quantization_dtype"):
435
+ target_dtype = self.config._pre_quantization_dtype
436
+ else:
437
+ target_dtype = self.q_proj.weight.dtype
438
+
439
+ logger.warning_once(
440
+ f"The input hidden states seem to have been silently cast to float32; this might be related to"
441
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
442
+ f" {target_dtype}."
443
+ )
444
+
445
+ query_states = query_states.to(target_dtype)
446
+ key_states = key_states.to(target_dtype)
447
+ value_states = value_states.to(target_dtype)
448
+
449
+ # Reshape to the expected shape for Flash Attention
450
+ query_states = query_states.transpose(1, 2)
451
+ key_states = key_states.transpose(1, 2)
452
+ value_states = value_states.transpose(1, 2)
453
+
454
+ attn_output = self._flash_attention_forward(
455
+ query_states,
456
+ key_states,
457
+ value_states,
458
+ attention_mask,
459
+ q_len,
460
+ dropout=dropout_rate,
461
+ use_sliding_windows=use_sliding_windows,
462
+ is_causal=is_causal
463
+ )
464
+
465
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
466
+ attn_output = self.o_proj(attn_output)
467
+
468
+ if not output_attentions:
469
+ attn_weights = None
470
+
471
+ return attn_output, attn_weights, past_key_value
472
+
473
+ def _flash_attention_forward(
474
+ self,
475
+ query_states,
476
+ key_states,
477
+ value_states,
478
+ attention_mask,
479
+ query_length,
480
+ dropout=0.0,
481
+ softmax_scale=None,
482
+ use_sliding_windows=False,
483
+ is_causal=True,
484
+ ):
485
+ """
486
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
487
+ the input is first unpadded, the attention scores are computed, and the final attention scores are re-padded.
488
+
489
+ Args:
490
+ query_states (`torch.Tensor`):
491
+ Input query states to be passed to Flash Attention API
492
+ key_states (`torch.Tensor`):
493
+ Input key states to be passed to Flash Attention API
494
+ value_states (`torch.Tensor`):
495
+ Input value states to be passed to Flash Attention API
496
+ attention_mask (`torch.Tensor`):
497
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
498
+ position of padding tokens and 1 for the position of non-padding tokens.
499
+ dropout (`int`, *optional*):
500
+ Attention dropout
501
+ softmax_scale (`float`, *optional*):
502
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
503
+ use_sliding_windows (`bool`, *optional*):
504
+ Whether to activate sliding window attention.
505
+ """
506
+ if not self._flash_attn_uses_top_left_mask:
507
+ causal = is_causal
508
+ else:
509
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
510
+ causal = is_causal and query_length != 1
511
+
512
+ # Decide whether to use SWA or not by layer index.
513
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
514
+ use_sliding_windows = False
515
+
516
+ # Contains at least one padding token in the sequence
517
+ if attention_mask is not None:
518
+ batch_size = query_states.shape[0]
519
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
520
+ query_states, key_states, value_states, attention_mask, query_length
521
+ )
522
+
523
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
524
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
525
+
526
+ if not use_sliding_windows:
527
+ attn_output_unpad = flash_attn_varlen_func(
528
+ query_states,
529
+ key_states,
530
+ value_states,
531
+ cu_seqlens_q=cu_seqlens_q,
532
+ cu_seqlens_k=cu_seqlens_k,
533
+ max_seqlen_q=max_seqlen_in_batch_q,
534
+ max_seqlen_k=max_seqlen_in_batch_k,
535
+ dropout_p=dropout,
536
+ softmax_scale=softmax_scale,
537
+ causal=causal,
538
+ )
539
+ else:
540
+ attn_output_unpad = flash_attn_varlen_func(
541
+ query_states,
542
+ key_states,
543
+ value_states,
544
+ cu_seqlens_q=cu_seqlens_q,
545
+ cu_seqlens_k=cu_seqlens_k,
546
+ max_seqlen_q=max_seqlen_in_batch_q,
547
+ max_seqlen_k=max_seqlen_in_batch_k,
548
+ dropout_p=dropout,
549
+ softmax_scale=softmax_scale,
550
+ causal=causal,
551
+ window_size=(self.config.sliding_window, self.config.sliding_window),
552
+ )
553
+
554
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
555
+ else:
556
+ if not use_sliding_windows:
557
+ attn_output = flash_attn_func(
558
+ query_states,
559
+ key_states,
560
+ value_states,
561
+ dropout,
562
+ softmax_scale=softmax_scale,
563
+ causal=causal,
564
+ )
565
+ else:
566
+ attn_output = flash_attn_func(
567
+ query_states,
568
+ key_states,
569
+ value_states,
570
+ dropout,
571
+ softmax_scale=softmax_scale,
572
+ causal=causal,
573
+ window_size=(self.config.sliding_window, self.config.sliding_window),
574
+ )
575
+
576
+ return attn_output
577
+
578
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
579
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
580
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
581
+
582
+ # On the first iteration we need to properly re-create the padding mask
583
+ # by slicing it on the proper place
584
+ if kv_seq_len != attention_mask.shape[-1]:
585
+ attention_mask_num_tokens = attention_mask.shape[-1]
586
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len:]
587
+
588
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
589
+
590
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
591
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
592
+
593
+ if query_length == kv_seq_len:
594
+ query_layer = index_first_axis(
595
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
596
+ )
597
+ cu_seqlens_q = cu_seqlens_k
598
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
599
+ indices_q = indices_k
600
+ elif query_length == 1:
601
+ max_seqlen_in_batch_q = 1
602
+ cu_seqlens_q = torch.arange(
603
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
604
+ ) # There is a memcpy here, that is very bad.
605
+ indices_q = cu_seqlens_q[:-1]
606
+ query_layer = query_layer.squeeze(1)
607
+ else:
608
+ # The -q_len: slice assumes left padding.
609
+ attention_mask = attention_mask[:, -query_length:]
610
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
611
+
612
+ return (
613
+ query_layer,
614
+ key_layer,
615
+ value_layer,
616
+ indices_q,
617
+ (cu_seqlens_q, cu_seqlens_k),
618
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
619
+ )
620
+
621
+
622
+ # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2
623
+ class Qwen2SdpaAttention(Qwen2Attention):
624
+ """
625
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
626
+ `Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to the
627
+ SDPA API.
628
+ """
629
+
630
+ # Adapted from Qwen2Attention.forward
631
+ def forward(
632
+ self,
633
+ hidden_states: torch.Tensor,
634
+ attention_mask: Optional[torch.Tensor] = None,
635
+ position_ids: Optional[torch.LongTensor] = None,
636
+ past_key_value: Optional[Cache] = None,
637
+ output_attentions: bool = False,
638
+ use_cache: bool = False,
639
+ is_causal: bool = True,
640
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
641
+ if output_attentions:
642
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
643
+ logger.warning_once(
644
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
645
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
646
+ )
647
+ return super().forward(
648
+ hidden_states=hidden_states,
649
+ attention_mask=attention_mask,
650
+ position_ids=position_ids,
651
+ past_key_value=past_key_value,
652
+ output_attentions=output_attentions,
653
+ use_cache=use_cache,
654
+ is_causal=is_causal
655
+ )
656
+
657
+ bsz, q_len, _ = hidden_states.size()
658
+
659
+ query_states = self.q_proj(hidden_states)
660
+ key_states = self.k_proj(hidden_states)
661
+ value_states = self.v_proj(hidden_states)
662
+
663
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
664
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
665
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
666
+
667
+ kv_seq_len = key_states.shape[-2]
668
+ if past_key_value is not None:
669
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
670
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
671
+
672
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
673
+
674
+ if past_key_value is not None:
675
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
676
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
677
+
678
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
679
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
680
+
681
+ if attention_mask is not None:
682
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
683
+ raise ValueError(
684
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
685
+ )
686
+
687
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
688
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
689
+ if query_states.device.type == "cuda" and attention_mask is not None:
690
+ query_states = query_states.contiguous()
691
+ key_states = key_states.contiguous()
692
+ value_states = value_states.contiguous()
693
+
694
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
695
+ query_states,
696
+ key_states,
697
+ value_states,
698
+ attn_mask=attention_mask,
699
+ dropout_p=self.attention_dropout if self.training else 0.0,
700
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
701
+ is_causal=is_causal and attention_mask is None and q_len > 1,
702
+ )
703
+
704
+ attn_output = attn_output.transpose(1, 2).contiguous()
705
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
706
+
707
+ attn_output = self.o_proj(attn_output)
708
+
709
+ return attn_output, None, past_key_value
710
+
711
+
712
+ QWEN2_ATTENTION_CLASSES = {
713
+ "eager": Qwen2Attention,
714
+ "flash_attention_2": Qwen2FlashAttention2,
715
+ "sdpa": Qwen2SdpaAttention,
716
+ }
717
+
718
+
719
+ class Qwen2DecoderLayer(nn.Module):
720
+ def __init__(self, config: Qwen2Config, layer_idx: int):
721
+ super().__init__()
722
+ self.hidden_size = config.hidden_size
723
+
724
+ if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
725
+ logger.warning_once(
726
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
727
+ "unexpected results may be encountered."
728
+ )
729
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
730
+
731
+ self.mlp = Qwen2MLP(config)
732
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
733
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
734
+
735
+ def forward(
736
+ self,
737
+ hidden_states: torch.Tensor,
738
+ attention_mask: Optional[torch.Tensor] = None,
739
+ position_ids: Optional[torch.LongTensor] = None,
740
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
741
+ output_attentions: Optional[bool] = False,
742
+ use_cache: Optional[bool] = False,
743
+ is_causal: Optional[bool] = True,
744
+ **kwargs,
745
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
746
+ if "padding_mask" in kwargs:
747
+ warnings.warn(
748
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
749
+ "Please make sure to use `attention_mask` instead."
750
+ )
751
+ """
752
+ Args:
753
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
754
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
755
+ `(batch, sequence_length)` where padding elements are indicated by 0.
756
+ output_attentions (`bool`, *optional*):
757
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
758
+ returned tensors for more detail.
759
+ use_cache (`bool`, *optional*):
760
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
761
+ (see `past_key_values`).
762
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
763
+ """
764
+
765
+ residual = hidden_states
766
+
767
+ hidden_states = self.input_layernorm(hidden_states)
768
+
769
+ # Self Attention
770
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
771
+ hidden_states=hidden_states,
772
+ attention_mask=attention_mask,
773
+ position_ids=position_ids,
774
+ past_key_value=past_key_value,
775
+ output_attentions=output_attentions,
776
+ use_cache=use_cache,
777
+ is_causal=is_causal,
778
+ )
779
+ hidden_states = residual + hidden_states
780
+
781
+ # Fully Connected
782
+ residual = hidden_states
783
+ hidden_states = self.post_attention_layernorm(hidden_states)
784
+ hidden_states = self.mlp(hidden_states)
785
+ hidden_states = residual + hidden_states
786
+
787
+ outputs = (hidden_states,)
788
+
789
+ if output_attentions:
790
+ outputs += (self_attn_weights,)
791
+
792
+ if use_cache:
793
+ outputs += (present_key_value,)
794
+
795
+ return outputs
796
+
797
+
798
+ QWEN2_START_DOCSTRING = r"""
799
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
800
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
801
+ etc.)
802
+
803
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
804
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
805
+ and behavior.
806
+
807
+ Parameters:
808
+ config ([`Qwen2Config`]):
809
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
810
+ load the weights associated with the model, only the configuration. Check out the
811
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
812
+ """
813
+
814
+
815
+ @add_start_docstrings(
816
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
817
+ QWEN2_START_DOCSTRING,
818
+ )
819
+ class Qwen2PreTrainedModel(PreTrainedModel):
820
+ config_class = Qwen2Config
821
+ base_model_prefix = "model"
822
+ supports_gradient_checkpointing = True
823
+ _no_split_modules = ["Qwen2DecoderLayer"]
824
+ _skip_keys_device_placement = "past_key_values"
825
+ _supports_flash_attn_2 = True
826
+ _supports_sdpa = True
827
+ _supports_cache_class = True
828
+
829
+ def _init_weights(self, module):
830
+ std = self.config.initializer_range
831
+ if isinstance(module, nn.Linear):
832
+ module.weight.data.normal_(mean=0.0, std=std)
833
+ if module.bias is not None:
834
+ module.bias.data.zero_()
835
+ elif isinstance(module, nn.Embedding):
836
+ module.weight.data.normal_(mean=0.0, std=std)
837
+ if module.padding_idx is not None:
838
+ module.weight.data[module.padding_idx].zero_()
839
+
840
+
841
+ QWEN2_INPUTS_DOCSTRING = r"""
842
+ Args:
843
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
844
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
845
+ it.
846
+
847
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
848
+ [`PreTrainedTokenizer.__call__`] for details.
849
+
850
+ [What are input IDs?](../glossary#input-ids)
851
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
852
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
853
+
854
+ - 1 for tokens that are **not masked**,
855
+ - 0 for tokens that are **masked**.
856
+
857
+ [What are attention masks?](../glossary#attention-mask)
858
+
859
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
860
+ [`PreTrainedTokenizer.__call__`] for details.
861
+
862
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
863
+ `past_key_values`).
864
+
865
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
866
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
867
+ information on the default strategy.
868
+
869
+ - 1 indicates the head is **not masked**,
870
+ - 0 indicates the head is **masked**.
871
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
872
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
873
+ config.n_positions - 1]`.
874
+
875
+ [What are position IDs?](../glossary#position-ids)
876
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
877
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
878
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
879
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
880
+
881
+ Two formats are allowed:
882
+ - a [`~cache_utils.Cache`] instance;
883
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
884
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
885
+ cache format.
886
+
887
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
888
+ legacy cache format will be returned.
889
+
890
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
891
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
892
+ of shape `(batch_size, sequence_length)`.
893
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
894
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
895
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
896
+ model's internal embedding lookup matrix.
897
+ use_cache (`bool`, *optional*):
898
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
899
+ `past_key_values`).
900
+ output_attentions (`bool`, *optional*):
901
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
902
+ tensors for more detail.
903
+ output_hidden_states (`bool`, *optional*):
904
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
905
+ more detail.
906
+ return_dict (`bool`, *optional*):
907
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
908
+ """
909
+
910
+
911
+ @add_start_docstrings(
912
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
913
+ QWEN2_START_DOCSTRING,
914
+ )
915
+ class Qwen2Model(Qwen2PreTrainedModel):
916
+ """
917
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
918
+
919
+ Args:
920
+ config: Qwen2Config
921
+ """
922
+
923
+ def __init__(self, config: Qwen2Config):
924
+ super().__init__(config)
925
+ self.padding_idx = config.pad_token_id
926
+ self.vocab_size = config.vocab_size
927
+
928
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
929
+ self.layers = nn.ModuleList(
930
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
931
+ )
932
+ self._attn_implementation = config._attn_implementation
933
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
934
+
935
+ self.gradient_checkpointing = False
936
+ # Initialize weights and apply final processing
937
+ self.post_init()
938
+
939
+ def get_input_embeddings(self):
940
+ return self.embed_tokens
941
+
942
+ def set_input_embeddings(self, value):
943
+ self.embed_tokens = value
944
+
945
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
946
+ def forward(
947
+ self,
948
+ input_ids: torch.LongTensor = None,
949
+ attention_mask: Optional[torch.Tensor] = None,
950
+ position_ids: Optional[torch.LongTensor] = None,
951
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
952
+ inputs_embeds: Optional[torch.FloatTensor] = None,
953
+ use_cache: Optional[bool] = None,
954
+ output_attentions: Optional[bool] = None,
955
+ output_hidden_states: Optional[bool] = None,
956
+ return_dict: Optional[bool] = None,
957
+ labels: Optional[torch.LongTensor] = None,
958
+ is_causal: Optional[bool] = False,
959
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
960
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
961
+ output_hidden_states = (
962
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
963
+ )
964
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
965
+
966
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
967
+
968
+ # retrieve input_ids and inputs_embeds
969
+ if input_ids is not None and inputs_embeds is not None:
970
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
971
+ elif input_ids is not None:
972
+ batch_size, seq_length = input_ids.shape
973
+ elif inputs_embeds is not None:
974
+ batch_size, seq_length, _ = inputs_embeds.shape
975
+ else:
976
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
977
+
978
+ if self.gradient_checkpointing and self.training:
979
+ if use_cache:
980
+ logger.warning_once(
981
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
982
+ )
983
+ use_cache = False
984
+
985
+ past_key_values_length = 0
986
+
987
+ if use_cache:
988
+ use_legacy_cache = not isinstance(past_key_values, Cache)
989
+ if use_legacy_cache:
990
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
991
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
992
+
993
+ if position_ids is None:
994
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
995
+ position_ids = torch.arange(
996
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
997
+ )
998
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
999
+ else:
1000
+ position_ids = position_ids.view(-1, seq_length).long()
1001
+
1002
+ if inputs_embeds is None:
1003
+ inputs_embeds = self.embed_tokens(input_ids)
1004
+
1005
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
1006
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1007
+ if is_padding_right:
1008
+ raise ValueError(
1009
+ "You are attempting to perform batched generation with padding_side='right'"
1010
+ "; this may lead to unexpected behaviour for the Flash Attention version of Qwen2. Make sure to "
1011
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1012
+ )
1013
+
1014
+ if self._attn_implementation == "flash_attention_2":
1015
+ # 2d mask is passed through the layers
1016
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1017
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1018
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1019
+ # the manual implementation that requires a 4D causal mask in all cases.
1020
+ if is_causal:
1021
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1022
+ attention_mask,
1023
+ (batch_size, seq_length),
1024
+ inputs_embeds,
1025
+ past_key_values_length,
1026
+ )
1027
+ else:
1028
+ attention_mask = _prepare_4d_attention_mask_for_sdpa(
1029
+ attention_mask, inputs_embeds.dtype
1030
+ )
1031
+ else:
1032
+ # 4d mask is passed through the layers
1033
+ if is_causal:
1034
+ # Causal mask with -3.3895e+38 where no attention should be
1035
+ attention_mask = _prepare_4d_causal_attention_mask(
1036
+ attention_mask,
1037
+ (batch_size, seq_length),
1038
+ inputs_embeds,
1039
+ past_key_values_length,
1040
+ sliding_window=self.config.sliding_window,
1041
+ )
1042
+ else:
1043
+ # Shape: batch_size, 1, query_length, key_value_length
1044
+ attention_mask = _prepare_4d_attention_mask(
1045
+ attention_mask, inputs_embeds.dtype
1046
+ )
1047
+
1048
+ hidden_states = inputs_embeds
1049
+
1050
+ # decoder layers
1051
+ all_hidden_states = () if output_hidden_states else None
1052
+ all_self_attns = () if output_attentions else None
1053
+ next_decoder_cache = None
1054
+
1055
+ for decoder_layer in self.layers:
1056
+ if output_hidden_states:
1057
+ all_hidden_states += (hidden_states,)
1058
+
1059
+ if self.gradient_checkpointing and self.training:
1060
+ layer_outputs = self._gradient_checkpointing_func(
1061
+ decoder_layer.__call__,
1062
+ hidden_states,
1063
+ attention_mask,
1064
+ position_ids,
1065
+ past_key_values,
1066
+ output_attentions,
1067
+ use_cache,
1068
+ is_causal,
1069
+ )
1070
+ else:
1071
+ layer_outputs = decoder_layer(
1072
+ hidden_states,
1073
+ attention_mask=attention_mask,
1074
+ position_ids=position_ids,
1075
+ past_key_value=past_key_values,
1076
+ output_attentions=output_attentions,
1077
+ use_cache=use_cache,
1078
+ is_causal=is_causal,
1079
+ )
1080
+
1081
+ hidden_states = layer_outputs[0]
1082
+
1083
+ if use_cache:
1084
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1085
+
1086
+ if output_attentions:
1087
+ all_self_attns += (layer_outputs[1],)
1088
+
1089
+ hidden_states = self.norm(hidden_states)
1090
+
1091
+ # add hidden states from the last decoder layer
1092
+ if output_hidden_states:
1093
+ all_hidden_states += (hidden_states,)
1094
+
1095
+ next_cache = None
1096
+ if use_cache:
1097
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1098
+
1099
+ if not return_dict:
1100
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1101
+ return BaseModelOutputWithPast(
1102
+ last_hidden_state=hidden_states,
1103
+ past_key_values=next_cache,
1104
+ hidden_states=all_hidden_states,
1105
+ attentions=all_self_attns,
1106
+ )
1107
+
1108
+
1109
+ ####################################################################################################################
1110
+ ####################################################################################################################
1111
+ ####################################################################################################################
1112
+ ####################################################################################################################
1113
+ ### code for Jasper
1114
+ ####################################################################################################################
1115
+ from .configuration_jasper_vl import JasperVLConfig
1116
+ from transformers import SiglipVisionModel
1117
+
1118
+
1119
+ class JasperVL(PreTrainedModel):
1120
+ config_class = JasperVLConfig
1121
+ _supports_sdpa = True
1122
+ _supports_flash_attn_2 = True
1123
+ base_model_prefix = "model"
1124
+ supports_gradient_checkpointing = True
1125
+
1126
+ def __init__(self, config: JasperVLConfig):
1127
+ super().__init__(config)
1128
+ self.model = Qwen2Model(config.text_config)
1129
+ self.config = config
1130
+ if not config.is_text_encoder:
1131
+ self.vision_model = SiglipVisionModel(config.vision_config)
1132
+ self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(
1133
+ (self.config.num_img_tokens, config.text_config.hidden_size)
1134
+ )
1135
+ self.vec_dropout = nn.Dropout1d(config.vector_dropout_p, inplace=True)
1136
+
1137
+ self.vector_linear_12288 = nn.Linear(config.text_config.hidden_size, 12288, bias=True)
1138
+ self.vector_linear_1024 = nn.Linear(config.text_config.hidden_size, 1024, bias=True)
1139
+ self.vector_linear_512 = nn.Linear(config.text_config.hidden_size, 512, bias=True)
1140
+ self.vector_linear_256 = nn.Linear(config.text_config.hidden_size, 256, bias=True)
1141
+
1142
+ # Initialize weights and apply final processing
1143
+ self.post_init()
1144
+
1145
+ def get_input_embeddings(self):
1146
+ return self.model.embed_tokens
1147
+
1148
+ def set_input_embeddings(self, value):
1149
+ self.model.embed_tokens = value
1150
+
1151
+ def forward(
1152
+ self,
1153
+ input_ids: torch.LongTensor = None,
1154
+ attention_mask: Optional[torch.Tensor] = None,
1155
+ pixel_values: Optional[torch.Tensor] = None,
1156
+ *args,
1157
+ **kwargs
1158
+ ):
1159
+ # bsz*seq_len*hidden_size
1160
+ if self.config.is_text_encoder or pixel_values is None:
1161
+ last_hidden_state = self.model(
1162
+ input_ids,
1163
+ attention_mask=attention_mask,
1164
+ )[0]
1165
+ else:
1166
+ inputs_embeds = self.model.embed_tokens(input_ids)
1167
+ # print("inputs_embeds.shape", inputs_embeds.shape)
1168
+ B, N, C = inputs_embeds.shape
1169
+ inputs_embeds = inputs_embeds.reshape(B * N, C)
1170
+
1171
+ vit_embeds = self.vision_model(pixel_values=pixel_values)["last_hidden_state"]
1172
+ # print("vit_embeds.shape", vit_embeds.shape)
1173
+ vit_embeds = self.adaptive_avg_pool2d(vit_embeds)
1174
+ # print("vit_embeds_adapt.shape", vit_embeds.shape)
1175
+ # concatenate the start and end token embeddings (see the commented-out block below)
1176
+ # vit_embeds = torch.cat(
1177
+ # (self.vs_token_emb.expand((B, 1, C)), vit_embeds, self.ve_token_emb.expand((B, 1, C))),
1178
+ # dim=1,
1179
+ # )
1180
+ # print("vit_embeds_adapt_cat.shape", vit_embeds.shape)
1181
+ # TODO: to simplify the code, both the vis start and vis end positions are replaced with img_token_id
1182
+ selected = (input_ids.reshape(B * N) == self.config.img_token_id)
1183
+ # print("selected.shape", selected.shape)
1184
+ # print("selected[:4]", selected[:4])
1185
+ # print("selected[285:305]", selected[285:305])
1186
+ inputs_embeds[selected] = vit_embeds.reshape(-1, C)
1187
+ inputs_embeds = inputs_embeds.reshape(B, N, C)
1188
+ last_hidden_state = self.model(
1189
+ inputs_embeds=inputs_embeds,
1190
+ attention_mask=attention_mask,
1191
+ )[0]
1192
+ last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0)
1193
+ # the default padding side is right; keep the first token (no dropout on it) so the pooled vector cannot be all zeros
1194
+
1195
+ self.vec_dropout(last_hidden[:, 1:, :])
1196
+ mean_last_hidden = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
1197
+
1198
+ vectors_12288 = self.vector_linear_12288(mean_last_hidden)
1199
+ vectors_1024 = self.vector_linear_1024(mean_last_hidden)
1200
+ vectors_512 = self.vector_linear_512(mean_last_hidden)
1201
+ vectors_256 = self.vector_linear_256(mean_last_hidden)
1202
+
1203
+ sentence_embedding = {
1204
+ "vectors_12288": vectors_12288,
1205
+ "vectors_1024": vectors_1024,
1206
+ "vectors_512": vectors_512,
1207
+ "vectors_256": vectors_256,
1208
+ }[f"vectors_{self.config.vector_dim}"]
1209
+ return {
1210
+ "sentence_embedding": sentence_embedding,
1211
+ "all_vectors": [vectors_12288, vectors_1024, vectors_512, vectors_256],
1212
+ }
1213
+
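The `forward` above returns a dict whose `sentence_embedding` is picked by `config.vector_dim`, alongside all four projection heads in `all_vectors`. A minimal text-only usage sketch follows; the repository id is a hypothetical placeholder, and `trust_remote_code=True` is assumed so that `AutoModel`/`AutoTokenizer` resolve to the custom `JasperVL` and tokenizer classes shipped in this commit.

```python
# Minimal sketch, not an official snippet: the repo id and remote-code resolution are assumptions.
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

repo = "infgrad/jasper-vl"  # hypothetical placeholder for the actual repository id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, trust_remote_code=True).eval()

texts = ["A man is eating food.", "A man is eating a piece of bread."]
batch = tokenizer(texts, padding=True, truncation=True, max_length=2048, return_tensors="pt")

with torch.no_grad():
    out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])

emb = F.normalize(out["sentence_embedding"], dim=-1)  # shape: (batch, config.vector_dim)
print(emb @ emb.T)                                    # cosine similarity matrix
```

Regardless of which head `vector_dim` selects, `out["all_vectors"]` still exposes the 12288/1024/512/256-dimensional projections.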
modules.json ADDED
@@ -0,0 +1,8 @@
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ }
8
+ ]
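`modules.json` registers a single `sentence_transformers.models.Transformer` module, so loading through sentence-transformers should also be possible, assuming the custom remote code returns the `sentence_embedding` feature that `encode` consumes. A sketch under those assumptions, with a placeholder repository id:

```python
# Sketch only; whether this exact revision loads cleanly via sentence-transformers is assumed, not verified here.
from sentence_transformers import SentenceTransformer

repo = "infgrad/jasper-vl"  # hypothetical placeholder
st_model = SentenceTransformer(repo, trust_remote_code=True)

vectors = st_model.encode(["hello world", "an unrelated sentence"], normalize_embeddings=True)
print(vectors.shape)  # (2, config.vector_dim)
```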
preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "do_convert_rgb": null,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "SiglipImageProcessor",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "processor_class": "SiglipProcessor",
18
+ "resample": 3,
19
+ "rescale_factor": 0.00392156862745098,
20
+ "size": {
21
+ "height": 384,
22
+ "width": 384
23
+ }
24
+ }
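This is a standard SigLIP image processor (384×384 inputs, mean/std of 0.5). A hedged sketch of multimodal encoding follows: the prompt layout (start marker, one `<|jasper_img_token|>` placeholder per pooled vision embedding, end marker, then the text) is inferred from the `forward` code above rather than from official documentation, and the repository id is again a placeholder.

```python
# Hedged multimodal sketch: prompt layout and repo id are assumptions, not documented usage.
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer, SiglipImageProcessor

repo = "infgrad/jasper-vl"  # hypothetical placeholder
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, trust_remote_code=True).eval()
processor = SiglipImageProcessor.from_pretrained(repo)  # reads preprocessor_config.json

image = Image.open("example.jpg").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt")["pixel_values"]  # (1, 3, 384, 384)
pixel_values = pixel_values.to(model.dtype)  # match the model's parameter dtype

# One <|jasper_img_token|> per pooled vision embedding, framed by the start/end markers,
# so that the masked assignment in forward() lines up with the adaptive-pooled SigLIP features.
img_block = (
    "<|jasper_img_start|>"
    + "<|jasper_img_token|>" * model.config.num_img_tokens
    + "<|jasper_img_end|>"
)
batch = tokenizer([img_block + "a photo of a cat"], return_tensors="pt")

with torch.no_grad():
    out = model(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
        pixel_values=pixel_values,
    )
print(out["sentence_embedding"].shape)
```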
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "max_seq_length": 2048,
3
+ "do_lower_case": false
4
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|endoftext|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenization_qwen.py ADDED
@@ -0,0 +1,267 @@
1
+
2
+ from typing import List, Optional
3
+ from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer as OriginalQwen2Tokenizer
4
+ from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast as OriginalQwen2TokenizerFast
5
+ from tokenizers import processors
6
+
7
+ VOCAB_FILES_NAMES = {
8
+ "vocab_file": "vocab.json",
9
+ "merges_file": "merges.txt",
10
+ "tokenizer_file": "tokenizer.json",
11
+ }
12
+
13
+ class Qwen2Tokenizer(OriginalQwen2Tokenizer):
14
+ """
15
+ Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.
16
+
17
+ Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will
18
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
19
+
20
+ ```python
21
+ >>> from transformers import Qwen2Tokenizer
22
+
23
+ >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
24
+ >>> tokenizer("Hello world")["input_ids"]
25
+ [9707, 1879]
26
+
27
+ >>> tokenizer(" Hello world")["input_ids"]
28
+ [21927, 1879]
29
+ ```
30
+ This is expected.
31
+
32
+ You should not use GPT2Tokenizer instead, because of the different pretokenization rules.
33
+
34
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
35
+ this superclass for more information regarding those methods.
36
+
37
+ Args:
38
+ vocab_file (`str`):
39
+ Path to the vocabulary file.
40
+ merges_file (`str`):
41
+ Path to the merges file.
42
+ errors (`str`, *optional*, defaults to `"replace"`):
43
+ Paradigm to follow when decoding bytes to UTF-8. See
44
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
45
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
46
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
47
+ token instead.
48
+ bos_token (`str`, *optional*):
49
+ The beginning of sequence token. Not applicable for this tokenizer.
50
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
51
+ The end of sequence token.
52
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
53
+ The token used for padding, for example when batching sequences of different lengths.
54
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
55
+ Whether or not the model should cleanup the spaces that were added when splitting the input text during the
56
+ tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
57
+ split_special_tokens (`bool`, *optional*, defaults to `False`):
58
+ Whether or not the special tokens should be split during the tokenization process. The default behavior is
59
+ to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>") =
60
+ ['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will give `['<',
61
+ '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
62
+ add_eos_token (`bool`, *optional*, defaults to `False`):
63
+ Whether or not to add an `eos_token` at the end of sequences.
64
+ """
65
+
66
+ def __init__(
67
+ self,
68
+ vocab_file,
69
+ merges_file,
70
+ errors="replace",
71
+ unk_token="<|endoftext|>",
72
+ bos_token=None,
73
+ eos_token="<|endoftext|>",
74
+ pad_token="<|endoftext|>",
75
+ clean_up_tokenization_spaces=False,
76
+ split_special_tokens=False,
77
+ add_eos_token=False,
78
+ **kwargs,
79
+ ):
80
+ # The add_eos_token code was inspired by the LlamaTokenizer
81
+ self.add_eos_token = add_eos_token
82
+
83
+ super().__init__(
84
+ vocab_file=vocab_file,
85
+ merges_file=merges_file,
86
+ errors=errors,
87
+ unk_token=unk_token,
88
+ bos_token=bos_token,
89
+ eos_token=eos_token,
90
+ pad_token=pad_token,
91
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
92
+ split_special_tokens=split_special_tokens,
93
+ add_eos_token=add_eos_token,
94
+ **kwargs,
95
+ )
96
+
97
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
98
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
99
+
100
+ output = token_ids_0 + eos_token_id
101
+
102
+ if token_ids_1 is not None:
103
+ output = output + token_ids_1 + eos_token_id
104
+
105
+ return output
106
+
107
+ def get_special_tokens_mask(
108
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
109
+ ) -> List[int]:
110
+ """
111
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
112
+ special tokens using the tokenizer `prepare_for_model` method.
113
+
114
+ Args:
115
+ token_ids_0 (`List[int]`):
116
+ List of IDs.
117
+ token_ids_1 (`List[int]`, *optional*):
118
+ Optional second list of IDs for sequence pairs.
119
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
120
+ Whether or not the token list is already formatted with special tokens for the model.
121
+
122
+ Returns:
123
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
124
+ """
125
+ if already_has_special_tokens:
126
+ return super().get_special_tokens_mask(
127
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
128
+ )
129
+
130
+ eos_token_id = [1] if self.add_eos_token else []
131
+
132
+ if token_ids_1 is None:
133
+ return ([0] * len(token_ids_0)) + eos_token_id
134
+ return (
135
+ ([0] * len(token_ids_0))
136
+ + eos_token_id
137
+ + ([0] * len(token_ids_1))
138
+ + eos_token_id
139
+ )
140
+
141
+ def create_token_type_ids_from_sequences(
142
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
143
+ ) -> List[int]:
144
+ """
145
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
146
+ sequence pair mask has the following format:
147
+
148
+ ```
149
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
150
+ | first sequence | second sequence |
151
+ ```
152
+
153
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
154
+
155
+ Args:
156
+ token_ids_0 (`List[int]`):
157
+ List of ids.
158
+ token_ids_1 (`List[int]`, *optional*):
159
+ Optional second list of IDs for sequence pairs.
160
+
161
+ Returns:
162
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
163
+ """
164
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
165
+
166
+ output = [0] * len(token_ids_0 + eos_token_id)
167
+
168
+ if token_ids_1 is not None:
169
+ output += [1] * len(token_ids_1 + eos_token_id)
170
+
171
+ return output
172
+
173
+ class Qwen2TokenizerFast(OriginalQwen2TokenizerFast):
174
+ """
175
+ Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
176
+ Byte-Pair-Encoding.
177
+
178
+ Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will
179
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
180
+
181
+ ```python
182
+ >>> from transformers import Qwen2TokenizerFast
183
+
184
+ >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
185
+ >>> tokenizer("Hello world")["input_ids"]
186
+ [9707, 1879]
187
+
188
+ >>> tokenizer(" Hello world")["input_ids"]
189
+ [21927, 1879]
190
+ ```
191
+ This is expected.
192
+
193
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
194
+ refer to this superclass for more information regarding those methods.
195
+
196
+ Args:
197
+ vocab_file (`str`, *optional*):
198
+ Path to the vocabulary file.
199
+ merges_file (`str`, *optional*):
200
+ Path to the merges file.
201
+ tokenizer_file (`str`, *optional*):
202
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
203
+ contains everything needed to load the tokenizer.
204
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
205
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
206
+ token instead. Not applicable to this tokenizer.
207
+ bos_token (`str`, *optional*):
208
+ The beginning of sequence token. Not applicable for this tokenizer.
209
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
210
+ The end of sequence token.
211
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
212
+ The token used for padding, for example when batching sequences of different lengths.
213
+ add_eos_token (`bool`, *optional*, defaults to `False`):
214
+ Whether or not to add an `eos_token` at the end of sequences.
215
+ """
216
+
217
+ slow_tokenizer_class = Qwen2Tokenizer
218
+ padding_side = "left"
219
+
220
+ def __init__(
221
+ self,
222
+ vocab_file=None,
223
+ merges_file=None,
224
+ tokenizer_file=None,
225
+ unk_token="<|endoftext|>",
226
+ bos_token=None,
227
+ eos_token="<|endoftext|>",
228
+ pad_token="<|endoftext|>",
229
+ add_eos_token=False,
230
+ **kwargs,
231
+ ):
232
+ super().__init__(
233
+ vocab_file=vocab_file,
234
+ merges_file=merges_file,
235
+ tokenizer_file=tokenizer_file,
236
+ unk_token=unk_token,
237
+ bos_token=bos_token,
238
+ eos_token=eos_token,
239
+ pad_token=pad_token,
240
+ **kwargs,
241
+ )
242
+
243
+ self._add_eos_token = add_eos_token
244
+ self.update_post_processor()
245
+
246
+ def update_post_processor(self):
247
+ """
248
+ Updates the underlying post processor with the current `eos_token`.
249
+ """
250
+ eos = self.eos_token
251
+ eos_token_id = self.eos_token_id
252
+ if eos is None and self.add_eos_token:
253
+ raise ValueError("add_eos_token = True but eos_token = None")
254
+
255
+ single = f"$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
256
+ pair = f"{single} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
257
+
258
+ special_tokens = []
259
+ if self.add_eos_token:
260
+ special_tokens.append((eos, eos_token_id))
261
+ self._tokenizer.post_processor = processors.TemplateProcessing(
262
+ single=single, pair=pair, special_tokens=special_tokens
263
+ )
264
+
265
+ @property
266
+ def add_eos_token(self):
267
+ return self._add_eos_token
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9201dff769c05001d0cd535e29102c00086a0a55390de1de3b5e325ce03d920e
3
+ size 11419626
tokenizer_config.json ADDED
@@ -0,0 +1,74 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151646": {
29
+ "content": "<|jasper_img_start|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151647": {
37
+ "content": "<|jasper_img_token|>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "151648": {
45
+ "content": "<|jasper_img_end|>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ }
52
+ },
53
+ "additional_special_tokens": [
54
+ "<|im_start|>",
55
+ "<|im_end|>"
56
+ ],
57
+ "auto_map": {
58
+ "AutoTokenizer": [
59
+ "tokenization_qwen.Qwen2Tokenizer",
60
+ "tokenization_qwen.Qwen2TokenizerFast"
61
+ ]
62
+ },
63
+ "bos_token": null,
64
+ "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
65
+ "clean_up_tokenization_spaces": false,
66
+ "eos_token": "<|endoftext|>",
67
+ "errors": "replace",
68
+ "model_max_length": 32768,
69
+ "pad_token": "<|endoftext|>",
70
+ "split_special_tokens": false,
71
+ "tokenizer_class": "Qwen2Tokenizer",
72
+ "unk_token": null,
73
+ "add_eos_token": true
74
+ }
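`tokenizer_config.json` wires `AutoTokenizer` to the custom classes in `tokenization_qwen.py` via `auto_map` and sets `add_eos_token: true`, so encoded sequences should end with `<|endoftext|>` (id 151643). A small sketch, with a placeholder repository id:

```python
# Sketch: repo id is a placeholder; trust_remote_code=True lets auto_map resolve to
# tokenization_qwen.Qwen2Tokenizer / Qwen2TokenizerFast from this commit.
from transformers import AutoTokenizer

repo = "infgrad/jasper-vl"  # hypothetical placeholder
tok = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)

ids = tok("an example sentence")["input_ids"]
print(ids[-1], tok.eos_token_id)  # both expected to be 151643 because add_eos_token is true
```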
vocab.json ADDED
The diff for this file is too large to render. See raw diff