ycchen committed on
Commit
b8b41fe
1 Parent(s): 9d3195e

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "_name_or_path": "Qwen-14B-8bit-wikitext-ex512-len4096-hf-safetensors",
+   "architectures": [
+     "QWenLMHeadModel"
+   ],
+   "attn_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_qwen.QWenConfig",
+     "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
+   },
+   "bf16": false,
+   "emb_dropout_prob": 0.0,
+   "fp16": true,
+   "fp32": false,
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 27392,
+   "kv_channels": 128,
+   "layer_norm_epsilon": 1e-06,
+   "max_position_embeddings": 8192,
+   "model_type": "qwen",
+   "no_bias": true,
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "onnx_safe": null,
+   "quantization_config": {
+     "batch_size": 1,
+     "bits": 8,
+     "block_name_to_quantize": null,
+     "damp_percent": 0.01,
+     "dataset": null,
+     "desc_act": false,
+     "disable_exllama": false,
+     "group_size": 128,
+     "model_seqlen": null,
+     "module_name_preceding_first_block": null,
+     "pad_token_id": null,
+     "quant_method": "gptq",
+     "sym": true,
+     "tokenizer": null,
+     "true_sequential": true,
+     "use_cuda_fp16": false
+   },
+   "rotary_emb_base": 10000,
+   "rotary_pct": 1.0,
+   "scale_attn_weights": true,
+   "seq_length": 2048,
+   "softmax_in_fp32": false,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "QWenTokenizer",
+   "torch_dtype": "float16",
+   "transformers_version": "4.32.0",
+   "use_cache": true,
+   "use_cache_kernel": false,
+   "use_cache_quantization": false,
+   "use_dynamic_ntk": true,
+   "use_flash_attn": true,
+   "use_logn_attn": true,
+   "vocab_size": 152064
+ }
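
A checkpoint carrying this config (note the GPTQ `quantization_config` with `bits: 8`, and the `auto_map` pointing at the bundled configuration_qwen.py / modeling_qwen.py) would typically be loaded along these lines — a minimal sketch, assuming a CUDA machine with transformers>=4.32 plus auto-gptq/optimum installed; the local path is a placeholder:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./Qwen-14B-8bit-wikitext-ex512-len4096-hf-safetensors"  # placeholder path
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",       # GPTQ kernels run on GPU
    trust_remote_code=True,  # resolves auto_map to the bundled QWen classes
)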
config_raw.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "Qwen/Qwen-14B",
+   "architectures": [
+     "QWenLMHeadModel"
+   ],
+   "attn_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "Qwen/Qwen-14B--configuration_qwen.QWenConfig",
+     "AutoModelForCausalLM": "Qwen/Qwen-14B--modeling_qwen.QWenLMHeadModel"
+   },
+   "bf16": true,
+   "emb_dropout_prob": 0.0,
+   "fp16": false,
+   "fp32": false,
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 27392,
+   "kv_channels": 128,
+   "layer_norm_epsilon": 1e-06,
+   "max_position_embeddings": 8192,
+   "model_type": "qwen",
+   "no_bias": true,
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "onnx_safe": null,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 1.0,
+   "scale_attn_weights": true,
+   "seq_length": 2048,
+   "softmax_in_fp32": false,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "QWenTokenizer",
+   "torch_dtype": "float16",
+   "transformers_version": "4.32.0",
+   "use_cache": true,
+   "use_cache_kernel": false,
+   "use_cache_quantization": false,
+   "use_dynamic_ntk": true,
+   "use_flash_attn": true,
+   "use_logn_attn": true,
+   "vocab_size": 152064
+ }
configuration_qwen.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from transformers import PretrainedConfig
+
+
+ class QWenConfig(PretrainedConfig):
+     model_type = "qwen"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         emb_dropout_prob=0.0,
+         attn_dropout_prob=0.0,
+         layer_norm_epsilon=1e-6,
+         initializer_range=0.02,
+         max_position_embeddings=8192,
+         scale_attn_weights=True,
+         use_cache=True,
+         bf16=False,
+         fp16=False,
+         fp32=False,
+         kv_channels=128,
+         rotary_pct=1.0,
+         rotary_emb_base=10000,
+         use_dynamic_ntk=True,
+         use_logn_attn=True,
+         use_flash_attn="auto",
+         intermediate_size=22016,
+         no_bias=True,
+         tie_word_embeddings=False,
+         use_cache_quantization=False,
+         use_cache_kernel=False,
+         softmax_in_fp32=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.emb_dropout_prob = emb_dropout_prob
+         self.attn_dropout_prob = attn_dropout_prob
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.max_position_embeddings = max_position_embeddings
+         self.bf16 = bf16
+         self.fp16 = fp16
+         self.fp32 = fp32
+         self.kv_channels = kv_channels
+         self.rotary_pct = rotary_pct
+         self.rotary_emb_base = rotary_emb_base
+         self.use_dynamic_ntk = use_dynamic_ntk
+         self.use_logn_attn = use_logn_attn
+         self.use_flash_attn = use_flash_attn
+         self.no_bias = no_bias
+         self.use_cache_quantization = use_cache_quantization
+         self.use_cache_kernel = use_cache_kernel
+         self.softmax_in_fp32 = softmax_in_fp32
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs
+         )
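
A quick sketch of instantiating this config directly, with the Qwen-14B geometry from config.json above (the import path assumes the file sits next to the caller):

from configuration_qwen import QWenConfig

config = QWenConfig(
    vocab_size=152064,
    hidden_size=5120,
    num_hidden_layers=40,
    num_attention_heads=40,
    intermediate_size=27392,
    fp16=True,
)
print(config.kv_channels)  # 128, the default retained from the signature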
cpp_kernels.py ADDED
@@ -0,0 +1,55 @@
+ from torch.utils import cpp_extension
+ import pathlib
+ import os
+ import subprocess
+
+ def _get_cuda_bare_metal_version(cuda_dir):
+     raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
+                                          universal_newlines=True)
+     output = raw_output.split()
+     release_idx = output.index("release") + 1
+     release = output[release_idx].split(".")
+     bare_metal_major = release[0]
+     bare_metal_minor = release[1][0]
+
+     return raw_output, bare_metal_major, bare_metal_minor
+
+ def _create_build_dir(buildpath):
+     try:
+         os.mkdir(buildpath)
+     except OSError:
+         if not os.path.isdir(buildpath):
+             print(f"Creation of the build directory {buildpath} failed")
+
+ # Check if cuda 11 is installed for compute capability 8.0
+ cc_flag = []
+ _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
+ if int(bare_metal_major) >= 11:
+     cc_flag.append('-gencode')
+     cc_flag.append('arch=compute_80,code=sm_80')
+     if int(bare_metal_minor) >= 7:
+         cc_flag.append('-gencode')
+         cc_flag.append('arch=compute_90,code=sm_90')
+
+ # Build path
+ srcpath = pathlib.Path(__file__).parent.absolute()
+ buildpath = srcpath / 'build'
+ _create_build_dir(buildpath)
+
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
+     return cpp_extension.load(
+         name=name,
+         sources=sources,
+         build_directory=buildpath,
+         extra_cflags=['-O3', ],
+         extra_cuda_cflags=['-O3',
+                            '-gencode', 'arch=compute_70,code=sm_70',
+                            '--use_fast_math'] + extra_cuda_flags + cc_flag,
+         verbose=1
+     )
+
+ extra_flags = []
+
+ cache_autogptq_cuda_256_sources = ["./cache_autogptq_cuda_256.cpp",
+                                    "./cache_autogptq_cuda_kernel_256.cu"]
+ cache_autogptq_cuda_256 = _cpp_extention_load_helper("cache_autogptq_cuda_256", cache_autogptq_cuda_256_sources, extra_flags)
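
Note that this module JIT-compiles the quantized-cache CUDA kernel via torch's cpp_extension at import time, so the first import can take a while and fails outright when nvcc or the .cpp/.cu sources are missing. A guarded-import sketch (illustrative, mirroring how modeling_qwen.py consumes the module):

try:
    from cpp_kernels import cache_autogptq_cuda_256
except Exception:  # nvcc missing, unsupported arch, or build failure
    cache_autogptq_cuda_256 = None  # callers fall back to plain torch matmuls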
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.32.0"
+ }
modeling_qwen.py ADDED
@@ -0,0 +1,1435 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import importlib
+ import math
+ from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from torch.cuda.amp import autocast
+
+ from torch.nn import CrossEntropyLoss
+ from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
+ from transformers.generation.logits_process import LogitsProcessorList
+
+ if TYPE_CHECKING:
+     from transformers.generation.streamers import BaseStreamer
+ from transformers.generation.utils import GenerateOutput
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ try:
+     from einops import rearrange
+ except ImportError:
+     rearrange = None
+ from torch import nn
+
+ SUPPORT_CUDA = torch.cuda.is_available()
+ SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
+ SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
+ SUPPORT_TORCH2 = hasattr(torch, '__version__') and int(torch.__version__.split(".")[0]) >= 2
+
+
+ from .configuration_qwen import QWenConfig
+ from .qwen_generation_utils import (
+     HistoryType,
+     make_context,
+     decode_tokens,
+     get_stop_words_ids,
+     StopWordsLogitsProcessor,
+ )
+
+
+ logger = logging.get_logger(__name__)
+
+ _CHECKPOINT_FOR_DOC = "qwen"
+ _CONFIG_FOR_DOC = "QWenConfig"
+
+ QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
+
+ _ERROR_BAD_CHAT_FORMAT = """\
+ We detect that you are probably using the pretrained model (rather than the chat model) for chatting, since the chat_format in generation_config is not "chatml".
+ If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
+ 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
+ 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
+ """
+
+ _SENTINEL = object()
+ _ERROR_STREAM_IN_CHAT = """\
+ Passing the argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
+ 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
+ """
+
+ _ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\
+ We detect that you have activated flash attention support, but are running model computation on CPU. Please make sure that your input data has been placed on the GPU. If you actually want to run CPU computation, please follow the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained).
+ 检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。
+ """
+
+ apply_rotary_emb_func = None
+ rms_norm = None
+ flash_attn_unpadded_func = None
+
+ def _import_flash_attn():
+     global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func
+     try:
+         from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
+         apply_rotary_emb_func = __apply_rotary_emb_func
+     except ImportError:
+         logger.warn(
+             "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
+             "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
+         )
+
+     try:
+         from flash_attn.ops.rms_norm import rms_norm as __rms_norm
+         rms_norm = __rms_norm
+     except ImportError:
+         logger.warn(
+             "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
+             "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
+         )
+
+     try:
+         import flash_attn
+         if not hasattr(flash_attn, '__version__'):
+             from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
+         else:
+             if int(flash_attn.__version__.split(".")[0]) >= 2:
+                 from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
+             else:
+                 from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
+         flash_attn_unpadded_func = __flash_attn_unpadded_func
+     except ImportError:
+         logger.warn(
+             "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
+             "https://github.com/Dao-AILab/flash-attention"
+         )
+
+ def quantize_cache_v(fdata, bits, qmax, qmin):
+     # b, s, head, h-dim -> b, head, s, h-dim
+     qtype = torch.uint8
+     device = fdata.device
+     shape = fdata.shape
+
+     fdata_cal = torch.flatten(fdata, 2)
+     fmax = torch.amax(fdata_cal, dim=-1, keepdim=True)
+     fmin = torch.amin(fdata_cal, dim=-1, keepdim=True)
+     # Compute params
+     if qmax.device != fmax.device:
+         qmax = qmax.to(device)
+         qmin = qmin.to(device)
+     scale = (fmax - fmin) / (qmax - qmin)
+     zero = qmin - fmin / scale
+     scale = scale.unsqueeze(-1).repeat(1, 1, shape[2], 1).contiguous()
+     zero = zero.unsqueeze(-1).repeat(1, 1, shape[2], 1).contiguous()
+     # Quantize
+     res_data = fdata / scale + zero
+     qdata = torch.clamp(res_data, qmin, qmax).to(qtype)
+     return qdata.contiguous(), scale, zero
+
+ def dequantize_cache_torch(qdata, scale, zero):
+     data = scale * (qdata - zero)
+     return data
+
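A round-trip sanity check of the two cache helpers above — a sketch only, with toy shapes following the (batch, head, seq, head-dim) layout noted in the comment:

import torch

fdata = torch.randn(1, 2, 4, 8)
qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=torch.float)
qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=torch.float)
qdata, scale, zero = quantize_cache_v(fdata, bits=8, qmax=qmax, qmin=qmin)
recovered = dequantize_cache_torch(qdata, scale, zero)
print((recovered - fdata).abs().max())  # small but nonzero 8-bit rounding error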
+ class FlashSelfAttention(torch.nn.Module):
+     def __init__(
+         self,
+         causal=False,
+         softmax_scale=None,
+         attention_dropout=0.0,
+     ):
+         super().__init__()
+         assert flash_attn_unpadded_func is not None, (
+             "Please install FlashAttention first, " "e.g., with pip install flash-attn"
+         )
+         assert (
+             rearrange is not None
+         ), "Please install einops first, e.g., with pip install einops"
+         self.causal = causal
+         self.softmax_scale = softmax_scale
+         self.dropout_p = attention_dropout
+
+     def unpad_input(self, hidden_states, attention_mask):
+         valid_mask = attention_mask.squeeze(1).squeeze(1).eq(0)
+         seqlens_in_batch = valid_mask.sum(dim=-1, dtype=torch.int32)
+         indices = torch.nonzero(valid_mask.flatten(), as_tuple=False).flatten()
+         max_seqlen_in_batch = seqlens_in_batch.max().item()
+         cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+         hidden_states = hidden_states[indices]
+         return hidden_states, indices, cu_seqlens, max_seqlen_in_batch
+
+     def pad_input(self, hidden_states, indices, batch, seqlen):
+         output = torch.zeros(batch * seqlen, *hidden_states.shape[1:], device=hidden_states.device,
+                              dtype=hidden_states.dtype)
+         output[indices] = hidden_states
+         return rearrange(output, '(b s) ... -> b s ...', b=batch)
+
+     def forward(self, q, k, v, attention_mask=None):
+         assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
+         assert all((i.is_cuda for i in (q, k, v)))
+         batch_size, seqlen_q = q.shape[0], q.shape[1]
+         seqlen_k = k.shape[1]
+         seqlen_out = seqlen_q
+
+         q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
+         cu_seqlens_q = torch.arange(
+             0,
+             (batch_size + 1) * seqlen_q,
+             step=seqlen_q,
+             dtype=torch.int32,
+             device=q.device,
+         )
+
+         if batch_size > 1 and attention_mask is not None:
+             k, indices_k, cu_seqlens_k, seqlen_k = self.unpad_input(k, attention_mask)
+             if q.size(0) == v.size(0):
+                 q = q[indices_k]
+                 cu_seqlens_q = cu_seqlens_k
+                 seqlen_q = seqlen_k
+             v = v[indices_k]
+         else:
+             cu_seqlens_k = torch.arange(
+                 0,
+                 (batch_size + 1) * seqlen_k,
+                 step=seqlen_k,
+                 dtype=torch.int32,
+                 device=q.device,
+             )
+
+         if self.training:
+             assert seqlen_k == seqlen_q
+             is_causal = self.causal
+             dropout_p = self.dropout_p
+         else:
+             is_causal = seqlen_q == seqlen_k
+             dropout_p = 0
+
+         output = flash_attn_unpadded_func(
+             q,
+             k,
+             v,
+             cu_seqlens_q,
+             cu_seqlens_k,
+             seqlen_q,
+             seqlen_k,
+             dropout_p,
+             softmax_scale=self.softmax_scale,
+             causal=is_causal,
+         )
+         if batch_size > 1 and attention_mask is not None and seqlen_q == seqlen_k:
+             output = self.pad_input(output, indices_k, batch_size, seqlen_out)
+         else:
+             new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:]
+             output = output.view(new_shape)
+         return output
+
+
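The bookkeeping above packs (batch, seq) into a single token dimension and hands the flash-attn varlen kernel cumulative sequence offsets instead of a padding mask. A tiny sketch of what those offsets look like for equal-length sequences:

import torch

batch_size, seqlen_q = 3, 5
cu_seqlens_q = torch.arange(
    0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32
)
print(cu_seqlens_q)  # tensor([ 0,  5, 10, 15], dtype=torch.int32)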
+ class QWenAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+
+         self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
+         self.seq_length = config.seq_length
+
+         self.hidden_size = config.hidden_size
+         self.split_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+
+         self.use_flash_attn = config.use_flash_attn
+         self.scale_attn_weights = True
+
+         self.projection_size = config.kv_channels * config.num_attention_heads
+
+         assert self.projection_size % config.num_attention_heads == 0
+         self.hidden_size_per_attention_head = (
+             self.projection_size // config.num_attention_heads
+         )
+
+         self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)
+
+         self.c_proj = nn.Linear(
+             config.hidden_size, self.projection_size, bias=not config.no_bias
+         )
+
+         self.is_fp32 = not (config.bf16 or config.fp16)
+         if (
+             self.use_flash_attn
+             and flash_attn_unpadded_func is not None
+             and not self.is_fp32
+         ):
+             self.core_attention_flash = FlashSelfAttention(
+                 causal=True, attention_dropout=config.attn_dropout_prob
+             )
+         self.bf16 = config.bf16
+
+         self.use_dynamic_ntk = config.use_dynamic_ntk
+         self.use_logn_attn = config.use_logn_attn
+
+         logn_list = [
+             math.log(i, self.seq_length) if i > self.seq_length else 1
+             for i in range(1, 32768)
+         ]
+         logn_tensor = torch.tensor(logn_list)[None, :, None, None]
+         self.register_buffer("logn_tensor", logn_tensor, persistent=False)
+
+         self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
+         self.softmax_in_fp32 = config.softmax_in_fp32 if hasattr(config, 'softmax_in_fp32') else False
+         self.use_cache_quantization = config.use_cache_quantization if hasattr(config, 'use_cache_quantization') else False
+         self.use_cache_kernel = config.use_cache_kernel if hasattr(config, 'use_cache_kernel') else False
+         cache_dtype = torch.float
+         if self.bf16:
+             cache_dtype = torch.bfloat16
+         elif config.fp16:
+             cache_dtype = torch.float16
+         self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)
+         self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)
+
+         if config.use_cache_quantization and config.use_cache_kernel:
+             from .cpp_kernels import cache_autogptq_cuda_256
+             try:
+                 self.cache_kernels = cache_autogptq_cuda_256
+             except ImportError:
+                 self.cache_kernels = None
+
+     def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):
+         device = query.device
+         if self.use_cache_quantization:
+             qk, qk_scale, qk_zero = key
+             if self.use_cache_kernel and self.cache_kernels is not None:
+                 shape = query.shape[:-1] + (qk.shape[-2],)
+                 attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)
+                 self.cache_kernels.vecquant8matmul_batched_faster_old(
+                     query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),
+                     qk.transpose(-1, -2).contiguous(),
+                     attn_weights,
+                     qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),
+                     qk_zero.contiguous() if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())
+                 # attn_weights = attn_weights.to(query.dtype).contiguous()
+             else:
+                 key = dequantize_cache_torch(qk, qk_scale, qk_zero)
+                 attn_weights = torch.matmul(query, key.transpose(-1, -2))
+         else:
+             attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+         if self.scale_attn_weights:
+             if self.use_cache_quantization:
+                 size_temp = value[0].size(-1)
+             else:
+                 size_temp = value.size(-1)
+             attn_weights = attn_weights / torch.full(
+                 [],
+                 size_temp ** 0.5,
+                 dtype=attn_weights.dtype,
+                 device=attn_weights.device,
+             )
+         if self.use_cache_quantization:
+             query_length, key_length = query.size(-2), key[0].size(-2)
+         else:
+             query_length, key_length = query.size(-2), key.size(-2)
+         causal_mask = registered_causal_mask[
+             :, :, key_length - query_length : key_length, :key_length
+         ]
+         mask_value = torch.finfo(attn_weights.dtype).min
+         mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(
+             attn_weights.device
+         )
+         attn_weights = torch.where(
+             causal_mask, attn_weights.to(attn_weights.dtype), mask_value
+         )
+
+         if attention_mask is not None:
+             attn_weights = attn_weights + attention_mask
+
+         if self.softmax_in_fp32:
+             attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)
+         else:
+             attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         attn_weights = attn_weights.type(query.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         if self.use_cache_quantization:
+             qv, qv_scale, qv_zero = value
+             if self.use_cache_kernel and self.cache_kernels is not None:
+                 shape = attn_weights.shape[:-1] + (query.shape[-1],)
+                 attn_output = torch.zeros(shape, dtype=torch.float16, device=device)
+                 self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(
+                     attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),
+                     qv.contiguous(),  # dtype: int32
+                     attn_output,
+                     qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),
+                     qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())
+                 if attn_output.dtype != query.dtype:
+                     attn_output = attn_output.to(query.dtype)
+                     attn_weights = attn_weights.to(query.dtype)
+             else:
+                 value = dequantize_cache_torch(qv, qv_scale, qv_zero)
+                 attn_output = torch.matmul(attn_weights, value)
+         else:
+             attn_output = torch.matmul(attn_weights, value)
+
+         attn_output = attn_output.transpose(1, 2)
+
+         return attn_output, attn_weights
+
+     def _upcast_and_reordered_attn(
+         self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None
+     ):
+         bsz, num_heads, q_seq_len, dk = query.size()
+         _, _, k_seq_len, _ = key.size()
+
+         attn_weights = torch.empty(
+             bsz * num_heads,
+             q_seq_len,
+             k_seq_len,
+             dtype=torch.float32,
+             device=query.device,
+         )
+
+         scale_factor = 1.0
+         if self.scale_attn_weights:
+             scale_factor /= float(value.size(-1)) ** 0.5
+
+         with autocast(enabled=False):
+             q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
+                 -1, dk, k_seq_len
+             )
+             attn_weights = torch.baddbmm(
+                 attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
+             )
+             attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+         query_length, key_length = query.size(-2), key.size(-2)
+         causal_mask = registered_causal_mask[
+             :, :, key_length - query_length : key_length, :key_length
+         ]
+         mask_value = torch.finfo(attn_weights.dtype).min
+         mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
+             attn_weights.device
+         )
+         attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+         if attention_mask is not None:
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         if attn_weights.dtype != torch.float32:
+             raise RuntimeError(
+                 "Error with upcasting, attn_weights does not have dtype torch.float32"
+             )
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+
+         return attn_output, attn_weights
+
+     def _split_heads(self, tensor, num_heads, attn_head_size):
+         new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
+         tensor = tensor.view(new_shape)
+         return tensor
+
+     def _merge_heads(self, tensor, num_heads, attn_head_size):
+         tensor = tensor.contiguous()
+         new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
+         return tensor.view(new_shape)
+
+     def forward(
+         self,
+         hidden_states: Optional[Tuple[torch.FloatTensor]],
+         rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,
+         registered_causal_mask: Optional[torch.Tensor] = None,
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+     ):
+         mixed_x_layer = self.c_attn(hidden_states)
+
+         query, key, value = mixed_x_layer.split(self.split_size, dim=2)
+
+         query = self._split_heads(query, self.num_heads, self.head_dim)
+         key = self._split_heads(key, self.num_heads, self.head_dim)
+         value = self._split_heads(value, self.num_heads, self.head_dim)
+
+         if rotary_pos_emb_list is not None:
+             cur_len = query.shape[1]
+             if len(rotary_pos_emb_list) == 1:
+                 rotary_pos_emb = rotary_pos_emb_list[0]
+                 rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
+                 rotary_pos_emb = (rotary_pos_emb,) * 2
+                 q_pos_emb, k_pos_emb = rotary_pos_emb
+                 # Slice the pos emb for current inference
+                 query = apply_rotary_pos_emb(query, q_pos_emb)
+                 key = apply_rotary_pos_emb(key, k_pos_emb)
+             else:
+                 query_list = []
+                 key_list = []
+                 for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):
+                     rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
+                     rotary_pos_emb = (rotary_pos_emb,) * 2
+                     q_pos_emb, k_pos_emb = rotary_pos_emb
+                     # Slice the pos emb for current inference
+                     query_list += [apply_rotary_pos_emb(query[i:i+1, :, :], q_pos_emb)]
+                     key_list += [apply_rotary_pos_emb(key[i:i+1, :, :], k_pos_emb)]
+                 query = torch.cat(query_list, dim=0)
+                 key = torch.cat(key_list, dim=0)
+
+         if self.use_cache_quantization:
+             key = quantize_cache_v(key.permute(0, 2, 1, 3),
+                                    bits=8,
+                                    qmin=self.cache_qmin,
+                                    qmax=self.cache_qmax)
+             value = quantize_cache_v(value.permute(0, 2, 1, 3),
+                                      bits=8,
+                                      qmin=self.cache_qmin,
+                                      qmax=self.cache_qmax)
+
+
+         if layer_past is not None:
+             past_key, past_value = layer_past[0], layer_past[1]
+             if self.use_cache_quantization:
+                 # use_cache_quantization:
+                 #     present=((q_key,key_scale,key_zero_point),
+                 #              (q_value,value_scale,value_zero_point))
+                 key = (torch.cat((past_key[0], key[0]), dim=2),
+                        torch.cat((past_key[1], key[1]), dim=2),
+                        torch.cat((past_key[2], key[2]), dim=2))
+                 value = (torch.cat((past_value[0], value[0]), dim=2),
+                          torch.cat((past_value[1], value[1]), dim=2),
+                          torch.cat((past_value[2], value[2]), dim=2))
+             else:
+                 # not use_cache_quantization:
+                 #     present=(key,value)
+                 key = torch.cat((past_key, key), dim=1)
+                 value = torch.cat((past_value, value), dim=1)
+
+         if use_cache:
+             present = (key, value)
+         else:
+             present = None
+
+         if self.use_logn_attn and not self.training:
+             if self.use_cache_quantization:
+                 seq_start = key[0].size(2) - query.size(1)
+                 seq_end = key[0].size(2)
+             else:
+                 seq_start = key.size(1) - query.size(1)
+                 seq_end = key.size(1)
+             logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query)
+             query = query * logn_tensor.expand_as(query)
+
+         if (
+             self.use_flash_attn
+             and flash_attn_unpadded_func is not None
+             and not self.is_fp32
+             and query.is_cuda
+         ):
+             q, k, v = query, key, value
+             attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)
+         else:
+             query = query.permute(0, 2, 1, 3)
+             if not self.use_cache_quantization:
+                 key = key.permute(0, 2, 1, 3)
+                 value = value.permute(0, 2, 1, 3)
+             if (
+                 registered_causal_mask is None
+                 and self.use_flash_attn
+                 and flash_attn_unpadded_func is not None
+                 and not self.is_fp32
+                 and not query.is_cuda
+             ):
+                 raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)
+
+             if not self.use_cache_quantization and SUPPORT_TORCH2:
+                 causal_mask = registered_causal_mask[
+                     :, :, key.size(-2) - query.size(-2): key.size(-2), :key.size(-2)
+                 ]
+                 if attention_mask is not None:
+                     attention_mask = attention_mask.expand(
+                         -1, -1, causal_mask.size(2), -1
+                     ).masked_fill(~causal_mask, torch.finfo(query.dtype).min)
+                 else:
+                     attention_mask = causal_mask
+                 attn_output = F.scaled_dot_product_attention(
+                     query, key, value, attn_mask=attention_mask
+                 ).transpose(1, 2)
+                 attn_weight = None
+             else:
+                 attn_output, attn_weight = self._attn(
+                     query, key, value, registered_causal_mask, attention_mask, head_mask
+                 )
+         context_layer = self._merge_heads(
+             attn_output, self.num_heads, self.head_dim
+         )
+
+         attn_output = self.c_proj(context_layer)
+
+         outputs = (attn_output, present)
+         if output_attentions:
+             if (
+                 self.use_flash_attn
+                 and flash_attn_unpadded_func is not None
+                 and not self.is_fp32
+             ):
+                 raise ValueError("Cannot output attentions while using flash-attn")
+             else:
+                 outputs += (attn_weight,)
+
+         return outputs
+
+
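The logn_tensor buffer built in QWenAttention.__init__ above implements log-n attention scaling: queries at positions past the training length (config.seq_length, 2048 in this checkpoint) are scaled by log base seq_length of the position, and earlier positions are left at 1. A worked example of the schedule:

import math

seq_length = 2048
for i in (1024, 2048, 4096, 8192):
    scale = math.log(i, seq_length) if i > seq_length else 1
    print(i, round(float(scale), 4))
# 1024 1.0, 2048 1.0, 4096 1.0909, 8192 1.1818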
+ class QWenMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.w1 = nn.Linear(
+             config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
+         )
+         self.w2 = nn.Linear(
+             config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
+         )
+         ff_dim_in = config.intermediate_size // 2
+         self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
+
+     def forward(self, hidden_states):
+         a1 = self.w1(hidden_states)
+         a2 = self.w2(hidden_states)
+         intermediate_parallel = a1 * F.silu(a2)
+         output = self.c_proj(intermediate_parallel)
+         return output
+
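QWenMLP above is a SwiGLU-style feed-forward: two parallel projections to intermediate_size // 2 (13696 for the 27392 in config.json), gated as w1(x) * silu(w2(x)) and projected back to hidden_size. A toy-sized shape sketch:

import torch
from torch import nn
import torch.nn.functional as F

hidden, inter = 8, 32  # toy sizes; the checkpoint uses 5120 and 27392
w1 = nn.Linear(hidden, inter // 2, bias=False)
w2 = nn.Linear(hidden, inter // 2, bias=False)
c_proj = nn.Linear(inter // 2, hidden, bias=False)
x = torch.randn(1, 4, hidden)
print(c_proj(w1(x) * F.silu(w2(x))).shape)  # torch.Size([1, 4, 8])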
+ class QWenBlock(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         hidden_size = config.hidden_size
+         self.bf16 = config.bf16
+
+         self.ln_1 = RMSNorm(
+             hidden_size,
+             eps=config.layer_norm_epsilon,
+         )
+         self.attn = QWenAttention(config)
+         self.ln_2 = RMSNorm(
+             hidden_size,
+             eps=config.layer_norm_epsilon,
+         )
+
+         self.mlp = QWenMLP(config)
+
+     def forward(
+         self,
+         hidden_states: Optional[Tuple[torch.FloatTensor]],
+         rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,
+         registered_causal_mask: Optional[torch.Tensor] = None,
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+     ):
+         layernorm_output = self.ln_1(hidden_states)
+
+         attn_outputs = self.attn(
+             layernorm_output,
+             rotary_pos_emb_list,
+             registered_causal_mask=registered_causal_mask,
+             layer_past=layer_past,
+             attention_mask=attention_mask,
+             head_mask=head_mask,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+         )
+         attn_output = attn_outputs[0]
+
+         outputs = attn_outputs[1:]
+
+         residual = hidden_states
+         layernorm_input = attn_output + residual
+
+         layernorm_output = self.ln_2(layernorm_input)
+
+         residual = layernorm_input
+         mlp_output = self.mlp(layernorm_output)
+         hidden_states = residual + mlp_output
+
+         if use_cache:
+             outputs = (hidden_states,) + outputs
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         return outputs
+
+
+ class QWenPreTrainedModel(PreTrainedModel):
+     config_class = QWenConfig
+     base_model_prefix = "transformer"
+     is_parallelizable = False
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["QWenBlock"]
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, RMSNorm):
+             module.weight.data.fill_(1.0)
+
+         for name, p in module.named_parameters():
+             if name == "c_proj.weight":
+                 p.data.normal_(
+                     mean=0.0,
+                     std=(
+                         self.config.initializer_range
+                         / math.sqrt(2 * self.config.num_hidden_layers)
+                     ),
+                 )
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, QWenModel):
+             module.gradient_checkpointing = value
+
+
+ class QWenModel(QWenPreTrainedModel):
+     _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.vocab_size = config.vocab_size
+         self.num_hidden_layers = config.num_hidden_layers
+         self.embed_dim = config.hidden_size
+         self.use_cache_quantization = self.config.use_cache_quantization if hasattr(self.config, 'use_cache_quantization') else False
+
+         self.gradient_checkpointing = False
+         self.use_dynamic_ntk = config.use_dynamic_ntk
+         self.seq_length = config.seq_length
+
+         self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
+
+         self.drop = nn.Dropout(config.emb_dropout_prob)
+
+         if config.rotary_pct == 1.0:
+             self.rotary_ndims = None
+         else:
+             assert config.rotary_pct < 1
+             self.rotary_ndims = int(
+                 config.kv_channels * config.rotary_pct
+             )
+         dim = (
+             self.rotary_ndims
+             if self.rotary_ndims is not None
+             else config.kv_channels
+         )
+         self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
+
+         self.use_flash_attn = config.use_flash_attn
+         self.is_fp32 = not (config.bf16 or config.fp16)
+         if (
+             self.use_flash_attn
+             and flash_attn_unpadded_func is not None
+             and not self.is_fp32
+         ):
+             self.registered_causal_mask = None
+         else:
+             max_positions = config.max_position_embeddings
+             self.register_buffer(
+                 "registered_causal_mask",
+                 torch.tril(
+                     torch.ones((max_positions, max_positions), dtype=torch.bool)
+                 ).view(1, 1, max_positions, max_positions),
+                 persistent=False,
+             )
+
+         self.h = nn.ModuleList(
+             [
+                 QWenBlock(
+                     config
+                 )
+                 for i in range(config.num_hidden_layers)
+             ]
+         )
+         self.ln_f = RMSNorm(
+             self.embed_dim,
+             eps=config.layer_norm_epsilon,
+         )
+
+         self.post_init()
+
+         # BUG: hardcode
+         # self.skip_checkpointing_layer_ids = list(range(30))
+
+     def get_input_embeddings(self):
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings):
+         self.wte = new_embeddings
+
+     def get_ntk_alpha(self, true_seq_len):
+         context_value = math.log(true_seq_len / self.seq_length, 2) + 1
+         ntk_alpha = 2 ** math.ceil(context_value) - 1
+         ntk_alpha = max(ntk_alpha, 1)
+         return ntk_alpha
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ):
+         output_attentions = (
+             output_attentions
+             if output_attentions is not None
+             else self.config.output_attentions
+         )
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = (
+             return_dict if return_dict is not None else self.config.use_return_dict
+         )
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError(
+                 "You cannot specify both input_ids and inputs_embeds at the same time"
+             )
+         elif input_ids is not None:
+             input_shape = input_ids.size()
+             input_ids = input_ids.view(-1, input_shape[-1])
+             batch_size = input_ids.shape[0]
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+             batch_size = inputs_embeds.shape[0]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+         if token_type_ids is not None:
+             token_type_ids = token_type_ids.view(-1, input_shape[-1])
+         if position_ids is not None:
+             position_ids = position_ids.view(-1, input_shape[-1])
+
+         if past_key_values is None:
+             past_length = 0
+             past_key_values = tuple([None] * len(self.h))
+         else:
+             if self.use_cache_quantization:
+                 past_length = past_key_values[0][0][0].size(2)
+             else:
+                 past_length = past_key_values[0][0].size(-2)
+         if position_ids is None:
+             position_ids = torch.arange(
+                 past_length,
+                 input_shape[-1] + past_length,
+                 dtype=torch.long,
+                 device=device,
+             )
+             position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
+
+         if attention_mask is not None:
+             if batch_size <= 0:
+                 raise ValueError("batch_size has to be defined and > 0")
+             attention_mask = attention_mask.view(batch_size, -1)
+             attention_mask = attention_mask[:, None, None, :]
+             attention_mask = attention_mask.to(dtype=self.dtype)
+             attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+         encoder_attention_mask = None
+         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.wte(input_ids)
+         hidden_states = inputs_embeds
+
+         kv_seq_len = hidden_states.size()[1]
+         if past_key_values[0] is not None:
+             # past key values[0][0] shape: bs * seq_len * head_num * dim
+             if self.use_cache_quantization:
+                 kv_seq_len += past_key_values[0][0][0].shape[2]
+             else:
+                 kv_seq_len += past_key_values[0][0].shape[1]
+
+         if self.training or not self.use_dynamic_ntk:
+             ntk_alpha_list = [1.0]
+         elif kv_seq_len != hidden_states.size()[1]:
+             ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list
+         else:
+             ntk_alpha_list = []
+             if attention_mask is not None and kv_seq_len > self.seq_length:
+                 true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)
+                 for i in range(hidden_states.size()[0]):
+                     true_seq_len = true_seq_lens[i].item()
+                     ntk_alpha = self.get_ntk_alpha(true_seq_len)
+                     ntk_alpha_list.append(ntk_alpha)
+             else:
+                 ntk_alpha = self.get_ntk_alpha(kv_seq_len)
+                 ntk_alpha_list.append(ntk_alpha)
+         self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list
+         rotary_pos_emb_list = [
+             self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for ntk_alpha in ntk_alpha_list
+         ]
+
+         hidden_states = self.drop(hidden_states)
+         output_shape = input_shape + (hidden_states.size(-1),)
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         presents = () if use_cache else None
+         all_self_attentions = () if output_attentions else None
+         all_hidden_states = () if output_hidden_states else None
+         for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             # BUG: not work
+             forward_checkpointing = (self.gradient_checkpointing and self.training)
+             # if self.skip_checkpointing_layer_ids is not None and i in self.skip_checkpointing_layer_ids:
+             #     forward_checkpointing = False
+
+             if forward_checkpointing:
+                 def create_custom_forward(module):
+                     def custom_forward(*inputs):
+                         # None for past_key_value
+                         return module(*inputs, use_cache, output_attentions)
+
+                     return custom_forward
+
+                 outputs = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     rotary_pos_emb_list,
+                     self.registered_causal_mask,
+                     None,
+                     attention_mask,
+                     head_mask[i],
+                     encoder_hidden_states,
+                     encoder_attention_mask,
+                 )
+             else:
+                 outputs = block(
+                     hidden_states,
+                     layer_past=layer_past,
+                     rotary_pos_emb_list=rotary_pos_emb_list,
+                     registered_causal_mask=self.registered_causal_mask,
+                     attention_mask=attention_mask,
+                     head_mask=head_mask[i],
+                     encoder_hidden_states=encoder_hidden_states,
+                     encoder_attention_mask=encoder_attention_mask,
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                 )
+
+             hidden_states = outputs[0]
+             if use_cache is True:
+                 presents = presents + (outputs[1],)
+
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+         hidden_states = self.ln_f(hidden_states)
+         hidden_states = hidden_states.view(output_shape)
+         # Add last hidden state
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(
+                 v for v in [hidden_states, presents, all_hidden_states] if v is not None
+             )
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=presents,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+
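A worked example of the dynamic-NTK schedule in QWenModel.get_ntk_alpha above: the RoPE base multiplier stays at 1 up to seq_length (2048 in this checkpoint) and then jumps in discrete steps of 2^ceil(log2(L/seq_length) + 1) - 1 as the sequence grows:

import math

seq_length = 2048
for L in (2048, 3000, 4096, 8192):
    alpha = max(2 ** math.ceil(math.log(L / seq_length, 2) + 1) - 1, 1)
    print(L, alpha)
# 2048 1, 3000 3, 4096 3, 8192 7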
996
+ class QWenLMHeadModel(QWenPreTrainedModel):
997
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
998
+ _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
999
+
1000
+ def __init__(self, config):
1001
+ super().__init__(config)
1002
+ assert (
1003
+ config.bf16 + config.fp16 + config.fp32 <= 1
1004
+ ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"
1005
+ logger.warn(
1006
+ "Warning: please make sure that you are using the latest codes and checkpoints, "
1007
+ "especially if you used Qwen-7B before 09.25.2023."
1008
+ "请使用最新模型和代码,尤其如果你在9月25日前已经开始使用Qwen-7B,千万注意不要使用错误代码和模型。"
1009
+ )
1010
+
1011
+ autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
1012
+
1013
+ if autoset_precision:
1014
+ if SUPPORT_BF16:
1015
+ logger.warn(
1016
+ "The model is automatically converting to bf16 for faster inference. "
1017
+ "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
1018
+ )
1019
+ config.bf16 = True
1020
+ elif SUPPORT_FP16:
1021
+ logger.warn(
1022
+ "The model is automatically converting to fp16 for faster inference. "
1023
+ "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
1024
+ )
1025
+ config.fp16 = True
1026
+ else:
1027
+ config.fp32 = True
1028
+
1029
+ if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
1030
+ logger.warn("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
1031
+ if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
1032
+ logger.warn("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
1033
+ if config.fp32:
1034
+ if SUPPORT_BF16:
1035
+ logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
1036
+ elif SUPPORT_FP16:
1037
+ logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
1038
+
1039
+ if config.use_flash_attn == "auto":
1040
+ if config.bf16 or config.fp16:
1041
+ logger.warn("Try importing flash-attention for faster inference...")
1042
+ config.use_flash_attn = True
1043
+ else:
1044
+ config.use_flash_attn = False
1045
+ if config.use_flash_attn and config.fp32:
1046
+ logger.warn("Flash attention will be disabled because it does NOT support fp32.")
1047
+
1048
+ if config.use_flash_attn:
1049
+ _import_flash_attn()
1050
+
1051
+ self.transformer = QWenModel(config)
1052
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1053
+
1054
+ if config.bf16:
1055
+ self.transformer.bfloat16()
1056
+ self.lm_head.bfloat16()
1057
+ if config.fp16:
1058
+ self.transformer.half()
1059
+ self.lm_head.half()
1060
+ self.post_init()
1061
+
1062
+
1063
+ def get_output_embeddings(self):
1064
+ return self.lm_head
1065
+
1066
+ def set_output_embeddings(self, new_embeddings):
1067
+ self.lm_head = new_embeddings
1068
+
1069
+ def prepare_inputs_for_generation(
1070
+ self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
1071
+ ):
1072
+ token_type_ids = kwargs.get("token_type_ids", None)
1073
+ if past_key_values:
1074
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1075
+ if token_type_ids is not None:
1076
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1077
+
1078
+ attention_mask = kwargs.get("attention_mask", None)
1079
+ position_ids = kwargs.get("position_ids", None)
1080
+
1081
+ if attention_mask is not None and position_ids is None:
1082
+ position_ids = attention_mask.long().cumsum(-1) - 1
1083
+ position_ids.masked_fill_(attention_mask == 0, 1)
1084
+ if past_key_values:
1085
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1086
+ else:
1087
+ position_ids = None
1088
+
1089
+ if inputs_embeds is not None and past_key_values is None:
1090
+ model_inputs = {"inputs_embeds": inputs_embeds}
1091
+ else:
1092
+ model_inputs = {"input_ids": input_ids}
1093
+
1094
+ model_inputs.update(
1095
+ {
1096
+ "past_key_values": past_key_values,
1097
+ "use_cache": kwargs.get("use_cache"),
1098
+ "position_ids": position_ids,
1099
+ "attention_mask": attention_mask,
1100
+ "token_type_ids": token_type_ids,
1101
+ }
1102
+ )
1103
+ return model_inputs
1104
+
1105
+ def forward(
1106
+ self,
1107
+ input_ids: Optional[torch.LongTensor] = None,
1108
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1109
+ attention_mask: Optional[torch.FloatTensor] = None,
1110
+ token_type_ids: Optional[torch.LongTensor] = None,
1111
+ position_ids: Optional[torch.LongTensor] = None,
1112
+ head_mask: Optional[torch.FloatTensor] = None,
1113
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1114
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1115
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1116
+ labels: Optional[torch.LongTensor] = None,
1117
+ use_cache: Optional[bool] = None,
1118
+ output_attentions: Optional[bool] = None,
1119
+ output_hidden_states: Optional[bool] = None,
1120
+ return_dict: Optional[bool] = None,
1121
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1122
+
1123
+ return_dict = (
1124
+ return_dict if return_dict is not None else self.config.use_return_dict
1125
+ )
1126
+
1127
+ transformer_outputs = self.transformer(
1128
+ input_ids,
1129
+ past_key_values=past_key_values,
1130
+ attention_mask=attention_mask,
1131
+ token_type_ids=token_type_ids,
1132
+ position_ids=position_ids,
1133
+ head_mask=head_mask,
1134
+ inputs_embeds=inputs_embeds,
1135
+ encoder_hidden_states=encoder_hidden_states,
1136
+ encoder_attention_mask=encoder_attention_mask,
1137
+ use_cache=use_cache,
1138
+ output_attentions=output_attentions,
1139
+ output_hidden_states=output_hidden_states,
1140
+ return_dict=return_dict,
1141
+ )
1142
+ hidden_states = transformer_outputs[0]
1143
+
1144
+ lm_logits = self.lm_head(hidden_states)
1145
+
1146
+ loss = None
1147
+ if labels is not None:
1148
+ labels = labels.to(lm_logits.device)
1149
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1150
+ shift_labels = labels[..., 1:].contiguous()
1151
+ loss_fct = CrossEntropyLoss()
1152
+ loss = loss_fct(
1153
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
1154
+ )
1155
+
1156
+ if not return_dict:
1157
+ output = (lm_logits,) + transformer_outputs[1:]
1158
+ return ((loss,) + output) if loss is not None else output
1159
+
1160
+ return CausalLMOutputWithPast(
1161
+ loss=loss,
1162
+ logits=lm_logits,
1163
+ past_key_values=transformer_outputs.past_key_values,
1164
+ hidden_states=transformer_outputs.hidden_states,
1165
+ attentions=transformer_outputs.attentions,
1166
+ )
1167
+
1168
+ @staticmethod
1169
+ def _reorder_cache(
1170
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1171
+ ) -> Tuple[Tuple[torch.Tensor]]:
1172
+
1173
+ return tuple(
1174
+ tuple(
1175
+ past_state.index_select(0, beam_idx.to(past_state.device))
1176
+ for past_state in layer_past
1177
+ )
1178
+ for layer_past in past_key_values
1179
+ )
1180
+
1181
+ def chat(
1182
+ self,
1183
+ tokenizer: PreTrainedTokenizer,
1184
+ query: str,
1185
+ history: Optional[HistoryType],
1186
+ system: str = "You are a helpful assistant.",
1187
+ append_history: bool = True,
1188
+ stream: Optional[bool] = _SENTINEL,
1189
+ stop_words_ids: Optional[List[List[int]]] = None,
1190
+ generation_config: Optional[GenerationConfig] = None,
1191
+ **kwargs,
1192
+ ) -> Tuple[str, HistoryType]:
1193
+ generation_config = generation_config if generation_config is not None else self.generation_config
1194
+
1195
+ assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
1196
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
1197
+ if history is None:
1198
+ history = []
1199
+ if stop_words_ids is None:
1200
+ stop_words_ids = []
1201
+
1202
+ max_window_size = kwargs.get('max_window_size', None)
1203
+ if max_window_size is None:
1204
+ max_window_size = generation_config.max_window_size
1205
+ raw_text, context_tokens = make_context(
1206
+ tokenizer,
1207
+ query,
1208
+ history=history,
1209
+ system=system,
1210
+ max_window_size=max_window_size,
1211
+ chat_format=generation_config.chat_format,
1212
+ )
1213
+
1214
+ stop_words_ids.extend(get_stop_words_ids(
1215
+ generation_config.chat_format, tokenizer
1216
+ ))
1217
+ input_ids = torch.tensor([context_tokens]).to(self.device)
1218
+ outputs = self.generate(
1219
+ input_ids,
1220
+ stop_words_ids=stop_words_ids,
1221
+ return_dict_in_generate=False,
1222
+ generation_config=generation_config,
1223
+ **kwargs,
1224
+ )
1225
+
1226
+ response = decode_tokens(
1227
+ outputs[0],
1228
+ tokenizer,
1229
+ raw_text_len=len(raw_text),
1230
+ context_length=len(context_tokens),
1231
+ chat_format=generation_config.chat_format,
1232
+ verbose=False,
1233
+ errors='replace'
1234
+ )
1235
+
1236
+ if append_history:
1237
+ history.append((query, response))
1238
+
1239
+ return response, history
1240
+
1241
+     def chat_stream(
+         self,
+         tokenizer: PreTrainedTokenizer,
+         query: str,
+         history: Optional[HistoryType],
+         system: str = "You are a helpful assistant.",
+         stop_words_ids: Optional[List[List[int]]] = None,
+         logits_processor: Optional[LogitsProcessorList] = None,
+         generation_config: Optional[GenerationConfig] = None,
+         **kwargs,
+     ) -> Generator[str, Any, None]:
+         generation_config = generation_config if generation_config is not None else self.generation_config
+         assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
+         if history is None:
+             history = []
+         if stop_words_ids is None:
+             stop_words_ids = []
+
+         max_window_size = kwargs.get('max_window_size', None)
+         if max_window_size is None:
+             max_window_size = generation_config.max_window_size
+         raw_text, context_tokens = make_context(
+             tokenizer,
+             query,
+             history=history,
+             system=system,
+             max_window_size=max_window_size,
+             chat_format=generation_config.chat_format,
+         )
+
+         stop_words_ids.extend(get_stop_words_ids(
+             generation_config.chat_format, tokenizer
+         ))
+         if stop_words_ids:  # non-empty for chatml; guards the processor against an empty list
+             stop_words_logits_processor = StopWordsLogitsProcessor(
+                 stop_words_ids=stop_words_ids,
+                 eos_token_id=generation_config.eos_token_id,
+             )
+             if logits_processor is None:
+                 logits_processor = LogitsProcessorList([stop_words_logits_processor])
+             else:
+                 logits_processor.append(stop_words_logits_processor)
+         input_ids = torch.tensor([context_tokens]).to(self.device)
+
+         from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
+         self.__class__.generate_stream = NewGenerationMixin.generate
+         self.__class__.sample_stream = NewGenerationMixin.sample_stream
+         stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
+
+         def stream_generator():
+             outputs = []
+             for token in self.generate_stream(
+                     input_ids,
+                     return_dict_in_generate=False,
+                     generation_config=stream_config,
+                     logits_processor=logits_processor,
+                     seed=-1,
+                     **kwargs):
+                 outputs.append(token.item())
+                 yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')
+
+         return stream_generator()
+
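Note that the generator yields the full decoded text so far on every step, not an incremental delta. A minimal consumption sketch (continuing the names from the sketch above):

    last = ""
    for text in model.chat_stream(tokenizer, "Tell me a short story.", history=None):
        print(text[len(last):], end="", flush=True)  # print only the newly decoded suffix
        last = text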
+     def generate(
+         self,
+         inputs: Optional[torch.Tensor] = None,
+         generation_config: Optional[GenerationConfig] = None,
+         logits_processor: Optional[LogitsProcessorList] = None,
+         stopping_criteria: Optional[StoppingCriteriaList] = None,
+         prefix_allowed_tokens_fn: Optional[
+             Callable[[int, torch.Tensor], List[int]]
+         ] = None,
+         synced_gpus: Optional[bool] = None,
+         assistant_model: Optional["PreTrainedModel"] = None,
+         streamer: Optional["BaseStreamer"] = None,
+         **kwargs,
+     ) -> Union[GenerateOutput, torch.LongTensor]:
+         generation_config = generation_config if generation_config is not None else self.generation_config
+
+         # Process stop_words_ids: an explicit kwarg wins, then fall back to the generation config.
+         stop_words_ids = kwargs.pop("stop_words_ids", None)
+         if stop_words_ids is None and generation_config is not None:
+             stop_words_ids = getattr(generation_config, "stop_words_ids", None)
+
+         if stop_words_ids is not None:
+             stop_words_logits_processor = StopWordsLogitsProcessor(
+                 stop_words_ids=stop_words_ids,
+                 eos_token_id=generation_config.eos_token_id,
+             )
+             if logits_processor is None:
+                 logits_processor = LogitsProcessorList([stop_words_logits_processor])
+             else:
+                 logits_processor.append(stop_words_logits_processor)
+
+         return super().generate(
+             inputs,
+             generation_config=generation_config,
+             logits_processor=logits_processor,
+             stopping_criteria=stopping_criteria,
+             prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+             synced_gpus=synced_gpus,
+             assistant_model=assistant_model,
+             streamer=streamer,
+             **kwargs,
+         )
+
+
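A sketch of calling the override directly with stop words (the helpers come from qwen_generation_utils.py below; model and tokenizer as in the earlier sketch):

    import torch
    from qwen_generation_utils import make_context, get_stop_words_ids, decode_tokens

    raw_text, ctx = make_context(tokenizer, "Hello", system="You are a helpful assistant.")
    stop_ids = get_stop_words_ids("chatml", tokenizer)   # [[im_end_id], [im_start_id]]
    out = model.generate(
        torch.tensor([ctx]).to(model.device),
        stop_words_ids=stop_ids,
        max_new_tokens=64,
    )
    print(decode_tokens(out[0], tokenizer, raw_text_len=len(raw_text),
                        context_length=len(ctx), chat_format="chatml"))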
+ class RotaryEmbedding(torch.nn.Module):
+     def __init__(self, dim, base=10000):
+         super().__init__()
+         self.dim = dim
+         self.base = base
+         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         if importlib.util.find_spec("einops") is None:
+             raise RuntimeError("einops is required for Rotary Embedding")
+
+         self._rotary_pos_emb_cache = None
+         self._seq_len_cached = 0
+         self._ntk_alpha_cached = 1.0
+         self._ntk_alpha_cached_list = [1.0]
+
+     def update_rotary_pos_emb_cache(self, max_seq_len, offset=0, ntk_alpha=1.0):
+         seqlen = max_seq_len + offset
+         if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
+             # NTK-aware scaling: enlarging the rotary base stretches the position
+             # frequencies so the model can attend beyond its trained context length.
+             base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
+             self.inv_freq = 1.0 / (
+                 base
+                 ** (
+                     torch.arange(0, self.dim, 2, device=self.inv_freq.device).float()
+                     / self.dim
+                 )
+             )
+             self._seq_len_cached = max(2 * seqlen, 16)
+             self._ntk_alpha_cached = ntk_alpha
+             seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
+             freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
+
+             emb = torch.cat((freqs, freqs), dim=-1)
+             from einops import rearrange
+
+             emb = rearrange(emb, "n d -> 1 n 1 d")
+
+             cos, sin = emb.cos(), emb.sin()
+             self._rotary_pos_emb_cache = [cos, sin]
+
+     def forward(self, max_seq_len, offset=0, ntk_alpha=1.0):
+         self.update_rotary_pos_emb_cache(max_seq_len, offset, ntk_alpha)
+         cos, sin = self._rotary_pos_emb_cache
+         return [cos[:, offset : offset + max_seq_len], sin[:, offset : offset + max_seq_len]]
+
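A standalone sketch of the cache shapes (dim=128 matches this model's kv_channels; requires torch and einops):

    import torch
    rope = RotaryEmbedding(dim=128)
    cos, sin = rope(max_seq_len=2048)                    # each has shape (1, 2048, 1, 128)
    cos2, sin2 = rope(max_seq_len=2048, ntk_alpha=2.0)   # rebuilds the cache with a larger base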
+ def _rotate_half(x):
+     from einops import rearrange
+
+     x = rearrange(x, "... (j d) -> ... j d", j=2)
+     x1, x2 = x.unbind(dim=-2)
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(t, freqs):
+     cos, sin = freqs
+     if apply_rotary_emb_func is not None and t.is_cuda:
+         # Fused rotary kernel from flash-attn; it expects half-width cos/sin tables.
+         t_ = t.float()
+         cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]
+         sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]
+         output = apply_rotary_emb_func(t_, cos, sin).type_as(t)
+         return output
+     else:
+         # Pure-PyTorch fallback: rotate only the first rot_dim channels.
+         rot_dim = freqs[0].shape[-1]
+         cos, sin = freqs
+         t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]
+         t_ = t_.float()
+         t_pass_ = t_pass_.float()
+         t_ = (t_ * cos) + (_rotate_half(t_) * sin)
+         return torch.cat((t_, t_pass_), dim=-1).type_as(t)
+
+
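A quick CPU sketch of the fallback path, assuming RotaryEmbedding and apply_rotary_pos_emb are imported from this repo's modeling_qwen.py (on CPU, t.is_cuda is False, so the pure-PyTorch branch runs; shapes follow the model's (batch, seq, heads, head_dim) layout):

    import torch
    rope = RotaryEmbedding(dim=128)
    cos, sin = rope(max_seq_len=16)
    q = torch.randn(1, 16, 40, 128)              # 40 heads, as in this model's config
    q_rot = apply_rotary_pos_emb(q, (cos, sin))  # same shape, positions mixed into channel pairs
    assert q_rot.shape == q.shape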
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-6):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def _norm(self, x):
+         # Scale by the reciprocal root-mean-square of the last dimension.
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+     def forward(self, x):
+         if rms_norm is not None and x.is_cuda:
+             # Fused flash-attn kernel when available on GPU.
+             return rms_norm(x, self.weight, self.eps)
+         else:
+             output = self._norm(x.float()).type_as(x)
+             return output * self.weight
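Unlike LayerNorm, RMSNorm skips mean-centering and has no bias: y = x / sqrt(mean(x**2) + eps) * weight. A quick equivalence check on CPU, which exercises the pure-PyTorch path:

    import torch
    norm = RMSNorm(dim=8)
    x = torch.randn(2, 8)
    manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
    assert torch.allclose(norm(x), manual, atol=1e-6)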
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78de5d20b266b8de66fa548ba32fed4c3bbbc07fe5b7f1683d4fd2dc802fd390
+ size 16029842855
quantize_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "bits": 8,
+   "group_size": 128,
+   "damp_percent": 0.01,
+   "desc_act": false,
+   "static_groups": false,
+   "sym": true,
+   "true_sequential": true,
+   "model_name_or_path": null,
+   "model_file_base_name": null
+ }
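This file records the GPTQ settings: 8-bit weights, group size 128, symmetric quantization. A loading sketch, assuming optimum and auto-gptq are installed so transformers can handle the quantized weights (path hypothetical):

    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "path/to/this/repo",
        device_map="auto",
        trust_remote_code=True,   # needed for the custom QWen classes in this repo
    )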
qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
qwen_generation_utils.py ADDED
@@ -0,0 +1,416 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Generation support."""
+
+ from typing import Tuple, List, Union, Iterable, Optional
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from transformers import PreTrainedTokenizer
+ from transformers import logging
+ from transformers.generation import LogitsProcessor
+
+ logger = logging.get_logger(__name__)
+
+ # Types.
+ HistoryType = List[Tuple[str, str]]
+ TokensType = List[int]
+ BatchTokensType = List[List[int]]
+
+
+ def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
+     for tokens in batch:
+         context_length = len(tokens)
+         if context_length < seq_length:
+             tokens.extend([pad_id] * (seq_length - context_length))
+     return batch
+
+
+ def get_ltor_masks_and_position_ids(
+     data,
+     eod_token,
+     reset_position_ids,
+     reset_attention_mask,
+     eod_mask_loss,
+ ):
+     """Build masks and position ids for a left-to-right model."""
+
+     # Extract batch size and sequence length.
+     micro_batch_size, seq_length = data.size()
+
+     # Attention mask (lower triangular).
+     if reset_attention_mask:
+         att_mask_batch = micro_batch_size
+     else:
+         att_mask_batch = 1
+     attention_mask = torch.tril(
+         torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
+     ).view(att_mask_batch, 1, seq_length, seq_length)
+
+     # Loss mask.
+     loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
+     if eod_mask_loss:
+         loss_mask[data == eod_token] = 0.0
+
+     # Position ids.
+     position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
+     position_ids = position_ids.unsqueeze(0).expand_as(data)
+     # We need to clone as the ids will be modified based on batch index.
+     if reset_position_ids:
+         position_ids = position_ids.clone()
+
+     if reset_position_ids or reset_attention_mask:
+         # Loop through the batches:
+         for b in range(micro_batch_size):
+
+             # Find indices where the EOD token is.
+             eod_index = position_ids[b, data[b] == eod_token]
+             # Detach indices from positions if going to modify positions.
+             if reset_position_ids:
+                 eod_index = eod_index.clone()
+
+             # Loop through EOD indices:
+             prev_index = 0
+             for j in range(eod_index.size()[0]):
+                 i = eod_index[j]
+                 # Mask attention loss.
+                 if reset_attention_mask:
+                     attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
+                 # Reset positions.
+                 if reset_position_ids:
+                     position_ids[b, (i + 1) :] -= i + 1 - prev_index
+                     prev_index = i + 1
+
+     # Convert attention mask to binary:
+     attention_mask = attention_mask < 0.5
+
+     return attention_mask, loss_mask, position_ids
+
+
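A small sketch of the reset behavior: with an EOD token in the middle of a row, positions restart after it and attention is blocked across the boundary (the id shown is Qwen's <|endoftext|>; verify against the tokenizer in use):

    import torch
    eod = 151643
    data = torch.tensor([[11, 12, eod, 13, 14]])
    attn, loss_mask, pos = get_ltor_masks_and_position_ids(
        data, eod, reset_position_ids=True, reset_attention_mask=True, eod_mask_loss=True
    )
    print(pos)        # tensor([[0, 1, 2, 0, 1]])
    print(loss_mask)  # zero at the EOD position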
+ def get_batch(context_tokens: torch.LongTensor, eod_id: int):
+     """Generate batch from context tokens."""
+     # Keep tokens contiguous on their current device.
+     tokens = context_tokens.contiguous().to(context_tokens.device)
+     # Get the attention mask and position ids.
+     attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
+         tokens,
+         eod_id,
+         reset_position_ids=False,
+         reset_attention_mask=False,
+         eod_mask_loss=False,
+     )
+     return tokens, attention_mask, position_ids
+
+
+ def get_stop_words_ids(chat_format, tokenizer):
+     if chat_format == "raw":
+         stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
+     elif chat_format == "chatml":
+         stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
+     else:
+         raise NotImplementedError(f"Unknown chat format {chat_format!r}")
+     return stop_words_ids
+
+
+ def make_context(
+     tokenizer: PreTrainedTokenizer,
+     query: str,
+     history: Optional[List[Tuple[str, str]]] = None,
+     system: str = "",
+     max_window_size: int = 6144,
+     chat_format: str = "chatml",
+ ):
+     if history is None:
+         history = []
+
+     if chat_format == "chatml":
+         im_start, im_end = "<|im_start|>", "<|im_end|>"
+         im_start_tokens = [tokenizer.im_start_id]
+         im_end_tokens = [tokenizer.im_end_id]
+         nl_tokens = tokenizer.encode("\n")
+
+         def _tokenize_str(role, content):
+             return f"{role}\n{content}", tokenizer.encode(
+                 role, allowed_special=set()
+             ) + nl_tokens + tokenizer.encode(content, allowed_special=set())
+
+         system_text, system_tokens_part = _tokenize_str("system", system)
+         system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
+
+         raw_text = ""
+         context_tokens = []
+
+         # Walk the history from most recent to oldest, prepending turns until
+         # the window budget is exhausted.
+         for turn_query, turn_response in reversed(history):
+             query_text, query_tokens_part = _tokenize_str("user", turn_query)
+             query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
+             response_text, response_tokens_part = _tokenize_str(
+                 "assistant", turn_response
+             )
+             response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
+
+             next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
+             prev_chat = (
+                 f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
+             )
+
+             current_context_size = (
+                 len(system_tokens) + len(next_context_tokens) + len(context_tokens)
+             )
+             if current_context_size < max_window_size:
+                 context_tokens = next_context_tokens + context_tokens
+                 raw_text = prev_chat + raw_text
+             else:
+                 break
+
+         context_tokens = system_tokens + context_tokens
+         raw_text = f"{im_start}{system_text}{im_end}" + raw_text
+         context_tokens += (
+             nl_tokens
+             + im_start_tokens
+             + _tokenize_str("user", query)[1]
+             + im_end_tokens
+             + nl_tokens
+             + im_start_tokens
+             + tokenizer.encode("assistant")
+             + nl_tokens
+         )
+         raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
+
+     elif chat_format == "raw":
+         raw_text = query
+         context_tokens = tokenizer.encode(raw_text)
+     else:
+         raise NotImplementedError(f"Unknown chat format {chat_format!r}")
+
+     return raw_text, context_tokens
+
+
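For one prior turn ("Hello" / "Hi there!"), a new query "How are you?", and system set to "You are a helpful assistant.", the raw_text built above has this ChatML shape (context_tokens follow the same layout, ending on the open assistant turn):

    <|im_start|>system
    You are a helpful assistant.<|im_end|>
    <|im_start|>user
    Hello<|im_end|>
    <|im_start|>assistant
    Hi there!<|im_end|>
    <|im_start|>user
    How are you?<|im_end|>
    <|im_start|>assistant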
+ def _decode_default(
+     tokens: List[int],
+     *,
+     stop_words: List[str],
+     eod_words: List[str],
+     tokenizer: PreTrainedTokenizer,
+     raw_text_len: int,
+     verbose: bool = False,
+     return_end_reason: bool = False,
+     errors: str = 'replace',
+ ):
+     trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
+     if verbose:
+         print("\nRaw Generate: ", trim_decode_tokens)
+
+     end_reason = f"Gen length {len(tokens)}"
+     for stop_word in stop_words:
+         trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
+     for eod_word in eod_words:
+         if eod_word in trim_decode_tokens:
+             end_reason = f"Gen {eod_word!r}"
+         trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
+     trim_decode_tokens = trim_decode_tokens.strip()
+     if verbose:
+         print("\nEnd Reason:", end_reason)
+         print("\nGenerate: ", trim_decode_tokens)
+
+     if return_end_reason:
+         return trim_decode_tokens, end_reason
+     else:
+         return trim_decode_tokens
+
+
+ def _decode_chatml(
+     tokens: List[int],
+     *,
+     stop_words: List[str],
+     eod_token_ids: List[int],
+     tokenizer: PreTrainedTokenizer,
+     raw_text_len: int,
+     context_length: int,
+     verbose: bool = False,
+     return_end_reason: bool = False,
+     errors: str = 'replace',
+ ):
+     end_reason = f"Gen length {len(tokens)}"
+     eod_token_idx = context_length
+     for eod_token_idx in range(context_length, len(tokens)):
+         if tokens[eod_token_idx] in eod_token_ids:
+             end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
+             break
+
+     trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
+     if verbose:
+         print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
+         print("\nRaw Generate:", trim_decode_tokens)
+         print("\nEnd Reason:", end_reason)
+     for stop_word in stop_words:
+         trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
+     trim_decode_tokens = trim_decode_tokens.strip()
+     if verbose:
+         print("\nGenerate:", trim_decode_tokens)
+
+     if return_end_reason:
+         return trim_decode_tokens, end_reason
+     else:
+         return trim_decode_tokens
+
+
+ def decode_tokens(
+     tokens: Union[torch.LongTensor, TokensType],
+     tokenizer: PreTrainedTokenizer,
+     raw_text_len: int,
+     context_length: int,
+     chat_format: str,
+     verbose: bool = False,
+     return_end_reason: bool = False,
+     errors: str = "replace",
+ ) -> str:
+     if torch.is_tensor(tokens):
+         tokens = tokens.cpu().numpy().tolist()
+
+     if chat_format == "chatml":
+         return _decode_chatml(
+             tokens,
+             stop_words=[],
+             eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
+             tokenizer=tokenizer,
+             raw_text_len=raw_text_len,
+             context_length=context_length,
+             verbose=verbose,
+             return_end_reason=return_end_reason,
+             errors=errors,
+         )
+     elif chat_format == "raw":
+         return _decode_default(
+             tokens,
+             stop_words=["<|endoftext|>"],
+             eod_words=["<|endoftext|>"],
+             tokenizer=tokenizer,
+             raw_text_len=raw_text_len,
+             verbose=verbose,
+             return_end_reason=return_end_reason,
+             errors=errors,
+         )
+     else:
+         raise NotImplementedError(f"Unknown chat format {chat_format!r}")
+
+
+ class StopWordsLogitsProcessor(LogitsProcessor):
+     """
+     :class:`transformers.LogitsProcessor` that forces generation to stop once any of the
+     specified token sequences appears.
+
+     Args:
+         stop_words_ids (:obj:`List[List[int]]`):
+             List of token-id sequences that should end generation. To get the token
+             ids of a stop word, use :obj:`tokenizer(stop_word,
+             add_prefix_space=True).input_ids`.
+         eos_token_id (:obj:`int`):
+             The id of the `end-of-sequence` token.
+     """
+
+     def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
+
+         if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
+             raise ValueError(
+                 f"`stop_words_ids` has to be a non-empty list, but is {stop_words_ids}."
+             )
+         if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
+             raise ValueError(
+                 f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}."
+             )
+         if any(
+             any(
+                 (not isinstance(token_id, (int, np.integer)) or token_id < 0)
+                 for token_id in stop_word_ids
+             )
+             for stop_word_ids in stop_words_ids
+         ):
+             raise ValueError(
+                 f"Each list in `stop_words_ids` has to be a list of non-negative integers, but is {stop_words_ids}."
+             )
+
+         self.stop_words_ids = list(
+             filter(
+                 lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids
+             )
+         )
+         self.eos_token_id = eos_token_id
+         for stop_token_seq in self.stop_words_ids:
+             assert (
+                 len(stop_token_seq) > 0
+             ), "Stop words token sequences {} cannot have an empty list".format(
+                 stop_words_ids
+             )
+
+     def __call__(
+         self, input_ids: torch.LongTensor, scores: torch.FloatTensor
+     ) -> torch.FloatTensor:
+         stopped_samples = self._calc_stopped_samples(input_ids)
+         for i, should_stop in enumerate(stopped_samples):
+             if should_stop:
+                 # Force the EOS token by giving it an overwhelming score.
+                 scores[i, self.eos_token_id] = float(2**15)
+         return scores
+
+     def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
+         if len(tokens) == 0:
+             # An empty stop sequence matches anything.
+             return True
+         elif len(tokens) > len(prev_tokens):
+             # A stop sequence longer than the generated prefix cannot match.
+             return False
+         elif prev_tokens[-len(tokens) :].tolist() == tokens:
+             # The generated sequence ends with this stop sequence.
+             return True
+         else:
+             return False
+
+     def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
+         stopped_samples = []
+         for prev_input_ids_slice in prev_input_ids:
+             match = False
+             for stop_token_seq in self.stop_words_ids:
+                 if self._tokens_match(prev_input_ids_slice, stop_token_seq):
+                     # A stop sequence matched this sample; mark it stopped.
+                     match = True
+                     break
+             stopped_samples.append(match)
+
+         return stopped_samples
+
+
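A usage sketch (the ids shown are Qwen's <|im_end|> and <|endoftext|> specials; verify them against the tokenizer in use):

    import torch
    from transformers.generation import LogitsProcessorList

    processor = LogitsProcessorList([
        StopWordsLogitsProcessor(stop_words_ids=[[151645]], eos_token_id=151643)
    ])
    input_ids = torch.tensor([[1, 2, 151645]])   # sequence ending in a stop word
    scores = torch.zeros(1, 152064)
    scores = processor(input_ids, scores)
    assert scores[0, 151643] == float(2**15)     # EOS score forced high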
+ def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
385
+ """This function has been mostly taken from huggingface conversational
386
+ ai code at
387
+ https://medium.com/huggingface/how-to-build-a-state-of-the-art-
388
+ conversational-ai-with-transfer-learning-2d818ac26313"""
389
+
390
+ if top_k > 0:
391
+ # Remove all tokens with a probability less than the
392
+ # last token of the top-k
393
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
394
+ logits[indices_to_remove] = filter_value
395
+
396
+ if top_p > 0.0:
397
+ # Cconvert to 1D
398
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
399
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
400
+
401
+ # Remove tokens with cumulative probability above the threshold
402
+ sorted_indices_to_remove = cumulative_probs > top_p
403
+ # Shift the indices to the right to keep also the first token
404
+ # above the threshold
405
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
406
+ sorted_indices_to_remove[..., 0] = 0
407
+ for i in range(sorted_indices.size(0)):
408
+ indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
409
+ logits[i][indices_to_remove] = filter_value
410
+
411
+ return logits
412
+
413
+
414
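A tiny worked example of the top-k branch (note it modifies logits in place):

    import torch
    logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
    out = top_k_logits(logits, top_k=2)
    # The two largest logits survive; the rest become -inf:
    # tensor([[2.0, 1.0, -inf, -inf]])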
+ def switch(val1, val2, boolean):
+     # Elementwise select: val2 where boolean is true, val1 elsewhere.
+     boolean = boolean.type_as(val1)
+     return (1 - boolean) * val1 + boolean * val2
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {}
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": [
+       "Qwen/Qwen-14B--tokenization_qwen.QWenTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 8192,
+   "tokenizer_class": "QWenTokenizer"
+ }
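The auto_map entry points AutoTokenizer at the custom QWenTokenizer implementation from the Qwen/Qwen-14B repo, which reads the qwen.tiktoken BPE vocabulary shipped above. A loading sketch (path hypothetical):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo", trust_remote_code=True)
    ids = tokenizer.encode("hello world")
    print(tokenizer.decode(ids))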