RioLee committed on

Commit a1d66ca
1 Parent(s): 84e2892

Upload folder using huggingface_hub

.gitignore ADDED
@@ -0,0 +1,5 @@
+ all_results.json
+ logs.txt
+ trainer_log.jsonl
+ training_loss.png
+ train_results.json
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: other
+ base_model: internlm/internlm2-chat-7b
+ tags:
+ - llama-factory
+ - generated_from_trainer
+ model-index:
+ - name: 9f100e26-d997-46e8-afee-721977a16ca9
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # 9f100e26-d997-46e8-afee-721977a16ca9
+
+ This model is a fine-tuned version of [internlm/internlm2-chat-7b](https://huggingface.co/internlm/internlm2-chat-7b) on the cpsycoun dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-06
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 28
+ - total_train_batch_size: 448
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 9.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.37.1
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.1
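
The auto-generated card stops at the framework versions, so the following is a minimal, hypothetical loading sketch (not produced by the Trainer). It assumes the checkpoint is loaded from this repository id or a local copy, and it passes `trust_remote_code=True` because config.json maps the architecture to the bundled configuration_internlm2.py / modeling_internlm2.py.

```python
# Hypothetical loading sketch, not part of the generated card.
# MODEL_PATH is a placeholder for this repo id or a local checkout of the checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "path/to/9f100e26-d997-46e8-afee-721977a16ca9"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype="auto",       # weights are stored in float16 per config.json
    trust_remote_code=True,   # required: config.json auto_map points at the bundled InternLM2 code
)
model.eval()
```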
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 9.0,
+   "train_loss": 1.4981852107577853,
+   "train_runtime": 2910.9748,
+   "train_samples_per_second": 9.69,
+   "train_steps_per_second": 0.022
+ }
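
As a rough consistency check (not part of the uploaded file): the README's effective batch size is 4 per-device × 4 GPUs × 28 accumulation steps = 448, so 0.022 steps/s × 2,911 s ≈ 63 optimizer steps, and 9.69 samples/s × 2,911 s ≈ 28,200 samples ≈ 63 × 448, matching the 63-step run visible in logs.txt.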
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "/home/lirenhao/pretrained_models/internlm2-chat-7b/",
+   "architectures": [
+     "InternLM2ForCausalLM"
+   ],
+   "attn_implementation": "eager",
+   "auto_map": {
+     "AutoConfig": "configuration_internlm2.InternLM2Config",
+     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+   },
+   "bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "internlm2",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 2.0,
+     "type": "dynamic"
+   },
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.37.1",
+   "use_cache": false,
+   "vocab_size": 92544
+ }
configuration_internlm2.py ADDED
@@ -0,0 +1,151 @@
+ # coding=utf-8
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM2 model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ # Modified from transformers.model.llama.configuration_llama.LlamaConfig
+ class InternLM2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
+     an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`InternLM2Model`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
+     Example:
+
+     """
+     model_type = "internlm2"
+     _auto_class = "AutoConfig"
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         attn_implementation="eager",
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = "eager"
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
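
For reference, here is a hypothetical usage sketch (not part of the uploaded file) that instantiates `InternLM2Config` with the same values this repo's config.json uses; `_rope_scaling_validation()` runs inside `__init__` and accepts the `{"type": "dynamic", "factor": 2.0}` dictionary shown above.

```python
# Hypothetical usage sketch, not part of the uploaded file: build an InternLM2Config
# with the values from this repo's config.json. _rope_scaling_validation() is called
# by __init__ and accepts the dynamic rope-scaling dict used here.
from configuration_internlm2 import InternLM2Config

config = InternLM2Config(
    vocab_size=92544,
    hidden_size=4096,
    intermediate_size=14336,
    num_hidden_layers=32,
    num_attention_heads=32,
    num_key_value_heads=8,     # 32 query heads sharing 8 KV heads -> grouped-query attention
    max_position_embeddings=32768,
    rms_norm_eps=1e-05,
    rope_theta=1000000,
    rope_scaling={"type": "dynamic", "factor": 2.0},
    bias=False,
    pad_token_id=2,
    bos_token_id=1,
    eos_token_id=2,
)
print(config.num_key_value_heads)  # 8
```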
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 2,
+   "transformers_version": "4.37.1"
+ }
logs.txt ADDED
@@ -0,0 +1,981 @@
  0%| | 0/63 [00:00<?, ?it/s]/home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
  8%|▊ | 5/63 [03:22<38:23, 39.72s/it][2024-02-01 14:26:04,941] [INFO] [loss_scaler.py:190:update_scale] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536, but hysteresis is 2. Reducing hysteresis to 1
 10%|▉ | 6/63 [04:03<38:08, 40.15s/it][2024-02-01 14:26:44,502] [INFO] [loss_scaler.py:183:update_scale] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536, reducing to 32768
 33%|███▎ | 21/63 [13:59<27:59, 39.99s/it][INFO|trainer.py:2926] 2024-02-01 14:36:12,897 >> Saving model checkpoint to /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21
 67%|██████▋ | 42/63 [30:07<13:57, 39.87s/it][INFO|trainer.py:2926] 2024-02-01 14:52:21,426 >> Saving model checkpoint to /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42
 98%|█████████▊| 62/63 [45:38<00:39, 39.80s/it]
[remaining tqdm progress ticks (1/63 through 62/63, ~40 s/it each) elided]
1
+ [2024-02-01 14:20:07,768] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
2
+ [2024-02-01 14:20:09,368] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
3
+ [2024-02-01 14:20:09,369] [INFO] [runner.py:568:main] cmd = /home/lirenhao/anaconda3/envs/llama_factory/bin/python -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgM119 --master_addr=127.0.0.1 --master_port=2345 --enable_each_rank_log=None /home/lirenhao/projects/LLaMA-Factory/src/train_bash.py --deepspeed ds_config.json --stage sft --model_name_or_path /home/lirenhao/pretrained_models/internlm2-chat-7b/ --do_train --dataset cpsycoun --template intern2 --finetuning_type full --lora_target wqkv --output_dir /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9 --overwrite_cache --overwrite_output_dir --per_device_train_batch_size 4 --gradient_accumulation_steps 28 --lr_scheduler_type cosine --logging_steps 10 --save_steps 21 --learning_rate 1e-6 --num_train_epochs 9.0 --plot_loss --fp16
4
+ [2024-02-01 14:20:12,819] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
5
+ [2024-02-01 14:20:14,435] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3]}
6
+ [2024-02-01 14:20:14,436] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=4, node_rank=0
7
+ [2024-02-01 14:20:14,436] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3]})
8
+ [2024-02-01 14:20:14,436] [INFO] [launch.py:163:main] dist_world_size=4
9
+ [2024-02-01 14:20:14,436] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3
10
+ [2024-02-01 14:20:19,797] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
11
+ [2024-02-01 14:20:20,069] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
12
+ [2024-02-01 14:20:20,128] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
13
+ [2024-02-01 14:20:20,157] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
14
+ [2024-02-01 14:20:22,839] [INFO] [comm.py:637:init_distributed] cdb=None
15
+ [2024-02-01 14:20:23,347] [INFO] [comm.py:637:init_distributed] cdb=None
16
+ [2024-02-01 14:20:23,364] [INFO] [comm.py:637:init_distributed] cdb=None
17
+ [2024-02-01 14:20:23,375] [INFO] [comm.py:637:init_distributed] cdb=None
18
+ [2024-02-01 14:20:23,376] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
19
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1
20
+ distributed training: True, compute dtype: torch.float16
21
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments(
22
+ _n_gpu=1,
23
+ adafactor=False,
24
+ adam_beta1=0.9,
25
+ adam_beta2=0.999,
26
+ adam_epsilon=1e-08,
27
+ auto_find_batch_size=False,
28
+ bf16=False,
29
+ bf16_full_eval=False,
30
+ data_seed=None,
31
+ dataloader_drop_last=False,
32
+ dataloader_num_workers=0,
33
+ dataloader_persistent_workers=False,
34
+ dataloader_pin_memory=True,
35
+ ddp_backend=None,
36
+ ddp_broadcast_buffers=None,
37
+ ddp_bucket_cap_mb=None,
38
+ ddp_find_unused_parameters=None,
39
+ ddp_timeout=1800,
40
+ debug=[],
41
+ deepspeed=ds_config.json,
42
+ disable_tqdm=False,
43
+ dispatch_batches=None,
44
+ do_eval=False,
45
+ do_predict=False,
46
+ do_train=True,
47
+ eval_accumulation_steps=None,
48
+ eval_delay=0,
49
+ eval_steps=None,
50
+ evaluation_strategy=no,
51
+ fp16=True,
52
+ fp16_backend=auto,
53
+ fp16_full_eval=False,
54
+ fp16_opt_level=O1,
55
+ fsdp=[],
56
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False},
57
+ fsdp_min_num_params=0,
58
+ fsdp_transformer_layer_cls_to_wrap=None,
59
+ full_determinism=False,
60
+ generation_config=None,
61
+ generation_max_length=None,
62
+ generation_num_beams=None,
63
+ gradient_accumulation_steps=28,
64
+ gradient_checkpointing=False,
65
+ gradient_checkpointing_kwargs=None,
66
+ greater_is_better=None,
67
+ group_by_length=False,
68
+ half_precision_backend=auto,
69
+ hub_always_push=False,
70
+ hub_model_id=None,
71
+ hub_private_repo=False,
72
+ hub_strategy=every_save,
73
+ hub_token=<HUB_TOKEN>,
74
+ ignore_data_skip=False,
75
+ include_inputs_for_metrics=False,
76
+ include_num_input_tokens_seen=False,
77
+ include_tokens_per_second=False,
78
+ jit_mode_eval=False,
79
+ label_names=None,
80
+ label_smoothing_factor=0.0,
81
+ learning_rate=1e-06,
82
+ length_column_name=length,
83
+ load_best_model_at_end=False,
84
+ local_rank=2,
85
+ log_level=passive,
86
+ log_level_replica=warning,
87
+ log_on_each_node=True,
88
+ logging_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/runs/Feb01_14-20-22_siat-a100-4-02,
89
+ logging_first_step=False,
90
+ logging_nan_inf_filter=True,
91
+ logging_steps=10,
92
+ logging_strategy=steps,
93
+ lr_scheduler_kwargs={},
94
+ lr_scheduler_type=cosine,
95
+ max_grad_norm=1.0,
96
+ max_steps=-1,
97
+ metric_for_best_model=None,
98
+ mp_parameters=,
99
+ neftune_noise_alpha=None,
100
+ no_cuda=False,
101
+ num_train_epochs=9.0,
102
+ optim=adamw_torch,
103
+ optim_args=None,
104
+ output_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
105
+ overwrite_output_dir=True,
106
+ past_index=-1,
107
+ per_device_eval_batch_size=8,
108
+ per_device_train_batch_size=4,
109
+ predict_with_generate=False,
110
+ prediction_loss_only=False,
111
+ push_to_hub=False,
112
+ push_to_hub_model_id=None,
113
+ push_to_hub_organization=None,
114
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
115
+ ray_scope=last,
116
+ remove_unused_columns=True,
117
+ report_to=[],
118
+ resume_from_checkpoint=None,
119
+ run_name=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
120
+ save_on_each_node=False,
121
+ save_only_model=False,
122
+ save_safetensors=True,
123
+ save_steps=21,
124
+ save_strategy=steps,
125
+ save_total_limit=None,
126
+ seed=42,
127
+ skip_memory_metrics=True,
128
+ sortish_sampler=False,
129
+ split_batches=False,
130
+ tf32=None,
131
+ torch_compile=False,
132
+ torch_compile_backend=None,
133
+ torch_compile_mode=None,
134
+ torchdynamo=None,
135
+ tpu_metrics_debug=False,
136
+ tpu_num_cores=None,
137
+ use_cpu=False,
138
+ use_ipex=False,
139
+ use_legacy_prediction_loop=False,
140
+ use_mps_device=False,
141
+ warmup_ratio=0.0,
142
+ warmup_steps=0,
143
+ weight_decay=0.0,
144
+ )
145
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Process rank: 0, device: cuda:0, n_gpu: 1
146
+ distributed training: True, compute dtype: torch.float16
147
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments(
148
+ _n_gpu=1,
149
+ adafactor=False,
150
+ adam_beta1=0.9,
151
+ adam_beta2=0.999,
152
+ adam_epsilon=1e-08,
153
+ auto_find_batch_size=False,
154
+ bf16=False,
155
+ bf16_full_eval=False,
156
+ data_seed=None,
157
+ dataloader_drop_last=False,
158
+ dataloader_num_workers=0,
159
+ dataloader_persistent_workers=False,
160
+ dataloader_pin_memory=True,
161
+ ddp_backend=None,
162
+ ddp_broadcast_buffers=None,
163
+ ddp_bucket_cap_mb=None,
164
+ ddp_find_unused_parameters=None,
165
+ ddp_timeout=1800,
166
+ debug=[],
167
+ deepspeed=ds_config.json,
168
+ disable_tqdm=False,
169
+ dispatch_batches=None,
170
+ do_eval=False,
171
+ do_predict=False,
172
+ do_train=True,
173
+ eval_accumulation_steps=None,
174
+ eval_delay=0,
175
+ eval_steps=None,
176
+ evaluation_strategy=no,
177
+ fp16=True,
178
+ fp16_backend=auto,
179
+ fp16_full_eval=False,
180
+ fp16_opt_level=O1,
181
+ fsdp=[],
182
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False},
183
+ fsdp_min_num_params=0,
184
+ fsdp_transformer_layer_cls_to_wrap=None,
185
+ full_determinism=False,
186
+ generation_config=None,
187
+ generation_max_length=None,
188
+ generation_num_beams=None,
189
+ gradient_accumulation_steps=28,
190
+ gradient_checkpointing=False,
191
+ gradient_checkpointing_kwargs=None,
192
+ greater_is_better=None,
193
+ group_by_length=False,
194
+ half_precision_backend=auto,
195
+ hub_always_push=False,
196
+ hub_model_id=None,
197
+ hub_private_repo=False,
198
+ hub_strategy=every_save,
199
+ hub_token=<HUB_TOKEN>,
200
+ ignore_data_skip=False,
201
+ include_inputs_for_metrics=False,
202
+ include_num_input_tokens_seen=False,
203
+ include_tokens_per_second=False,
204
+ jit_mode_eval=False,
205
+ label_names=None,
206
+ label_smoothing_factor=0.0,
207
+ learning_rate=1e-06,
208
+ length_column_name=length,
209
+ load_best_model_at_end=False,
210
+ local_rank=0,
211
+ log_level=passive,
212
+ log_level_replica=warning,
213
+ log_on_each_node=True,
214
+ logging_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/runs/Feb01_14-20-23_siat-a100-4-02,
215
+ logging_first_step=False,
216
+ logging_nan_inf_filter=True,
217
+ logging_steps=10,
218
+ logging_strategy=steps,
219
+ lr_scheduler_kwargs={},
220
+ lr_scheduler_type=cosine,
221
+ max_grad_norm=1.0,
222
+ max_steps=-1,
223
+ metric_for_best_model=None,
224
+ mp_parameters=,
225
+ neftune_noise_alpha=None,
226
+ no_cuda=False,
227
+ num_train_epochs=9.0,
228
+ optim=adamw_torch,
229
+ optim_args=None,
230
+ output_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
231
+ overwrite_output_dir=True,
232
+ past_index=-1,
233
+ per_device_eval_batch_size=8,
234
+ per_device_train_batch_size=4,
235
+ predict_with_generate=False,
236
+ prediction_loss_only=False,
237
+ push_to_hub=False,
238
+ push_to_hub_model_id=None,
239
+ push_to_hub_organization=None,
240
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
241
+ ray_scope=last,
242
+ remove_unused_columns=True,
243
+ report_to=[],
244
+ resume_from_checkpoint=None,
245
+ run_name=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
246
+ save_on_each_node=False,
247
+ save_only_model=False,
248
+ save_safetensors=True,
249
+ save_steps=21,
250
+ save_strategy=steps,
251
+ save_total_limit=None,
252
+ seed=42,
253
+ skip_memory_metrics=True,
254
+ sortish_sampler=False,
255
+ split_batches=False,
256
+ tf32=None,
257
+ torch_compile=False,
258
+ torch_compile_backend=None,
259
+ torch_compile_mode=None,
260
+ torchdynamo=None,
261
+ tpu_metrics_debug=False,
262
+ tpu_num_cores=None,
263
+ use_cpu=False,
264
+ use_ipex=False,
265
+ use_legacy_prediction_loop=False,
266
+ use_mps_device=False,
267
+ warmup_ratio=0.0,
268
+ warmup_steps=0,
269
+ weight_decay=0.0,
270
+ )
271
+ [INFO|tokenization_utils_base.py:2025] 2024-02-01 14:20:24,513 >> loading file ./tokenizer.model
272
+ [INFO|tokenization_utils_base.py:2025] 2024-02-01 14:20:24,513 >> loading file added_tokens.json
273
+ [INFO|tokenization_utils_base.py:2025] 2024-02-01 14:20:24,513 >> loading file special_tokens_map.json
274
+ [INFO|tokenization_utils_base.py:2025] 2024-02-01 14:20:24,513 >> loading file tokenizer_config.json
275
+ [INFO|tokenization_utils_base.py:2025] 2024-02-01 14:20:24,513 >> loading file tokenizer.json
276
+ [INFO|configuration_utils.py:727] 2024-02-01 14:20:24,850 >> loading configuration file /home/lirenhao/pretrained_models/internlm2-chat-7b/config.json
277
+ [INFO|configuration_utils.py:727] 2024-02-01 14:20:24,852 >> loading configuration file /home/lirenhao/pretrained_models/internlm2-chat-7b/config.json
278
+ [INFO|configuration_utils.py:792] 2024-02-01 14:20:24,854 >> Model config InternLM2Config {
279
+ "_name_or_path": "/home/lirenhao/pretrained_models/internlm2-chat-7b/",
280
+ "architectures": [
281
+ "InternLM2ForCausalLM"
282
+ ],
283
+ "attn_implementation": "eager",
284
+ "auto_map": {
285
+ "AutoConfig": "configuration_internlm2.InternLM2Config",
286
+ "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
287
+ "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
288
+ },
289
+ "bias": false,
290
+ "bos_token_id": 1,
291
+ "eos_token_id": 2,
292
+ "hidden_act": "silu",
293
+ "hidden_size": 4096,
294
+ "initializer_range": 0.02,
295
+ "intermediate_size": 14336,
296
+ "max_position_embeddings": 32768,
297
+ "model_type": "internlm2",
298
+ "num_attention_heads": 32,
299
+ "num_hidden_layers": 32,
300
+ "num_key_value_heads": 8,
301
+ "pad_token_id": 2,
302
+ "rms_norm_eps": 1e-05,
303
+ "rope_scaling": {
304
+ "factor": 2.0,
305
+ "type": "dynamic"
306
+ },
307
+ "rope_theta": 1000000,
308
+ "tie_word_embeddings": false,
309
+ "torch_dtype": "float16",
310
+ "transformers_version": "4.37.1",
311
+ "use_cache": true,
312
+ "vocab_size": 92544
313
+ }
314
+
315
+ [INFO|modeling_utils.py:3475] 2024-02-01 14:20:24,903 >> loading weights file /home/lirenhao/pretrained_models/internlm2-chat-7b/pytorch_model.bin.index.json
316
+ [INFO|modeling_utils.py:1428] 2024-02-01 14:20:24,903 >> Instantiating InternLM2ForCausalLM model under default dtype torch.float16.
317
+ [INFO|configuration_utils.py:826] 2024-02-01 14:20:24,905 >> Generate config GenerationConfig {
318
+ "bos_token_id": 1,
319
+ "eos_token_id": 2,
320
+ "pad_token_id": 2
321
+ }
322
+
323
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1
324
+ distributed training: True, compute dtype: torch.float16
325
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments(
326
+ _n_gpu=1,
327
+ adafactor=False,
328
+ adam_beta1=0.9,
329
+ adam_beta2=0.999,
330
+ adam_epsilon=1e-08,
331
+ auto_find_batch_size=False,
332
+ bf16=False,
333
+ bf16_full_eval=False,
334
+ data_seed=None,
335
+ dataloader_drop_last=False,
336
+ dataloader_num_workers=0,
337
+ dataloader_persistent_workers=False,
338
+ dataloader_pin_memory=True,
339
+ ddp_backend=None,
340
+ ddp_broadcast_buffers=None,
341
+ ddp_bucket_cap_mb=None,
342
+ ddp_find_unused_parameters=None,
343
+ ddp_timeout=1800,
344
+ debug=[],
345
+ deepspeed=ds_config.json,
346
+ disable_tqdm=False,
347
+ dispatch_batches=None,
348
+ do_eval=False,
349
+ do_predict=False,
350
+ do_train=True,
351
+ eval_accumulation_steps=None,
352
+ eval_delay=0,
353
+ eval_steps=None,
354
+ evaluation_strategy=no,
355
+ fp16=True,
356
+ fp16_backend=auto,
357
+ fp16_full_eval=False,
358
+ fp16_opt_level=O1,
359
+ fsdp=[],
360
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False},
361
+ fsdp_min_num_params=0,
362
+ fsdp_transformer_layer_cls_to_wrap=None,
363
+ full_determinism=False,
364
+ generation_config=None,
365
+ generation_max_length=None,
366
+ generation_num_beams=None,
367
+ gradient_accumulation_steps=28,
368
+ gradient_checkpointing=False,
369
+ gradient_checkpointing_kwargs=None,
370
+ greater_is_better=None,
371
+ group_by_length=False,
372
+ half_precision_backend=auto,
373
+ hub_always_push=False,
374
+ hub_model_id=None,
375
+ hub_private_repo=False,
376
+ hub_strategy=every_save,
377
+ hub_token=<HUB_TOKEN>,
378
+ ignore_data_skip=False,
379
+ include_inputs_for_metrics=False,
380
+ include_num_input_tokens_seen=False,
381
+ include_tokens_per_second=False,
382
+ jit_mode_eval=False,
383
+ label_names=None,
384
+ label_smoothing_factor=0.0,
385
+ learning_rate=1e-06,
386
+ length_column_name=length,
387
+ load_best_model_at_end=False,
388
+ local_rank=1,
389
+ log_level=passive,
390
+ log_level_replica=warning,
391
+ log_on_each_node=True,
392
+ logging_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/runs/Feb01_14-20-23_siat-a100-4-02,
393
+ logging_first_step=False,
394
+ logging_nan_inf_filter=True,
395
+ logging_steps=10,
396
+ logging_strategy=steps,
397
+ lr_scheduler_kwargs={},
398
+ lr_scheduler_type=cosine,
399
+ max_grad_norm=1.0,
400
+ max_steps=-1,
401
+ metric_for_best_model=None,
402
+ mp_parameters=,
403
+ neftune_noise_alpha=None,
404
+ no_cuda=False,
405
+ num_train_epochs=9.0,
406
+ optim=adamw_torch,
407
+ optim_args=None,
408
+ output_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
409
+ overwrite_output_dir=True,
410
+ past_index=-1,
411
+ per_device_eval_batch_size=8,
412
+ per_device_train_batch_size=4,
413
+ predict_with_generate=False,
414
+ prediction_loss_only=False,
415
+ push_to_hub=False,
416
+ push_to_hub_model_id=None,
417
+ push_to_hub_organization=None,
418
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
419
+ ray_scope=last,
420
+ remove_unused_columns=True,
421
+ report_to=[],
422
+ resume_from_checkpoint=None,
423
+ run_name=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
424
+ save_on_each_node=False,
425
+ save_only_model=False,
426
+ save_safetensors=True,
427
+ save_steps=21,
428
+ save_strategy=steps,
429
+ save_total_limit=None,
430
+ seed=42,
431
+ skip_memory_metrics=True,
432
+ sortish_sampler=False,
433
+ split_batches=False,
434
+ tf32=None,
435
+ torch_compile=False,
436
+ torch_compile_backend=None,
437
+ torch_compile_mode=None,
438
+ torchdynamo=None,
439
+ tpu_metrics_debug=False,
440
+ tpu_num_cores=None,
441
+ use_cpu=False,
442
+ use_ipex=False,
443
+ use_legacy_prediction_loop=False,
444
+ use_mps_device=False,
445
+ warmup_ratio=0.0,
446
+ warmup_steps=0,
447
+ weight_decay=0.0,
448
+ )
449
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1
450
+ distributed training: True, compute dtype: torch.float16
451
+ 02/01/2024 14:20:24 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments(
452
+ _n_gpu=1,
453
+ adafactor=False,
454
+ adam_beta1=0.9,
455
+ adam_beta2=0.999,
456
+ adam_epsilon=1e-08,
457
+ auto_find_batch_size=False,
458
+ bf16=False,
459
+ bf16_full_eval=False,
460
+ data_seed=None,
461
+ dataloader_drop_last=False,
462
+ dataloader_num_workers=0,
463
+ dataloader_persistent_workers=False,
464
+ dataloader_pin_memory=True,
465
+ ddp_backend=None,
466
+ ddp_broadcast_buffers=None,
467
+ ddp_bucket_cap_mb=None,
468
+ ddp_find_unused_parameters=None,
469
+ ddp_timeout=1800,
470
+ debug=[],
471
+ deepspeed=ds_config.json,
472
+ disable_tqdm=False,
473
+ dispatch_batches=None,
474
+ do_eval=False,
475
+ do_predict=False,
476
+ do_train=True,
477
+ eval_accumulation_steps=None,
478
+ eval_delay=0,
479
+ eval_steps=None,
480
+ evaluation_strategy=no,
481
+ fp16=True,
482
+ fp16_backend=auto,
483
+ fp16_full_eval=False,
484
+ fp16_opt_level=O1,
485
+ fsdp=[],
486
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False},
487
+ fsdp_min_num_params=0,
488
+ fsdp_transformer_layer_cls_to_wrap=None,
489
+ full_determinism=False,
490
+ generation_config=None,
491
+ generation_max_length=None,
492
+ generation_num_beams=None,
493
+ gradient_accumulation_steps=28,
494
+ gradient_checkpointing=False,
495
+ gradient_checkpointing_kwargs=None,
496
+ greater_is_better=None,
497
+ group_by_length=False,
498
+ half_precision_backend=auto,
499
+ hub_always_push=False,
500
+ hub_model_id=None,
501
+ hub_private_repo=False,
502
+ hub_strategy=every_save,
503
+ hub_token=<HUB_TOKEN>,
504
+ ignore_data_skip=False,
505
+ include_inputs_for_metrics=False,
506
+ include_num_input_tokens_seen=False,
507
+ include_tokens_per_second=False,
508
+ jit_mode_eval=False,
509
+ label_names=None,
510
+ label_smoothing_factor=0.0,
511
+ learning_rate=1e-06,
512
+ length_column_name=length,
513
+ load_best_model_at_end=False,
514
+ local_rank=3,
515
+ log_level=passive,
516
+ log_level_replica=warning,
517
+ log_on_each_node=True,
518
+ logging_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/runs/Feb01_14-20-23_siat-a100-4-02,
519
+ logging_first_step=False,
520
+ logging_nan_inf_filter=True,
521
+ logging_steps=10,
522
+ logging_strategy=steps,
523
+ lr_scheduler_kwargs={},
524
+ lr_scheduler_type=cosine,
525
+ max_grad_norm=1.0,
526
+ max_steps=-1,
527
+ metric_for_best_model=None,
528
+ mp_parameters=,
529
+ neftune_noise_alpha=None,
530
+ no_cuda=False,
531
+ num_train_epochs=9.0,
532
+ optim=adamw_torch,
533
+ optim_args=None,
534
+ output_dir=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
535
+ overwrite_output_dir=True,
536
+ past_index=-1,
537
+ per_device_eval_batch_size=8,
538
+ per_device_train_batch_size=4,
539
+ predict_with_generate=False,
540
+ prediction_loss_only=False,
541
+ push_to_hub=False,
542
+ push_to_hub_model_id=None,
543
+ push_to_hub_organization=None,
544
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
545
+ ray_scope=last,
546
+ remove_unused_columns=True,
547
+ report_to=[],
548
+ resume_from_checkpoint=None,
549
+ run_name=/home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9,
550
+ save_on_each_node=False,
551
+ save_only_model=False,
552
+ save_safetensors=True,
553
+ save_steps=21,
554
+ save_strategy=steps,
555
+ save_total_limit=None,
556
+ seed=42,
557
+ skip_memory_metrics=True,
558
+ sortish_sampler=False,
559
+ split_batches=False,
560
+ tf32=None,
561
+ torch_compile=False,
562
+ torch_compile_backend=None,
563
+ torch_compile_mode=None,
564
+ torchdynamo=None,
565
+ tpu_metrics_debug=False,
566
+ tpu_num_cores=None,
567
+ use_cpu=False,
568
+ use_ipex=False,
569
+ use_legacy_prediction_loop=False,
570
+ use_mps_device=False,
571
+ warmup_ratio=0.0,
572
+ warmup_steps=0,
573
+ weight_decay=0.0,
574
+ )
575
+
576
+ return self.fget.__get__(instance, owner)()
577
+
578
+ return self.fget.__get__(instance, owner)()
579
+
580
+ return self.fget.__get__(instance, owner)()
581
+
582
+ return self.fget.__get__(instance, owner)()
583
+
584
+ 02/01/2024 14:20:35 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
585
+ 02/01/2024 14:20:35 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
586
+
587
+ 02/01/2024 14:20:36 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
588
+ 02/01/2024 14:20:36 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
589
+
590
+ [INFO|modeling_utils.py:4352] 2024-02-01 14:20:36,242 >> All model checkpoint weights were used when initializing InternLM2ForCausalLM.
591
+
592
+ [INFO|modeling_utils.py:4360] 2024-02-01 14:20:36,242 >> All the weights of InternLM2ForCausalLM were initialized from the model checkpoint at /home/lirenhao/pretrained_models/internlm2-chat-7b/.
593
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use InternLM2ForCausalLM for predictions without further training.
594
+ [INFO|configuration_utils.py:779] 2024-02-01 14:20:36,247 >> loading configuration file /home/lirenhao/pretrained_models/internlm2-chat-7b/generation_config.json
595
+ [INFO|configuration_utils.py:826] 2024-02-01 14:20:36,248 >> Generate config GenerationConfig {
596
+ "bos_token_id": 1,
597
+ "eos_token_id": 2,
598
+ "pad_token_id": 2
599
+ }
600
+
601
+ 02/01/2024 14:20:36 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
602
+ 02/01/2024 14:20:36 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
603
+
604
+ 02/01/2024 14:20:36 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
605
+ 02/01/2024 14:20:36 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
606
+ 02/01/2024 14:20:47 - INFO - llmtuner.model.loader - trainable params: 7737708544 || all params: 7737708544 || trainable%: 100.0000
607
+ 02/01/2024 14:20:48 - INFO - llmtuner.data.template - Add <|im_end|> to stop words.
608
+ 02/01/2024 14:20:48 - INFO - llmtuner.model.loader - trainable params: 7737708544 || all params: 7737708544 || trainable%: 100.0000
609
+ 02/01/2024 14:20:48 - INFO - llmtuner.model.loader - trainable params: 7737708544 || all params: 7737708544 || trainable%: 100.0000
610
+ 02/01/2024 14:20:49 - INFO - llmtuner.data.template - Add <|im_end|> to stop words.
611
+ 02/01/2024 14:20:49 - WARNING - llmtuner.data.utils - Checksum failed: missing SHA-1 hash value in dataset_info.json.
612
+ 02/01/2024 14:20:49 - INFO - llmtuner.data.template - Add <|im_end|> to stop words.
613
+ 02/01/2024 14:20:49 - INFO - llmtuner.model.loader - trainable params: 7737708544 || all params: 7737708544 || trainable%: 100.0000
614
+ 02/01/2024 14:20:49 - INFO - llmtuner.data.template - Add <|im_end|> to stop words.
615
+ Using custom data configuration default-7bf826ddf73c2f44
616
+ Loading Dataset Infos from /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/datasets/packaged_modules/json
617
+ Overwrite dataset info from restored data version if exists.
618
+ Loading Dataset info from /home/lirenhao/.cache/huggingface/datasets/json/default-7bf826ddf73c2f44/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96
619
+ Found cached dataset json (/home/lirenhao/.cache/huggingface/datasets/json/default-7bf826ddf73c2f44/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96)
620
+ Loading Dataset info from /home/lirenhao/.cache/huggingface/datasets/json/default-7bf826ddf73c2f44/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96
621
+
622
+
623
+ 02/01/2024 14:20:53 - WARNING - llmtuner.data.utils - Checksum failed: missing SHA-1 hash value in dataset_info.json.
624
+ 02/01/2024 14:20:53 - WARNING - llmtuner.data.utils - Checksum failed: missing SHA-1 hash value in dataset_info.json.
625
+ 02/01/2024 14:20:53 - WARNING - llmtuner.data.utils - Checksum failed: missing SHA-1 hash value in dataset_info.json.
626
+
627
+
628
+
629
+ Caching processed dataset at /home/lirenhao/.cache/huggingface/datasets/json/default-7bf826ddf73c2f44/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-7cecb244118aac13.arrow
630
+
631
+ input_ids:
632
+ [1, 92543, 9081, 364, 2770, 657, 589, 15358, 17993, 6843, 963, 505, 4576, 11146, 451, 60628, 60384, 60721, 62442, 60752, 4452, 285, 4576, 11146, 451, 60628, 60384, 60721, 62442, 60752, 313, 505, 395, 7659, 1813, 4287, 1762, 560, 505, 8020, 684, 36956, 15358, 31288, 451, 68589, 76659, 71581, 699, 1226, 505, 6342, 442, 517, 11100, 328, 10894, 328, 454, 51978, 756, 285, 4576, 11146, 451, 60628, 60384, 60721, 62442, 60752, 313, 777, 3696, 454, 19187, 19829, 4563, 435, 410, 4287, 12032, 684, 410, 1341, 1893, 569, 6519, 454, 262, 69093, 281, 92542, 364, 92543, 1008, 364, 85064, 60703, 60353, 68856, 68306, 61860, 62703, 69516, 68765, 68984, 60362, 60353, 60376, 68678, 60427, 69944, 60355, 92542, 364, 92543, 525, 11353, 364, 73406, 68865, 68364, 69377, 60353, 86839, 70004, 68364, 69516, 69461, 71677, 68287, 60353, 69029, 68831, 68287, 60355, 68931, 69702, 75326, 71838, 60403, 61860, 62703, 77797, 68540, 60355, 364, 92543, 1008, 364, 74820, 68399, 69088, 60677, 68540, 60353, 61032, 71155, 69059, 60355, 92542, 364, 92543, 525, 11353, 364, 91781, 60353, 73161, 80540, 60415, 82098, 60355, 72010, 71404, 60353, 60403, 76153, 68912, 60381, 74112, 61076, 60504, 364, 92543, 1008, 364, 68856, 68306, 68912, 68326, 75848, 68595, 60353, 69972, 71645, 68473, 68585, 60353, 60404, 68965, 61716, 60418, 68273, 60353, 70124, 70698, 60363, 60355, 68389, 60363, 69667, 68306, 68303, 60353, 61214, 68310, 68758, 68261, 70623, 60355, 92542, 364, 92543, 525, 11353, 364, 72010, 69030, 71711, 61076, 60504, 68522, 60353, 86004, 71645, 68629, 68804, 68592, 69095, 60504, 364, 92543, 1008, 364, 84386, 68268, 68315, 75835, 79506, 60353, 60404, 68965, 72245, 68306, 69377, 60355, 60363, 69836, 60427, 70681, 60353, 69281, 91457, 71102, 62600, 62792, 60425, 60355, 92542, 364, 92543, 525, 11353, 364, 82967, 69068, 87160, 68261, 60504, 68319, 60353, 60403, 70868, 69962, 60871, 69893, 60366, 73603, 68261, 60504, 364, 92543, 1008, 364, 68678, 70219, 92396, 84863, 73603, 68252, 71869, 76758, 60353, 68252, 78650, 68306, 71645, 60355, 60363, 70802, 68626, 71010, 73382, 69893, 60353, 60499, 69361, 61032, 68678, 72415, 60355, 92542, 364, 92543, 525, 11353, 364, 72010, 82120, 68381, 72415, 70860, 69209, 61076, 60504, 68522, 60353, 73880, 60359, 75493, 60359, 72415, 60504, 364, 92543, 1008, 364, 74212, 60353, 60363, 73408, 69836, 73880, 60381, 72415, 60355, 69097, 60353, 88720, 60382, 71343, 68254, 70861, 68892, 60459, 71356, 60586, 60355, 92542, 364, 92543, 525, 11353, 364, 68374, 69209, 73175, 68364, 77514, 61076, 60504, 68522, 60353, 68364, 70033, 60359, 69441, 60359, 68273, 60504, 364, 92543, 1008, 364, 88554, 60355, 60363, 68848, 70033, 81269, 60353, 68965, 60520, 80959, 60355, 68389, 60363, 79837, 80665, 60353, 70465, 70802, 70133, 60355, 92542, 364, 92543, 525, 11353, 364, 76273, 68374, 60353, 69060, 71958, 60353, 68364, 69516, 70848, 69715, 60354, 60355, 81425, 68831, 68637, 60353, 80698, 74131, 73382, 79309, 60355, 364, 92543, 1008, 364, 68369, 61076, 60504, 60462, 69735, 91900, 60827, 60504, 92542, 364, 92543, 525, 11353, 364, 68400, 60353, 69897, 68505, 68364, 69209, 60353, 68908, 69116, 60381, 82567, 69290, 60355, 68265, 60353, 73161, 72826, 68288, 69418, 68304, 68747, 60353, 69068, 60381, 71645, 60359, 68303, 82409, 68615, 69715, 60355, 364, 92543, 1008, 364, 82700, 69460, 70417, 60355, 86492, 60577, 69353, 68301, 60827, 60504, 92542, 364, 92543, 525, 11353, 364, 60577, 68505, 68364, 69209, 68301, 60355, 75630, 82261, 68300, 60353, 68848, 68427, 69836, 73880, 68319, 72415, 68269, 60353, 69095, 68540, 74465, 60504, 2]
633
+ inputs:
634
+ <s> <|im_start|> system
635
+ You are an AI assistant whose name is InternLM (书生·浦语).
636
+ - InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.
637
+ - InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.<|im_end|>
638
+ <|im_start|> user
639
+ 心理咨询师,我觉得我的胸闷症状越来越严重了,这让我很害怕。<|im_end|>
640
+ <|im_start|> assistant
641
+ 我能理解你的感受,首先我们要明确你的症状并不是生理问题,而是心理问题。我们可以尝试找出引发你胸闷的心理原因。
642
+ <|im_start|> user
643
+ 可是我一直都在找原因,却找不到答案。<|im_end|>
644
+ <|im_start|> assistant
645
+ 不要着急,我们会一步一步地解决这个问题。你能告诉我,你生活中的压力和困扰吗?
646
+ <|im_start|> user
647
+ 我觉得我的压力主要来自于家庭,我和丈夫关系不好,他总是忙于工作,很少关心我。而且我担心我的孩子,怕他们出了什么意外。<|im_end|>
648
+ <|im_start|> assistant
649
+ 你能详细说说吗?比如,你和丈夫之间的问题具体是什么?
650
+ <|im_start|> user
651
+ 我们经常因为一些小事争吵,他总是忽略我的感受。我感到很孤独,就像被困在一个牢笼里。<|im_end|>
652
+ <|im_start|> assistant
653
+ 这种感觉让你想起了什么?或者,你觉得自己在这段婚姻中失去了什么?
654
+ <|im_start|> user
655
+ 让我想想……我觉得我失去了一个温馨的家,一个关心我的丈夫。我一直在努力维持这段婚姻,但现实却让我失望。<|im_end|>
656
+ <|im_start|> assistant
657
+ 你能体会到这种失望带来的情绪吗?比如,伤心、愤怒、失望?
658
+ <|im_start|> user
659
+ 是的,我经常会感到伤心和失望。有时候,我甚至会怀疑自己的人生是不是选错了路。<|im_end|>
660
+ <|im_start|> assistant
661
+ 这些情绪会影响你的日常生活吗?比如,你的睡眠、饮食、工作?
662
+ <|im_start|> user
663
+ 肯定的。我最近睡眠很差,总是做噩梦。而且我吃得也不好,体重一直在下降。<|im_end|>
664
+ <|im_start|> assistant
665
+ 了解到这些,我想告诉你,你的症状是可以改善的。我们可以通过心理治疗,帮助你走出这段困境。
666
+ <|im_start|> user
667
+ 真的吗?那我要如何做呢?<|im_end|>
668
+ <|im_start|> assistant
669
+ 首先,我们要了解你的情绪,学会面对和接纳它们。然后,我们会教你怎么表达自己的需求,让你和丈夫、孩子之间的关系得到改善。
670
+ <|im_start|> user
671
+ 听起来很有道理。那我们从哪里开始呢?<|im_end|>
672
+ <|im_start|> assistant
673
+ 从了解你的情绪开始。试着回想一下,最近一次感到伤心或者失望的时候,是什么原因导致的?</s>
674
+ label_ids:
675
+ [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 73406, 68865, 68364, 69377, 60353, 86839, 70004, 68364, 69516, 69461, 71677, 68287, 60353, 69029, 68831, 68287, 60355, 68931, 69702, 75326, 71838, 60403, 61860, 62703, 77797, 68540, 60355, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 91781, 60353, 73161, 80540, 60415, 82098, 60355, 72010, 71404, 60353, 60403, 76153, 68912, 60381, 74112, 61076, 60504, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 72010, 69030, 71711, 61076, 60504, 68522, 60353, 86004, 71645, 68629, 68804, 68592, 69095, 60504, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 82967, 69068, 87160, 68261, 60504, 68319, 60353, 60403, 70868, 69962, 60871, 69893, 60366, 73603, 68261, 60504, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 72010, 82120, 68381, 72415, 70860, 69209, 61076, 60504, 68522, 60353, 73880, 60359, 75493, 60359, 72415, 60504, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 68374, 69209, 73175, 68364, 77514, 61076, 60504, 68522, 60353, 68364, 70033, 60359, 69441, 60359, 68273, 60504, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 76273, 68374, 60353, 69060, 71958, 60353, 68364, 69516, 70848, 69715, 60354, 60355, 81425, 68831, 68637, 60353, 80698, 74131, 73382, 79309, 60355, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 68400, 60353, 69897, 68505, 68364, 69209, 60353, 68908, 69116, 60381, 82567, 69290, 60355, 68265, 60353, 73161, 72826, 68288, 69418, 68304, 68747, 60353, 69068, 60381, 71645, 60359, 68303, 82409, 68615, 69715, 60355, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 60577, 68505, 68364, 69209, 68301, 60355, 75630, 82261, 68300, 60353, 68848, 68427, 69836, 73880, 68319, 72415, 68269, 60353, 69095, 68540, 74465, 60504, 2]
676
+ labels:
677
+ 我能理解你的感受,首先我们要明确你的症状并不是生理问题,而是心理问题。我们可以尝试找出引发你胸闷的心理原因。</s> 不要着急,我们会一步一步地解决这个问题。你能告诉我,你生活中的压力和困扰吗?</s> 你能详细说说吗?比如,你和丈夫之间的问题具体是什么?</s> 这种感觉让你想起了什么?或者,你觉得自己在这段婚姻中失去了什么?</s> 你能体会到这种失望带来的情绪吗?比如,伤心、愤怒、失望?</s> 这些情绪会影响你的日常生活吗?比如,你的睡眠、饮食、工作?</s> 了解到这些,我想告诉你,你的症状是可以改善的。我们可以通过心理治疗,帮助你走出这段困境。</s> 首先,我们要了解你的情绪,学会面对和接纳它们。然后,我们会教你怎么表达自己的需求,让你和丈夫、孩子之间的关系得到改善。</s> 从了解你的情绪开始。试着回想一下,最近一次感到伤心或者失望的时候,是什么原因导致的?</s>
678
+ [INFO|training_args.py:1828] 2024-02-01 14:21:08,098 >> PyTorch: setting up devices
679
+
680
+ warnings.warn(
681
+ Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
682
+ [INFO|trainer.py:571] 2024-02-01 14:21:08,153 >> Using auto half precision backend
683
+ [2024-02-01 14:21:08,351] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.13.1, git-hash=unknown, git-branch=unknown
684
+
685
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/training_args.py:1741: FutureWarning: `--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_token` instead.
686
+ warnings.warn(
687
+
688
+
689
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/training_args.py:1741: FutureWarning: `--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_token` instead.
690
+ warnings.warn(
691
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/transformers/training_args.py:1741: FutureWarning: `--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_token` instead.
692
+ warnings.warn(
693
+ [2024-02-01 14:21:41,776] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
694
+ [2024-02-01 14:21:41,778] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer
695
+ [2024-02-01 14:21:41,778] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
696
+ [2024-02-01 14:21:41,794] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW
697
+ [2024-02-01 14:21:41,794] [INFO] [utils.py:56:is_zero_supported_optimizer] Checking ZeRO support for optimizer=AdamW type=<class 'torch.optim.adamw.AdamW'>
698
+ [2024-02-01 14:21:41,794] [INFO] [logging.py:96:log_dist] [Rank 0] Creating torch.float16 ZeRO stage 2 optimizer
699
+ [2024-02-01 14:21:41,795] [INFO] [stage_1_and_2.py:143:__init__] Reduce bucket size 500000000
700
+ [2024-02-01 14:21:41,795] [INFO] [stage_1_and_2.py:144:__init__] Allgather bucket size 500000000
701
+ [2024-02-01 14:21:41,795] [INFO] [stage_1_and_2.py:145:__init__] CPU Offload: False
702
+ [2024-02-01 14:21:41,795] [INFO] [stage_1_and_2.py:146:__init__] Round robin gradient partitioning: False
703
+ [2024-02-01 14:22:01,253] [INFO] [utils.py:791:see_memory_usage] Before initializing optimizer states
704
+ [2024-02-01 14:22:01,254] [INFO] [utils.py:792:see_memory_usage] MA 22.12 GB Max_MA 25.72 GB CA 25.85 GB Max_CA 26 GB
705
+ [2024-02-01 14:22:01,254] [INFO] [utils.py:799:see_memory_usage] CPU Virtual Memory: used = 119.45 GB, percent = 12.4%
706
+ [2024-02-01 14:22:01,614] [INFO] [utils.py:791:see_memory_usage] After initializing optimizer states
707
+ [2024-02-01 14:22:01,615] [INFO] [utils.py:792:see_memory_usage] MA 36.53 GB Max_MA 50.95 GB CA 54.68 GB Max_CA 55 GB
708
+ [2024-02-01 14:22:01,615] [INFO] [utils.py:799:see_memory_usage] CPU Virtual Memory: used = 109.91 GB, percent = 11.4%
709
+ [2024-02-01 14:22:01,615] [INFO] [stage_1_and_2.py:533:__init__] optimizer state initialized
710
+ [2024-02-01 14:22:01,876] [INFO] [utils.py:791:see_memory_usage] After initializing ZeRO optimizer
711
+ [2024-02-01 14:22:01,877] [INFO] [utils.py:792:see_memory_usage] MA 36.53 GB Max_MA 36.53 GB CA 54.68 GB Max_CA 55 GB
712
+ [2024-02-01 14:22:01,878] [INFO] [utils.py:799:see_memory_usage] CPU Virtual Memory: used = 101.72 GB, percent = 10.5%
713
+ [2024-02-01 14:22:01,881] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = AdamW
714
+ [2024-02-01 14:22:01,881] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler
715
+ [2024-02-01 14:22:01,881] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = None
716
+ [2024-02-01 14:22:01,881] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[1e-06], mom=[(0.9, 0.999)]
717
+ [2024-02-01 14:22:01,883] [INFO] [config.py:984:print] DeepSpeedEngine configuration:
718
+ [2024-02-01 14:22:01,883] [INFO] [config.py:988:print] activation_checkpointing_config {
719
+ "partition_activations": false,
720
+ "contiguous_memory_optimization": false,
721
+ "cpu_checkpointing": false,
722
+ "number_checkpoints": null,
723
+ "synchronize_checkpoint_boundary": false,
724
+ "profile": false
725
+ }
726
+ [2024-02-01 14:22:01,883] [INFO] [config.py:988:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
727
+ [2024-02-01 14:22:01,883] [INFO] [config.py:988:print] amp_enabled .................. False
728
+ [2024-02-01 14:22:01,883] [INFO] [config.py:988:print] amp_params ................... False
729
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] autotuning_config ............ {
730
+ "enabled": false,
731
+ "start_step": null,
732
+ "end_step": null,
733
+ "metric_path": null,
734
+ "arg_mappings": null,
735
+ "metric": "throughput",
736
+ "model_info": null,
737
+ "results_dir": "autotuning_results",
738
+ "exps_dir": "autotuning_exps",
739
+ "overwrite": true,
740
+ "fast": true,
741
+ "start_profile_step": 3,
742
+ "end_profile_step": 5,
743
+ "tuner_type": "gridsearch",
744
+ "tuner_early_stopping": 5,
745
+ "tuner_num_trials": 50,
746
+ "model_info_path": null,
747
+ "mp_size": 1,
748
+ "max_train_batch_size": null,
749
+ "min_train_batch_size": 1,
750
+ "max_train_micro_batch_size_per_gpu": 1.024000e+03,
751
+ "min_train_micro_batch_size_per_gpu": 1,
752
+ "num_tuning_micro_batch_sizes": 3
753
+ }
754
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] bfloat16_enabled ............. False
755
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] checkpoint_parallel_write_pipeline False
756
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] checkpoint_tag_validation_enabled True
757
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] checkpoint_tag_validation_fail False
758
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7f7f6152d840>
759
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] communication_data_type ...... None
760
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
761
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] curriculum_enabled_legacy .... False
762
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] curriculum_params_legacy ..... False
763
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
764
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] data_efficiency_enabled ...... False
765
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] dataloader_drop_last ......... False
766
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] disable_allgather ............ False
767
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] dump_state ................... False
768
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] dynamic_loss_scale_args ...... {'init_scale': 65536, 'scale_window': 1000, 'delayed_shift': 2, 'consecutive_hysteresis': False, 'min_scale': 1}
769
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_enabled ........... False
770
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_gas_boundary_resolution 1
771
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_layer_name ........ bert.encoder.layer
772
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_layer_num ......... 0
773
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_max_iter .......... 100
774
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_stability ......... 1e-06
775
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_tol ............... 0.01
776
+ [2024-02-01 14:22:01,884] [INFO] [config.py:988:print] eigenvalue_verbose ........... False
777
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] elasticity_enabled ........... False
778
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] flops_profiler_config ........ {
779
+ "enabled": false,
780
+ "recompute_fwd_factor": 0.0,
781
+ "profile_step": 1,
782
+ "module_depth": -1,
783
+ "top_modules": 1,
784
+ "detailed": true,
785
+ "output_file": null
786
+ }
787
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] fp16_auto_cast ............... False
788
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] fp16_enabled ................. True
789
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] fp16_master_weights_and_gradients False
790
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] global_rank .................. 0
791
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] grad_accum_dtype ............. None
792
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] gradient_accumulation_steps .. 28
793
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] gradient_clipping ............ 1.0
794
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] gradient_predivide_factor .... 1.0
795
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] graph_harvesting ............. False
796
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
797
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] initial_dynamic_scale ........ 65536
798
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] load_universal_checkpoint .... False
799
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] loss_scale ................... 0
800
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] memory_breakdown ............. False
801
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] mics_hierarchial_params_gather False
802
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] mics_shard_size .............. -1
803
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
804
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] nebula_config ................ {
805
+ "enabled": false,
806
+ "persistent_storage_path": null,
807
+ "persistent_time_interval": 100,
808
+ "num_of_version_in_retention": 2,
809
+ "enable_nebula_load": true,
810
+ "load_path": null
811
+ }
812
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] optimizer_legacy_fusion ...... False
813
+ [2024-02-01 14:22:01,885] [INFO] [config.py:988:print] optimizer_name ............... None
814
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] optimizer_params ............. None
815
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': True, 'grad_partitioned': True}
816
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] pld_enabled .................. False
817
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] pld_params ................... False
818
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] prescale_gradients ........... False
819
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] scheduler_name ............... None
820
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] scheduler_params ............. None
821
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] seq_parallel_communication_data_type torch.float32
822
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] sparse_attention ............. None
823
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] sparse_gradients_enabled ..... False
824
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] steps_per_print .............. inf
825
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] train_batch_size ............. 448
826
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] train_micro_batch_size_per_gpu 4
827
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] use_data_before_expert_parallel_ False
828
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] use_node_local_storage ....... False
829
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] wall_clock_breakdown ......... False
830
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] weight_quantization_config ... None
831
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] world_size ................... 4
832
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] zero_allow_untested_optimizer True
833
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] zero_config .................. stage=2 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500000000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500000000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
834
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] zero_enabled ................. True
835
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] zero_force_ds_cpu_optimizer .. True
836
+ [2024-02-01 14:22:01,886] [INFO] [config.py:988:print] zero_optimization_stage ...... 2
837
+ [2024-02-01 14:22:01,887] [INFO] [config.py:974:print_user_config] json = {
838
+ "train_batch_size": 448,
839
+ "train_micro_batch_size_per_gpu": 4,
840
+ "gradient_accumulation_steps": 28,
841
+ "gradient_clipping": 1.0,
842
+ "zero_allow_untested_optimizer": true,
843
+ "fp16": {
844
+ "enabled": true,
845
+ "loss_scale": 0,
846
+ "initial_scale_power": 16,
847
+ "loss_scale_window": 1000,
848
+ "hysteresis": 2,
849
+ "min_loss_scale": 1
850
+ },
851
+ "zero_optimization": {
852
+ "stage": 2,
853
+ "allgather_partitions": true,
854
+ "allgather_bucket_size": 5.000000e+08,
855
+ "reduce_scatter": true,
856
+ "reduce_bucket_size": 5.000000e+08,
857
+ "overlap_comm": false,
858
+ "contiguous_gradients": true
859
+ },
860
+ "steps_per_print": inf,
861
+ "bf16": {
862
+ "enabled": false
863
+ }
864
+ }
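
The JSON block printed above is the user-supplied DeepSpeed configuration: ZeRO stage 2, fp16 with dynamic loss scaling, micro-batch 4 with 28 gradient-accumulation steps. As a minimal stand-alone sketch (not the exact LLaMA-Factory invocation, which wires this up internally), the same config could be written to a file and handed to the 🤗 Trainer through `TrainingArguments`; the file name `ds_config.json` is illustrative:

```python
# Sketch only: reproduces the printed DeepSpeed user config and passes it to
# TrainingArguments. Requires transformers + deepspeed to be installed.
import json
from transformers import TrainingArguments

ds_config = {
    "train_batch_size": 448,
    "train_micro_batch_size_per_gpu": 4,
    "gradient_accumulation_steps": 28,
    "gradient_clipping": 1.0,
    "zero_allow_untested_optimizer": True,
    "fp16": {"enabled": True, "loss_scale": 0, "initial_scale_power": 16,
             "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1},
    "zero_optimization": {"stage": 2, "allgather_partitions": True,
                          "allgather_bucket_size": 500000000, "reduce_scatter": True,
                          "reduce_bucket_size": 500000000, "overlap_comm": False,
                          "contiguous_gradients": True},
    "bf16": {"enabled": False},
}

with open("ds_config.json", "w") as f:
    json.dump(ds_config, f, indent=2)

args = TrainingArguments(
    output_dir="output",
    deepspeed="ds_config.json",        # path to the DeepSpeed config (a dict also works)
    per_device_train_batch_size=4,
    gradient_accumulation_steps=28,
    learning_rate=1e-6,
    num_train_epochs=9.0,
    fp16=True,
)
```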
865
+ [INFO|trainer.py:1721] 2024-02-01 14:22:01,887 >> ***** Running training *****
866
+ [INFO|trainer.py:1722] 2024-02-01 14:22:01,887 >> Num examples = 3,134
867
+ [INFO|trainer.py:1723] 2024-02-01 14:22:01,887 >> Num Epochs = 9
868
+ [INFO|trainer.py:1724] 2024-02-01 14:22:01,887 >> Instantaneous batch size per device = 4
869
+ [INFO|trainer.py:1727] 2024-02-01 14:22:01,887 >> Total train batch size (w. parallel, distributed & accumulation) = 448
870
+ [INFO|trainer.py:1728] 2024-02-01 14:22:01,887 >> Gradient Accumulation steps = 28
871
+ [INFO|trainer.py:1729] 2024-02-01 14:22:01,887 >> Total optimization steps = 63
872
+ [INFO|trainer.py:1730] 2024-02-01 14:22:01,889 >> Number of trainable parameters = 7,737,708,544
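
The numbers in this summary are internally consistent with the hyperparameters listed in the README. A quick consistency check (ignoring distributed-sampler padding):

```python
import math

num_examples = 3134   # from the trainer log above
num_epochs   = 9
micro_batch  = 4      # per device
grad_accum   = 28
world_size   = 4      # GPUs

global_batch    = micro_batch * grad_accum * world_size      # 4 * 28 * 4 = 448
steps_per_epoch = math.ceil(num_examples / global_batch)     # ceil(3134 / 448) = 7
total_steps     = steps_per_epoch * num_epochs                # 7 * 9 = 63

print(global_batch, steps_per_epoch, total_steps)             # 448 7 63
```

This matches the reported total train batch size (448) and total optimization steps (63).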
873
+
874
  0%| | 0/63 [00:00<?, ?it/s]/home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
875
+ warnings.warn(
876
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
877
+ warnings.warn(
878
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
879
+ warnings.warn(
880
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
881
+ warnings.warn(
882
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py:1968: UserWarning: The torch.cuda.*DtypeTensor constructors are no longer recommended. It's best to use methods such as torch.tensor(data, dtype=*, device='cuda') to create tensors. (Triggered internally at ../torch/csrc/tensor/python_tensor.cpp:83.)
883
+ overflow_gpu = get_accelerator().ByteTensor([overflow])
884
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py:1968: UserWarning: The torch.cuda.*DtypeTensor constructors are no longer recommended. It's best to use methods such as torch.tensor(data, dtype=*, device='cuda') to create tensors. (Triggered internally at ../torch/csrc/tensor/python_tensor.cpp:83.)
885
+ overflow_gpu = get_accelerator().ByteTensor([overflow])
886
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py:1968: UserWarning: The torch.cuda.*DtypeTensor constructors are no longer recommended. It's best to use methods such as torch.tensor(data, dtype=*, device='cuda') to create tensors. (Triggered internally at ../torch/csrc/tensor/python_tensor.cpp:83.)
887
+ overflow_gpu = get_accelerator().ByteTensor([overflow])
888
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py:1968: UserWarning: The torch.cuda.*DtypeTensor constructors are no longer recommended. It's best to use methods such as torch.tensor(data, dtype=*, device='cuda') to create tensors. (Triggered internally at ../torch/csrc/tensor/python_tensor.cpp:83.)
889
+ overflow_gpu = get_accelerator().ByteTensor([overflow])
890
+
891
  2%|▏ | 1/63 [00:44<45:32, 44.06s/it]
892
  3%|▎ | 2/63 [01:23<42:01, 41.33s/it]
893
  5%|▍ | 3/63 [02:04<41:04, 41.08s/it]
894
  6%|▋ | 4/63 [02:43<39:37, 40.30s/it]
895
  8%|▊ | 5/63 [03:22<38:23, 39.72s/it][2024-02-01 14:26:04,941] [INFO] [loss_scaler.py:190:update_scale] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536, but hysteresis is 2. Reducing hysteresis to 1
896
+
897
  10%|▉ | 6/63 [04:03<38:08, 40.15s/it][2024-02-01 14:26:44,502] [INFO] [loss_scaler.py:183:update_scale] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536, reducing to 32768
898
+
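
The two OVERFLOW messages above are DeepSpeed's dynamic fp16 loss scaling at work: the loss is multiplied by a scale (initially 2^16 = 65536 from `initial_scale_power: 16`), and a step whose gradients contain inf/NaN is skipped. With `hysteresis: 2`, the first overflow only decrements the hysteresis counter; the second overflow halves the scale to 32768; after `loss_scale_window` (1000) clean steps the scale grows again. A simplified sketch of that state machine (not DeepSpeed's actual implementation):

```python
# Simplified dynamic loss-scaler mirroring the behaviour visible in the log.
class DynamicLossScaler:
    def __init__(self, init_scale=2**16, window=1000, hysteresis=2, min_scale=1):
        self.scale = init_scale
        self.window = window
        self.hysteresis = hysteresis
        self._cur_hysteresis = hysteresis
        self.min_scale = min_scale
        self._good_steps = 0

    def update(self, overflow: bool):
        if overflow:
            if self._cur_hysteresis > 1:
                self._cur_hysteresis -= 1                           # step 5: keep 65536, hysteresis 2 -> 1
            else:
                self.scale = max(self.scale // 2, self.min_scale)   # step 6: 65536 -> 32768
            self._good_steps = 0
        else:
            self._good_steps += 1
            if self._good_steps % self.window == 0:                 # after a clean window, grow back
                self.scale *= 2
                self._cur_hysteresis = self.hysteresis
```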
899
  11%|█ | 7/63 [04:42<37:17, 39.96s/it]
900
  13%|█▎ | 8/63 [05:21<36:23, 39.71s/it]
901
  14%|█▍ | 9/63 [06:01<35:39, 39.62s/it]
902
  16%|█▌ | 10/63 [06:41<35:16, 39.93s/it]
903
 
904
+
905
  16%|█▌ | 10/63 [06:41<35:16, 39.93s/it]
906
  17%|█▋ | 11/63 [07:20<34:22, 39.65s/it]
907
  19%|█▉ | 12/63 [08:00<33:47, 39.76s/it]
908
  21%|██ | 13/63 [08:39<32:56, 39.53s/it]
909
  22%|██▏ | 14/63 [09:20<32:32, 39.85s/it]
910
  24%|██▍ | 15/63 [09:59<31:45, 39.69s/it]
911
  25%|██▌ | 16/63 [10:38<30:47, 39.31s/it]
912
  27%|██▋ | 17/63 [11:19<30:31, 39.82s/it]
913
  29%|██▊ | 18/63 [11:58<29:51, 39.81s/it]
914
  30%|███ | 19/63 [12:39<29:15, 39.89s/it]
915
  32%|███▏ | 20/63 [13:19<28:42, 40.06s/it]
916
 
917
+
918
  32%|███▏ | 20/63 [13:19<28:42, 40.06s/it]
919
  33%|███▎ | 21/63 [13:59<27:59, 39.99s/it][INFO|trainer.py:2926] 2024-02-01 14:36:12,897 >> Saving model checkpoint to /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21
920
+ [INFO|configuration_utils.py:473] 2024-02-01 14:36:12,902 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/config.json
921
+ [INFO|configuration_utils.py:594] 2024-02-01 14:36:12,903 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/generation_config.json
922
+ [INFO|modeling_utils.py:2503] 2024-02-01 14:36:40,422 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/model.safetensors.index.json.
923
+ [INFO|tokenization_utils_base.py:2433] 2024-02-01 14:36:40,424 >> tokenizer config file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/tokenizer_config.json
924
+ [INFO|tokenization_utils_base.py:2442] 2024-02-01 14:36:40,424 >> Special tokens file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/special_tokens_map.json
925
+ [2024-02-01 14:36:41,670] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step21 is about to be saved!
926
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/nn/modules/module.py:1879: UserWarning: Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
927
+ warnings.warn(
928
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/nn/modules/module.py:1879: UserWarning: Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
929
+ warnings.warn(
930
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/nn/modules/module.py:1879: UserWarning: Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
931
+ warnings.warn(
932
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/nn/modules/module.py:1879: UserWarning: Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
933
+ warnings.warn(
934
+ [2024-02-01 14:36:41,683] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/global_step21/mp_rank_00_model_states.pt
935
+ [2024-02-01 14:36:41,684] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/global_step21/mp_rank_00_model_states.pt...
936
+ [2024-02-01 14:37:17,058] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/global_step21/mp_rank_00_model_states.pt.
937
+ [2024-02-01 14:37:17,061] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/global_step21/zero_pp_rank_0_mp_rank_00_optim_states.pt...
938
+ [2024-02-01 14:38:15,362] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/global_step21/zero_pp_rank_0_mp_rank_00_optim_states.pt.
939
+ [2024-02-01 14:38:15,363] [INFO] [engine.py:3477:_save_zero_checkpoint] zero checkpoint saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-21/global_step21/zero_pp_rank_0_mp_rank_00_optim_states.pt
940
+ [2024-02-01 14:38:15,363] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step21 is ready now!
941
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
942
+ warnings.warn(
943
+
944
  35%|███▍ | 22/63 [16:52<54:35, 79.88s/it]
945
  37%|███▋ | 23/63 [17:32<45:23, 68.08s/it]
946
  38%|███▊ | 24/63 [18:15<39:17, 60.44s/it]
947
  40%|███▉ | 25/63 [18:54<34:12, 54.02s/it]
948
  41%|████▏ | 26/63 [19:33<30:33, 49.54s/it]
949
  43%|████▎ | 27/63 [20:12<27:49, 46.38s/it]
950
  44%|████▍ | 28/63 [20:51<25:45, 44.17s/it]
951
  46%|████▌ | 29/63 [21:31<24:19, 42.92s/it]
952
  48%|████▊ | 30/63 [22:11<23:07, 42.06s/it]
953
 
954
+
955
  48%|████▊ | 30/63 [22:11<23:07, 42.06s/it]
956
  49%|████▉ | 31/63 [22:52<22:17, 41.80s/it]
957
  51%|█████ | 32/63 [23:32<21:11, 41.02s/it]
958
  52%|█████▏ | 33/63 [24:10<20:06, 40.20s/it]
959
  54%|█████▍ | 34/63 [24:49<19:18, 39.96s/it]
960
  56%|█████▌ | 35/63 [25:30<18:43, 40.11s/it]
961
  57%|█████▋ | 36/63 [26:10<18:03, 40.13s/it]
962
  59%|█████▊ | 37/63 [26:49<17:12, 39.70s/it]
963
  60%|██████ | 38/63 [27:29<16:36, 39.88s/it]
964
  62%|██████▏ | 39/63 [28:08<15:48, 39.51s/it]
965
  63%|██████▎ | 40/63 [28:46<15:04, 39.34s/it]
966
 
967
+
968
  63%|██████▎ | 40/63 [28:46<15:04, 39.34s/it]
969
  65%|██████▌ | 41/63 [29:27<14:36, 39.84s/it]
970
  67%|██████▋ | 42/63 [30:07<13:57, 39.87s/it][INFO|trainer.py:2926] 2024-02-01 14:52:21,426 >> Saving model checkpoint to /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42
971
+ [INFO|configuration_utils.py:473] 2024-02-01 14:52:21,431 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/config.json
972
+ [INFO|configuration_utils.py:594] 2024-02-01 14:52:21,432 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/generation_config.json
973
+ [INFO|modeling_utils.py:2503] 2024-02-01 14:52:48,702 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/model.safetensors.index.json.
974
+ [INFO|tokenization_utils_base.py:2433] 2024-02-01 14:52:48,704 >> tokenizer config file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/tokenizer_config.json
975
+ [INFO|tokenization_utils_base.py:2442] 2024-02-01 14:52:48,704 >> Special tokens file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/special_tokens_map.json
976
+ [2024-02-01 14:52:49,843] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step42 is about to be saved!
977
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/nn/modules/module.py:1879: UserWarning: Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
978
+ warnings.warn(
979
+ [2024-02-01 14:52:49,856] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/global_step42/mp_rank_00_model_states.pt
980
+ [2024-02-01 14:52:49,856] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/global_step42/mp_rank_00_model_states.pt...
981
+ [2024-02-01 14:53:25,041] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/global_step42/mp_rank_00_model_states.pt.
982
+ [2024-02-01 14:53:25,044] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/global_step42/zero_pp_rank_0_mp_rank_00_optim_states.pt...
983
+ [2024-02-01 14:54:24,364] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/global_step42/zero_pp_rank_0_mp_rank_00_optim_states.pt.
984
+ [2024-02-01 14:54:24,364] [INFO] [engine.py:3477:_save_zero_checkpoint] zero checkpoint saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-42/global_step42/zero_pp_rank_0_mp_rank_00_optim_states.pt
985
+ [2024-02-01 14:54:24,364] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step42 is ready now!
986
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
987
+ warnings.warn(
988
+
989
  68%|██████▊ | 43/63 [33:01<26:37, 79.86s/it]
990
  70%|██████▉ | 44/63 [33:41<21:31, 67.96s/it]
991
  71%|███████▏ | 45/63 [34:20<17:47, 59.29s/it]
992
  73%|███████▎ | 46/63 [35:01<15:13, 53.75s/it]
993
  75%|███████▍ | 47/63 [35:41<13:13, 49.58s/it]
994
  76%|███████▌ | 48/63 [36:21<11:40, 46.71s/it]
995
  78%|███████▊ | 49/63 [37:00<10:25, 44.69s/it]
996
  79%|███████▉ | 50/63 [37:42<09:26, 43.60s/it]
997
 
998
+
999
  79%|███████▉ | 50/63 [37:42<09:26, 43.60s/it]
1000
  81%|████████ | 51/63 [38:20<08:24, 42.07s/it]
1001
  83%|████████▎ | 52/63 [39:00<07:34, 41.29s/it]
1002
  84%|████████▍ | 53/63 [39:41<06:52, 41.22s/it]
1003
  86%|████████▌ | 54/63 [40:21<06:07, 40.87s/it]
1004
  87%|████████▋ | 55/63 [41:00<05:22, 40.28s/it]
1005
  89%|████████▉ | 56/63 [41:38<04:39, 39.88s/it]
1006
  90%|█████████ | 57/63 [42:18<03:58, 39.78s/it]
1007
  92%|█████████▏| 58/63 [42:59<03:20, 40.10s/it]
1008
  94%|█████████▎| 59/63 [43:39<02:40, 40.24s/it]
1009
  95%|█████████▌| 60/63 [44:19<02:00, 40.04s/it]
1010
 
1011
+
1012
  95%|█████████▌| 60/63 [44:19<02:00, 40.04s/it]
1013
  97%|█████████▋| 61/63 [44:58<01:19, 39.67s/it]
1014
  98%|█████████▊| 62/63 [45:38<00:39, 39.80s/it]
1015
+ [INFO|configuration_utils.py:473] 2024-02-01 15:08:30,328 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/config.json
1016
+ [INFO|configuration_utils.py:594] 2024-02-01 15:08:30,329 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/generation_config.json
1017
+ [INFO|modeling_utils.py:2503] 2024-02-01 15:08:57,391 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/model.safetensors.index.json.
1018
+ [INFO|tokenization_utils_base.py:2433] 2024-02-01 15:08:57,393 >> tokenizer config file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/tokenizer_config.json
1019
+ [INFO|tokenization_utils_base.py:2442] 2024-02-01 15:08:57,393 >> Special tokens file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/special_tokens_map.json
1020
+ [2024-02-01 15:08:58,595] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step63 is about to be saved!
1021
+ /home/lirenhao/anaconda3/envs/llama_factory/lib/python3.10/site-packages/torch/nn/modules/module.py:1879: UserWarning: Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.
1022
+ warnings.warn(
1023
+ [2024-02-01 15:08:58,608] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/global_step63/mp_rank_00_model_states.pt
1024
+ [2024-02-01 15:08:58,608] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/global_step63/mp_rank_00_model_states.pt...
1025
+ [2024-02-01 15:09:33,948] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/global_step63/mp_rank_00_model_states.pt.
1026
+ [2024-02-01 15:09:33,951] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/global_step63/zero_pp_rank_0_mp_rank_00_optim_states.pt...
1027
+ [2024-02-01 15:10:31,865] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/global_step63/zero_pp_rank_0_mp_rank_00_optim_states.pt.
1028
+ [2024-02-01 15:10:31,866] [INFO] [engine.py:3477:_save_zero_checkpoint] zero checkpoint saved /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tmp-checkpoint-63/global_step63/zero_pp_rank_0_mp_rank_00_optim_states.pt
1029
+ [2024-02-01 15:10:31,866] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step63 is ready now!
1030
+ [INFO|trainer.py:1962] 2024-02-01 15:10:32,863 >>
1031
+
1032
+ Training completed. Do not forget to share your model on huggingface.co/models =)
1033
+
1034
+
1035
+
1036
 
1037
+
1038
+ [INFO|trainer.py:2926] 2024-02-01 15:10:44,639 >> Saving model checkpoint to /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9
1039
+ [INFO|configuration_utils.py:473] 2024-02-01 15:10:44,787 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/config.json
1040
+ [INFO|configuration_utils.py:594] 2024-02-01 15:10:44,788 >> Configuration saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/generation_config.json
1041
+ [2024-02-01 15:10:49,742] [INFO] [launch.py:347:main] Process 3771596 exits successfully.
1042
+ [2024-02-01 15:10:49,742] [INFO] [launch.py:347:main] Process 3771597 exits successfully.
1043
+ [2024-02-01 15:10:49,742] [INFO] [launch.py:347:main] Process 3771598 exits successfully.
1044
+ [INFO|modeling_utils.py:2503] 2024-02-01 15:11:12,707 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/model.safetensors.index.json.
1045
+ [INFO|tokenization_utils_base.py:2433] 2024-02-01 15:11:12,709 >> tokenizer config file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/tokenizer_config.json
1046
+ [INFO|tokenization_utils_base.py:2442] 2024-02-01 15:11:12,709 >> Special tokens file saved in /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/special_tokens_map.json
1047
+ ***** train metrics *****
1048
+ epoch = 9.0
1049
+ train_loss = 1.4982
1050
+ train_runtime = 0:48:30.97
1051
+ train_samples_per_second = 9.69
1052
+ train_steps_per_second = 0.022
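
These metrics follow directly from the counts above and the values stored in all_results.json:

```python
runtime = 2910.9748                 # seconds, from all_results.json
samples = 3134 * 9                  # examples * epochs
steps   = 63

print(samples / runtime)            # ≈ 9.69  -> train_samples_per_second
print(steps / runtime)              # ≈ 0.022 -> train_steps_per_second
print(divmod(int(runtime), 60))     # (48, 30) -> train_runtime 0:48:30.97
```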
1053
+ Figure saved: /home/lirenhao/projects/LLaMA-Factory/output/9f100e26-d997-46e8-afee-721977a16ca9/training_loss.png
1054
+ 02/01/2024 15:11:14 - WARNING - llmtuner.extras.ploting - No metric eval_loss to plot.
1055
+ [INFO|modelcard.py:452] 2024-02-01 15:11:14,095 >> Dropping the following result as it does not have all the necessary fields:
1056
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
1057
+ [2024-02-01 15:11:17,773] [INFO] [launch.py:347:main] Process 3771595 exits successfully.
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14397212a196150de59782fb606e42dd9cac1940623625bfef4d77dafd0faa66
3
+ size 4885470560
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db2e13cdd738cfbb7eee70e1999e01746d3f84ff978ac7083fcce11f49859c4e
3
+ size 4915913344
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bc3d5d1a4c26135bdb747af7aecc19cf9507f239c48e515351d699d4d0188ad
3
+ size 4915938224
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8938692be133cc903cab9d3e5a45bfd1b010667455b23bbf654d4cb00eebc958
3
+ size 758120576
model.safetensors.index.json ADDED
@@ -0,0 +1,234 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 15475417088
4
+ },
5
+ "weight_map": {
6
+ "model.layers.0.attention.wo.weight": "model-00001-of-00004.safetensors",
7
+ "model.layers.0.attention.wqkv.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.attention_norm.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.ffn_norm.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.1.attention.wo.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.1.attention.wqkv.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.1.attention_norm.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.1.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.1.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.1.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.ffn_norm.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.10.attention.wo.weight": "model-00002-of-00004.safetensors",
21
+ "model.layers.10.attention.wqkv.weight": "model-00002-of-00004.safetensors",
22
+ "model.layers.10.attention_norm.weight": "model-00002-of-00004.safetensors",
23
+ "model.layers.10.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
24
+ "model.layers.10.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
25
+ "model.layers.10.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
26
+ "model.layers.10.ffn_norm.weight": "model-00002-of-00004.safetensors",
27
+ "model.layers.11.attention.wo.weight": "model-00002-of-00004.safetensors",
28
+ "model.layers.11.attention.wqkv.weight": "model-00002-of-00004.safetensors",
29
+ "model.layers.11.attention_norm.weight": "model-00002-of-00004.safetensors",
30
+ "model.layers.11.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
31
+ "model.layers.11.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.11.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.11.ffn_norm.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.12.attention.wo.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.12.attention.wqkv.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.12.attention_norm.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.12.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.12.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.12.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.12.ffn_norm.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.13.attention.wo.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.13.attention.wqkv.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.13.attention_norm.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.13.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.13.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.13.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.13.ffn_norm.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.14.attention.wo.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.14.attention.wqkv.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.14.attention_norm.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.14.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.14.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.14.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.14.ffn_norm.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.15.attention.wo.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.15.attention.wqkv.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.15.attention_norm.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.15.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.15.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.15.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.15.ffn_norm.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.16.attention.wo.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.16.attention.wqkv.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.16.attention_norm.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.16.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.16.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.16.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.16.ffn_norm.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.17.attention.wo.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.17.attention.wqkv.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.17.attention_norm.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.17.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.17.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.17.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.17.ffn_norm.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.18.attention.wo.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.18.attention.wqkv.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.18.attention_norm.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.18.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.18.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.18.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.18.ffn_norm.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.19.attention.wo.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.19.attention.wqkv.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.19.attention_norm.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.19.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.19.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.19.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.19.ffn_norm.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.2.attention.wo.weight": "model-00001-of-00004.safetensors",
91
+ "model.layers.2.attention.wqkv.weight": "model-00001-of-00004.safetensors",
92
+ "model.layers.2.attention_norm.weight": "model-00001-of-00004.safetensors",
93
+ "model.layers.2.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
94
+ "model.layers.2.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
95
+ "model.layers.2.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
96
+ "model.layers.2.ffn_norm.weight": "model-00001-of-00004.safetensors",
97
+ "model.layers.20.attention.wo.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.20.attention.wqkv.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.20.attention_norm.weight": "model-00003-of-00004.safetensors",
100
+ "model.layers.20.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.20.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
102
+ "model.layers.20.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.20.ffn_norm.weight": "model-00003-of-00004.safetensors",
104
+ "model.layers.21.attention.wo.weight": "model-00003-of-00004.safetensors",
105
+ "model.layers.21.attention.wqkv.weight": "model-00003-of-00004.safetensors",
106
+ "model.layers.21.attention_norm.weight": "model-00003-of-00004.safetensors",
107
+ "model.layers.21.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
108
+ "model.layers.21.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
109
+ "model.layers.21.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
110
+ "model.layers.21.ffn_norm.weight": "model-00003-of-00004.safetensors",
111
+ "model.layers.22.attention.wo.weight": "model-00003-of-00004.safetensors",
112
+ "model.layers.22.attention.wqkv.weight": "model-00003-of-00004.safetensors",
113
+ "model.layers.22.attention_norm.weight": "model-00003-of-00004.safetensors",
114
+ "model.layers.22.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
115
+ "model.layers.22.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
116
+ "model.layers.22.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
117
+ "model.layers.22.ffn_norm.weight": "model-00003-of-00004.safetensors",
118
+ "model.layers.23.attention.wo.weight": "model-00003-of-00004.safetensors",
119
+ "model.layers.23.attention.wqkv.weight": "model-00003-of-00004.safetensors",
120
+ "model.layers.23.attention_norm.weight": "model-00003-of-00004.safetensors",
121
+ "model.layers.23.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
122
+ "model.layers.23.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
123
+ "model.layers.23.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
124
+ "model.layers.23.ffn_norm.weight": "model-00003-of-00004.safetensors",
125
+ "model.layers.24.attention.wo.weight": "model-00003-of-00004.safetensors",
126
+ "model.layers.24.attention.wqkv.weight": "model-00003-of-00004.safetensors",
127
+ "model.layers.24.attention_norm.weight": "model-00003-of-00004.safetensors",
128
+ "model.layers.24.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.24.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.24.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
131
+ "model.layers.24.ffn_norm.weight": "model-00003-of-00004.safetensors",
132
+ "model.layers.25.attention.wo.weight": "model-00003-of-00004.safetensors",
133
+ "model.layers.25.attention.wqkv.weight": "model-00003-of-00004.safetensors",
134
+ "model.layers.25.attention_norm.weight": "model-00003-of-00004.safetensors",
135
+ "model.layers.25.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.25.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
137
+ "model.layers.25.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
138
+ "model.layers.25.ffn_norm.weight": "model-00003-of-00004.safetensors",
139
+ "model.layers.26.attention.wo.weight": "model-00003-of-00004.safetensors",
140
+ "model.layers.26.attention.wqkv.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.26.attention_norm.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.26.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.26.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.26.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.26.ffn_norm.weight": "model-00003-of-00004.safetensors",
146
+ "model.layers.27.attention.wo.weight": "model-00003-of-00004.safetensors",
147
+ "model.layers.27.attention.wqkv.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.27.attention_norm.weight": "model-00003-of-00004.safetensors",
149
+ "model.layers.27.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.27.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
151
+ "model.layers.27.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
152
+ "model.layers.27.ffn_norm.weight": "model-00003-of-00004.safetensors",
153
+ "model.layers.28.attention.wo.weight": "model-00003-of-00004.safetensors",
154
+ "model.layers.28.attention.wqkv.weight": "model-00003-of-00004.safetensors",
155
+ "model.layers.28.attention_norm.weight": "model-00003-of-00004.safetensors",
156
+ "model.layers.28.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
157
+ "model.layers.28.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
158
+ "model.layers.28.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
159
+ "model.layers.28.ffn_norm.weight": "model-00003-of-00004.safetensors",
160
+ "model.layers.29.attention.wo.weight": "model-00003-of-00004.safetensors",
161
+ "model.layers.29.attention.wqkv.weight": "model-00003-of-00004.safetensors",
162
+ "model.layers.29.attention_norm.weight": "model-00003-of-00004.safetensors",
163
+ "model.layers.29.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
164
+ "model.layers.29.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.29.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.29.ffn_norm.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.3.attention.wo.weight": "model-00001-of-00004.safetensors",
168
+ "model.layers.3.attention.wqkv.weight": "model-00001-of-00004.safetensors",
169
+ "model.layers.3.attention_norm.weight": "model-00001-of-00004.safetensors",
170
+ "model.layers.3.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
171
+ "model.layers.3.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
172
+ "model.layers.3.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
173
+ "model.layers.3.ffn_norm.weight": "model-00001-of-00004.safetensors",
174
+ "model.layers.30.attention.wo.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.30.attention.wqkv.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.30.attention_norm.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.30.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.30.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.30.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.30.ffn_norm.weight": "model-00003-of-00004.safetensors",
181
+ "model.layers.31.attention.wo.weight": "model-00003-of-00004.safetensors",
182
+ "model.layers.31.attention.wqkv.weight": "model-00003-of-00004.safetensors",
183
+ "model.layers.31.attention_norm.weight": "model-00003-of-00004.safetensors",
184
+ "model.layers.31.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
185
+ "model.layers.31.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.31.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.31.ffn_norm.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.4.attention.wo.weight": "model-00001-of-00004.safetensors",
189
+ "model.layers.4.attention.wqkv.weight": "model-00001-of-00004.safetensors",
190
+ "model.layers.4.attention_norm.weight": "model-00001-of-00004.safetensors",
191
+ "model.layers.4.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
192
+ "model.layers.4.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
193
+ "model.layers.4.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
194
+ "model.layers.4.ffn_norm.weight": "model-00001-of-00004.safetensors",
195
+ "model.layers.5.attention.wo.weight": "model-00001-of-00004.safetensors",
196
+ "model.layers.5.attention.wqkv.weight": "model-00001-of-00004.safetensors",
197
+ "model.layers.5.attention_norm.weight": "model-00001-of-00004.safetensors",
198
+ "model.layers.5.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
199
+ "model.layers.5.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
200
+ "model.layers.5.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
201
+ "model.layers.5.ffn_norm.weight": "model-00001-of-00004.safetensors",
202
+ "model.layers.6.attention.wo.weight": "model-00001-of-00004.safetensors",
203
+ "model.layers.6.attention.wqkv.weight": "model-00001-of-00004.safetensors",
204
+ "model.layers.6.attention_norm.weight": "model-00001-of-00004.safetensors",
205
+ "model.layers.6.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
206
+ "model.layers.6.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
207
+ "model.layers.6.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
208
+ "model.layers.6.ffn_norm.weight": "model-00001-of-00004.safetensors",
209
+ "model.layers.7.attention.wo.weight": "model-00001-of-00004.safetensors",
210
+ "model.layers.7.attention.wqkv.weight": "model-00001-of-00004.safetensors",
211
+ "model.layers.7.attention_norm.weight": "model-00001-of-00004.safetensors",
212
+ "model.layers.7.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
213
+ "model.layers.7.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
214
+ "model.layers.7.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
215
+ "model.layers.7.ffn_norm.weight": "model-00001-of-00004.safetensors",
216
+ "model.layers.8.attention.wo.weight": "model-00001-of-00004.safetensors",
217
+ "model.layers.8.attention.wqkv.weight": "model-00001-of-00004.safetensors",
218
+ "model.layers.8.attention_norm.weight": "model-00001-of-00004.safetensors",
219
+ "model.layers.8.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
220
+ "model.layers.8.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
221
+ "model.layers.8.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
222
+ "model.layers.8.ffn_norm.weight": "model-00001-of-00004.safetensors",
223
+ "model.layers.9.attention.wo.weight": "model-00001-of-00004.safetensors",
224
+ "model.layers.9.attention.wqkv.weight": "model-00001-of-00004.safetensors",
225
+ "model.layers.9.attention_norm.weight": "model-00002-of-00004.safetensors",
226
+ "model.layers.9.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
227
+ "model.layers.9.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
228
+ "model.layers.9.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
229
+ "model.layers.9.ffn_norm.weight": "model-00002-of-00004.safetensors",
230
+ "model.norm.weight": "model-00003-of-00004.safetensors",
231
+ "model.tok_embeddings.weight": "model-00001-of-00004.safetensors",
232
+ "output.weight": "model-00004-of-00004.safetensors"
233
+ }
234
+ }
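
The index above maps every InternLM2 parameter to one of the four `model-0000*-of-00004.safetensors` shards (`total_size` ≈ 15.5 GB of fp16 weights); 🤗 Transformers reassembles the shards from this index automatically. A minimal loading sketch, where `path_or_repo` is a placeholder for this repo id or a local clone; `trust_remote_code=True` is needed because config.json maps the architecture to the bundled modeling_internlm2.py:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path_or_repo = "path/to/this/repo"   # placeholder: repo id or local directory

tokenizer = AutoTokenizer.from_pretrained(path_or_repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path_or_repo,
    torch_dtype=torch.float16,       # matches "torch_dtype": "float16" in config.json
    trust_remote_code=True,          # loads the bundled InternLM2 modeling code
)
# Shards are resolved via model.safetensors.index.json; no manual merging needed.
```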
modeling_internlm2.py ADDED
@@ -0,0 +1,1391 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (
31
+ BaseModelOutputWithPast,
32
+ CausalLMOutputWithPast,
33
+ SequenceClassifierOutputWithPast,
34
+ )
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.utils import (
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+
43
+ try:
44
+ from transformers.generation.streamers import BaseStreamer
45
+ except: # noqa # pylint: disable=bare-except
46
+ BaseStreamer = None
47
+
48
+ from .configuration_internlm2 import InternLM2Config
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CONFIG_FOR_DOC = "InternLM2Config"
53
+
54
+ flash_attn_func, flash_attn_varlen_func = None, None
55
+ pad_input, index_first_axis, unpad_input = None, None, None
56
+ def _import_flash_attn():
57
+ global flash_attn_func, flash_attn_varlen_func
58
+ global pad_input, index_first_axis, unpad_input
59
+ try:
60
+ from flash_attn import flash_attn_func as _flash_attn_func, flash_attn_varlen_func as _flash_attn_varlen_func
61
+ from flash_attn.bert_padding import pad_input as _pad_input, index_first_axis as _index_first_axis, unpad_input as _unpad_input
62
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
63
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
64
+ except ImportError:
65
+ raise ImportError("flash_attn is not installed.")
66
+
67
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
68
+ def _get_unpad_data(attention_mask):
69
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
70
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
71
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
72
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
73
+ return (
74
+ indices,
75
+ cu_seqlens,
76
+ max_seqlen_in_batch,
77
+ )
78
+
79
+
80
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
81
+ def _make_causal_mask(
82
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
83
+ ):
84
+ """
85
+ Make causal mask used for bi-directional self-attention.
86
+ """
87
+ bsz, tgt_len = input_ids_shape
88
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
89
+ mask_cond = torch.arange(mask.size(-1), device=device)
90
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
91
+ mask = mask.to(dtype)
92
+
93
+ if past_key_values_length > 0:
94
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
95
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
96
+
97
+
98
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
99
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
100
+ """
101
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
102
+ """
103
+ bsz, src_len = mask.size()
104
+ tgt_len = tgt_len if tgt_len is not None else src_len
105
+
106
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
107
+
108
+ inverted_mask = 1.0 - expanded_mask
109
+
110
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
111
+
112
+
113
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
114
+ class InternLM2RMSNorm(nn.Module):
115
+ def __init__(self, hidden_size, eps=1e-6):
116
+ """
117
+ InternLM2RMSNorm is equivalent to T5LayerNorm
118
+ """
119
+ super().__init__()
120
+ self.weight = nn.Parameter(torch.ones(hidden_size))
121
+ self.variance_epsilon = eps
122
+
123
+ def forward(self, hidden_states):
124
+ input_dtype = hidden_states.dtype
125
+ hidden_states = hidden_states.to(torch.float32)
126
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
127
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
128
+ return self.weight * hidden_states.to(input_dtype)
129
+
130
+
131
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
132
+ class InternLM2RotaryEmbedding(nn.Module):
133
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
134
+ super().__init__()
135
+
136
+ self.dim = dim
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.base = base
139
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
140
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
141
+
142
+ # Build here to make `torch.jit.trace` work.
143
+ self._set_cos_sin_cache(
144
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
145
+ )
146
+
147
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
148
+ self.max_seq_len_cached = seq_len
149
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
150
+
151
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
152
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
153
+ emb = torch.cat((freqs, freqs), dim=-1)
154
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
155
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
156
+
157
+ def forward(self, x, seq_len=None):
158
+ # x: [bs, num_attention_heads, seq_len, head_size]
159
+ if seq_len > self.max_seq_len_cached:
160
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
161
+
162
+ return (
163
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
164
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
165
+ )
166
+
167
+
168
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
169
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
170
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
171
+
172
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
173
+ self.scaling_factor = scaling_factor
174
+ super().__init__(dim, max_position_embeddings, base, device)
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
179
+ t = t / self.scaling_factor
180
+
181
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
182
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
183
+ emb = torch.cat((freqs, freqs), dim=-1)
184
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
185
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
186
+
187
+
188
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
189
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
190
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
191
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
192
+ """
193
+
194
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
195
+ self.scaling_factor = scaling_factor
196
+ super().__init__(dim, max_position_embeddings, base, device)
197
+
198
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
199
+ self.max_seq_len_cached = seq_len
200
+
201
+ if seq_len > self.max_position_embeddings:
202
+ base = self.base * (
203
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
204
+ ) ** (self.dim / (self.dim - 2))
205
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
206
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
207
+
208
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
209
+
210
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
211
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
214
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
215
+
216
+
217
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
218
+ def rotate_half(x):
219
+ """Rotates half the hidden dims of the input."""
220
+ x1 = x[..., : x.shape[-1] // 2]
221
+ x2 = x[..., x.shape[-1] // 2 :]
222
+ return torch.cat((-x2, x1), dim=-1)
223
+
224
+
225
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
226
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
227
+ """Applies Rotary Position Embedding to the query and key tensors."""
228
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
229
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
230
+ q_embed = (q * cos) + (rotate_half(q) * sin)
231
+ k_embed = (k * cos) + (rotate_half(k) * sin)
232
+ return q_embed, k_embed
233
+
234
+
235
+ class InternLM2MLP(nn.Module):
236
+ def __init__(self, config):
237
+ super().__init__()
238
+ self.config = config
239
+ self.hidden_size = config.hidden_size
240
+ self.intermediate_size = config.intermediate_size
241
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
242
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
243
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
244
+ self.act_fn = ACT2FN[config.hidden_act]
245
+
246
+ def forward(self, x):
247
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
248
+
249
+ return down_proj
250
+
251
+
252
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
253
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
254
+ """
255
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
256
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
257
+ """
258
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
259
+ if n_rep == 1:
260
+ return hidden_states
261
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
262
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
263
+
264
+
265
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
266
+ class InternLM2Attention(nn.Module):
267
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
268
+
269
+ def __init__(self, config: InternLM2Config):
270
+ super().__init__()
271
+ self.config = config
272
+ self.hidden_size = config.hidden_size
273
+ self.num_heads = config.num_attention_heads
274
+ self.head_dim = self.hidden_size // self.num_heads
275
+ self.num_key_value_heads = config.num_key_value_heads
276
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
277
+ self.max_position_embeddings = config.max_position_embeddings
278
+ self.is_causal = True
279
+
280
+ if (self.head_dim * self.num_heads) != self.hidden_size:
281
+ raise ValueError(
282
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
283
+ f" and `num_heads`: {self.num_heads})."
284
+ )
285
+
286
+ self.wqkv = nn.Linear(
287
+ self.hidden_size,
288
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
289
+ bias=config.bias,
290
+ )
291
+
292
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
293
+ self._init_rope()
294
+
295
+ def _init_rope(self):
296
+ if self.config.rope_scaling is None:
297
+ self.rotary_emb = InternLM2RotaryEmbedding(
298
+ self.head_dim,
299
+ max_position_embeddings=self.max_position_embeddings,
300
+ base=self.config.rope_theta,
301
+ )
302
+ else:
303
+ scaling_type = self.config.rope_scaling["type"]
304
+ scaling_factor = self.config.rope_scaling["factor"]
305
+ if scaling_type == "dynamic":
306
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
307
+ self.head_dim,
308
+ max_position_embeddings=self.max_position_embeddings,
309
+ base=self.config.rope_theta,
310
+ scaling_factor=scaling_factor,
311
+ )
312
+ elif scaling_type == "linear":
313
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
314
+ self.head_dim,
315
+ max_position_embeddings=self.max_position_embeddings,
316
+ base=self.config.rope_theta,
317
+ scaling_factor=scaling_factor,
318
+ )
319
+ else:
320
+ raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
321
+ return self.rotary_emb
322
+
323
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
324
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
325
+
326
+ def forward(
327
+ self,
328
+ hidden_states: torch.Tensor,
329
+ attention_mask: Optional[torch.Tensor] = None,
330
+ position_ids: Optional[torch.LongTensor] = None,
331
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
332
+ output_attentions: bool = False,
333
+ use_cache: bool = False,
334
+ **kwargs,
335
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
336
+ if "padding_mask" in kwargs:
337
+ warnings.warn(
338
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
339
+ "Please make sure use `attention_mask` instead.`"
340
+ )
341
+
342
+ bsz, q_len, _ = hidden_states.size()
343
+
344
+ qkv_states = self.wqkv(hidden_states)
345
+
346
+ qkv_states = rearrange(
347
+ qkv_states,
348
+ "b q (h gs d) -> b q h gs d",
349
+ gs=2 + self.num_key_value_groups,
350
+ d=self.head_dim,
351
+ )
352
+
353
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
354
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
355
+ key_states = qkv_states[..., -2, :]
356
+ value_states = qkv_states[..., -1, :]
357
+
358
+ query_states = query_states.transpose(1, 2)
359
+ key_states = key_states.transpose(1, 2)
360
+ value_states = value_states.transpose(1, 2)
361
+
362
+ kv_seq_len = key_states.shape[-2]
363
+ if past_key_value is not None:
364
+ kv_seq_len += past_key_value[0].shape[-2]
365
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
366
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
367
+
368
+ if past_key_value is not None:
369
+ # reuse k, v, self_attention
370
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
371
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
372
+
373
+ past_key_value = (key_states, value_states) if use_cache else None
374
+
375
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
376
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
377
+
378
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
379
+
380
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
381
+ raise ValueError(
382
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
383
+ f" {attn_weights.size()}"
384
+ )
385
+
386
+ if attention_mask is not None:
387
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
388
+ raise ValueError(
389
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
390
+ )
391
+ attn_weights = attn_weights + attention_mask
392
+
393
+ # upcast attention to fp32
394
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
395
+ attn_output = torch.matmul(attn_weights, value_states)
396
+
397
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
398
+ raise ValueError(
399
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
400
+ f" {attn_output.size()}"
401
+ )
402
+
403
+ attn_output = attn_output.transpose(1, 2).contiguous()
404
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
405
+
406
+ attn_output = self.wo(attn_output)
407
+
408
+ if not output_attentions:
409
+ attn_weights = None
410
+
411
+ return attn_output, attn_weights, past_key_value
412
+
413
+
414
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
415
+ class InternLM2FlashAttention2(InternLM2Attention):
416
+ """
417
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stay
418
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
419
+ flash attention and deal with padding tokens in case the input contains any of them.
420
+ """
421
+
422
+ def forward(
423
+ self,
424
+ hidden_states: torch.Tensor,
425
+ attention_mask: Optional[torch.LongTensor] = None,
426
+ position_ids: Optional[torch.LongTensor] = None,
427
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
428
+ output_attentions: bool = False,
429
+ use_cache: bool = False,
430
+ **kwargs,
431
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
432
+ # InternLM2FlashAttention2 attention does not support output_attentions
433
+ if "padding_mask" in kwargs:
434
+ warnings.warn(
435
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
436
+ "Please make sure use `attention_mask` instead.`"
437
+ )
438
+
439
+ # overwrite attention_mask with padding_mask
440
+ attention_mask = kwargs.pop("padding_mask")
441
+
442
+ output_attentions = False
443
+
444
+ bsz, q_len, _ = hidden_states.size()
445
+
446
+ qkv_states = self.wqkv(hidden_states)
447
+
448
+ qkv_states = rearrange(
449
+ qkv_states,
450
+ "b q (h gs d) -> b q h gs d",
451
+ gs=2 + self.num_key_value_groups,
452
+ d=self.head_dim,
453
+ )
454
+
455
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
456
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
457
+ key_states = qkv_states[..., -2, :]
458
+ value_states = qkv_states[..., -1, :]
459
+
460
+ query_states = query_states.transpose(1, 2)
461
+ key_states = key_states.transpose(1, 2)
462
+ value_states = value_states.transpose(1, 2)
463
+
464
+ kv_seq_len = key_states.shape[-2]
465
+ if past_key_value is not None:
466
+ kv_seq_len += past_key_value[0].shape[-2]
467
+
468
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
469
+
470
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
471
+
472
+ if past_key_value is not None:
473
+ # reuse k, v, self_attention
474
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
475
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
476
+
477
+ past_key_value = (key_states, value_states) if use_cache else None
478
+
479
+ query_states = query_states.transpose(1, 2)
480
+ key_states = key_states.transpose(1, 2)
481
+ value_states = value_states.transpose(1, 2)
482
+
483
+ attn_output = self._flash_attention_forward(
484
+ query_states, key_states, value_states, attention_mask, q_len
485
+ )
486
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
487
+ attn_output = self.wo(attn_output)
488
+
489
+ if not output_attentions:
490
+ attn_weights = None
491
+
492
+ return attn_output, attn_weights, past_key_value
493
+
494
+ def _flash_attention_forward(
495
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
496
+ ):
497
+ """
498
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
499
+ first unpads the input, then computes the attention scores and pads the final attention scores.
500
+
501
+ Args:
502
+ query_states (`torch.Tensor`):
503
+ Input query states to be passed to Flash Attention API
504
+ key_states (`torch.Tensor`):
505
+ Input key states to be passed to Flash Attention API
506
+ value_states (`torch.Tensor`):
507
+ Input value states to be passed to Flash Attention API
508
+ attention_mask (`torch.Tensor`):
509
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
510
+ position of padding tokens and 1 for the position of non-padding tokens.
511
+ dropout (`float`, *optional*):
512
+ Attention dropout
513
+ softmax_scale (`float`, *optional*):
514
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
515
+ """
516
+ # Contains at least one padding token in the sequence
517
+ causal = self.is_causal and query_length != 1
518
+ if attention_mask is not None:
519
+ batch_size = query_states.shape[0]
520
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
521
+ query_states, key_states, value_states, attention_mask, query_length
522
+ )
523
+
524
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
525
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
526
+
527
+ attn_output_unpad = flash_attn_varlen_func(
528
+ query_states,
529
+ key_states,
530
+ value_states,
531
+ cu_seqlens_q=cu_seqlens_q,
532
+ cu_seqlens_k=cu_seqlens_k,
533
+ max_seqlen_q=max_seqlen_in_batch_q,
534
+ max_seqlen_k=max_seqlen_in_batch_k,
535
+ dropout_p=dropout,
536
+ softmax_scale=softmax_scale,
537
+ causal=causal,
538
+ )
539
+
540
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
541
+ else:
542
+ attn_output = flash_attn_func(
543
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
544
+ )
545
+
546
+ return attn_output
547
+
548
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
549
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
550
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
551
+
552
+ key_layer = index_first_axis(
553
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
554
+ )
555
+ value_layer = index_first_axis(
556
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
557
+ )
558
+
559
+ if query_length == kv_seq_len:
560
+ query_layer = index_first_axis(
561
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
562
+ )
563
+ cu_seqlens_q = cu_seqlens_k
564
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
565
+ indices_q = indices_k
566
+ elif query_length == 1:
567
+ max_seqlen_in_batch_q = 1
568
+ cu_seqlens_q = torch.arange(
569
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
570
+ ) # There is a memcpy here, that is very bad.
571
+ indices_q = cu_seqlens_q[:-1]
572
+ query_layer = query_layer.squeeze(1)
573
+ else:
574
+ # The -q_len: slice assumes left padding.
575
+ attention_mask = attention_mask[:, -query_length:]
576
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
577
+
578
+ return (
579
+ query_layer,
580
+ key_layer,
581
+ value_layer,
582
+ indices_q.to(torch.int64),
583
+ (cu_seqlens_q, cu_seqlens_k),
584
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
585
+ )
586
+
587
+ INTERNLM2_ATTENTION_CLASSES = {
588
+ "eager": InternLM2Attention,
589
+ "flash_attention_2": InternLM2FlashAttention2,
590
+ }
591
+
592
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
593
+ class InternLM2DecoderLayer(nn.Module):
594
+ def __init__(self, config: InternLM2Config):
595
+ super().__init__()
596
+ self.hidden_size = config.hidden_size
597
+
598
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
599
+
600
+ self.feed_forward = InternLM2MLP(config)
601
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
602
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
603
+
604
+ def forward(
605
+ self,
606
+ hidden_states: torch.Tensor,
607
+ attention_mask: Optional[torch.Tensor] = None,
608
+ position_ids: Optional[torch.LongTensor] = None,
609
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
610
+ output_attentions: Optional[bool] = False,
611
+ use_cache: Optional[bool] = False,
612
+ **kwargs,
613
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
614
+ """
615
+ Args:
616
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
617
+ attention_mask (`torch.FloatTensor`, *optional*):
618
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
619
+ query_sequence_length, key_sequence_length)` if default attention is used.
620
+ output_attentions (`bool`, *optional*):
621
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
622
+ returned tensors for more detail.
623
+ use_cache (`bool`, *optional*):
624
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
625
+ (see `past_key_values`).
626
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
627
+ """
628
+ if "padding_mask" in kwargs:
629
+ warnings.warn(
630
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
631
+ "Please make sure use `attention_mask` instead.`"
632
+ )
633
+
634
+ residual = hidden_states
635
+
636
+ hidden_states = self.attention_norm(hidden_states)
637
+
638
+ # Self Attention
639
+ hidden_states, self_attn_weights, present_key_value = self.attention(
640
+ hidden_states=hidden_states,
641
+ attention_mask=attention_mask,
642
+ position_ids=position_ids,
643
+ past_key_value=past_key_value,
644
+ output_attentions=output_attentions,
645
+ use_cache=use_cache,
646
+ **kwargs,
647
+ )
648
+ hidden_states = residual + hidden_states
649
+
650
+ # Fully Connected
651
+ residual = hidden_states
652
+ hidden_states = self.ffn_norm(hidden_states)
653
+ hidden_states = self.feed_forward(hidden_states)
654
+ hidden_states = residual + hidden_states
655
+
656
+ outputs = (hidden_states,)
657
+
658
+ if output_attentions:
659
+ outputs += (self_attn_weights,)
660
+
661
+ if use_cache:
662
+ outputs += (present_key_value,)
663
+
664
+ return outputs
665
+
666
+
667
+ InternLM2_START_DOCSTRING = r"""
668
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
669
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
670
+ etc.)
671
+
672
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
673
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
674
+ and behavior.
675
+
676
+ Parameters:
677
+ config ([`InternLM2Config`]):
678
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
679
+ load the weights associated with the model, only the configuration. Check out the
680
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
681
+ """
682
+
683
+
684
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
685
+ @add_start_docstrings(
686
+ "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
687
+ InternLM2_START_DOCSTRING,
688
+ )
689
+ class InternLM2PreTrainedModel(PreTrainedModel):
690
+ config_class = InternLM2Config
691
+ base_model_prefix = "model"
692
+ supports_gradient_checkpointing = True
693
+ _no_split_modules = ["InternLM2DecoderLayer"]
694
+ _skip_keys_device_placement = "past_key_values"
695
+
696
+ def _init_weights(self, module):
697
+ std = self.config.initializer_range
698
+ if isinstance(module, nn.Linear):
699
+ module.weight.data.normal_(mean=0.0, std=std)
700
+ if module.bias is not None:
701
+ module.bias.data.zero_()
702
+ elif isinstance(module, nn.Embedding):
703
+ module.weight.data.normal_(mean=0.0, std=std)
704
+ if module.padding_idx is not None:
705
+ module.weight.data[module.padding_idx].zero_()
706
+
707
+
708
+ InternLM2_INPUTS_DOCSTRING = r"""
709
+ Args:
710
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
711
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
712
+ it.
713
+
714
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
715
+ [`PreTrainedTokenizer.__call__`] for details.
716
+
717
+ [What are input IDs?](../glossary#input-ids)
718
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
719
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
720
+
721
+ - 1 for tokens that are **not masked**,
722
+ - 0 for tokens that are **masked**.
723
+
724
+ [What are attention masks?](../glossary#attention-mask)
725
+
726
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
727
+ [`PreTrainedTokenizer.__call__`] for details.
728
+
729
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
730
+ `past_key_values`).
731
+
732
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
733
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
734
+ information on the default strategy.
735
+
736
+ - 1 indicates the head is **not masked**,
737
+ - 0 indicates the head is **masked**.
738
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
739
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
740
+ config.n_positions - 1]`.
741
+
742
+ [What are position IDs?](../glossary#position-ids)
743
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
744
+ when `config.use_cache=True`):
745
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
746
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
747
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
748
+
749
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
750
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
751
+
752
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
753
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
754
+ of shape `(batch_size, sequence_length)`.
755
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
756
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
757
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
758
+ model's internal embedding lookup matrix.
759
+ use_cache (`bool`, *optional*):
760
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
761
+ `past_key_values`).
762
+ output_attentions (`bool`, *optional*):
763
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
764
+ tensors for more detail.
765
+ output_hidden_states (`bool`, *optional*):
766
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
767
+ more detail.
768
+ return_dict (`bool`, *optional*):
769
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
770
+ """
771
+
772
+
773
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
774
+ @add_start_docstrings(
775
+ "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
776
+ InternLM2_START_DOCSTRING,
777
+ )
778
+ class InternLM2Model(InternLM2PreTrainedModel):
779
+ """
780
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
781
+
782
+ Args:
783
+ config: InternLM2Config
784
+ """
785
+
786
+ _auto_class = "AutoModel"
787
+
788
+ def __init__(self, config: InternLM2Config):
789
+ super().__init__(config)
790
+ self.padding_idx = config.pad_token_id
791
+ self.vocab_size = config.vocab_size
792
+ self.config = config
793
+
794
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
795
+
796
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
797
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
798
+
799
+ self.gradient_checkpointing = False
800
+ # Initialize weights and apply final processing
801
+ self.post_init()
802
+
803
+ def get_input_embeddings(self):
804
+ return self.tok_embeddings
805
+
806
+ def set_input_embeddings(self, value):
807
+ self.tok_embeddings = value
808
+
809
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
810
+ # create causal mask
811
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
812
+ combined_attention_mask = None
813
+ if input_shape[-1] > 1:
814
+ combined_attention_mask = _make_causal_mask(
815
+ input_shape,
816
+ inputs_embeds.dtype,
817
+ device=inputs_embeds.device,
818
+ past_key_values_length=past_key_values_length,
819
+ )
820
+
821
+ if attention_mask is not None:
822
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
823
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
824
+ inputs_embeds.device
825
+ )
826
+ combined_attention_mask = (
827
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
828
+ )
829
+
830
+ return combined_attention_mask
831
+
832
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
833
+ def forward(
834
+ self,
835
+ input_ids: torch.LongTensor = None,
836
+ attention_mask: Optional[torch.Tensor] = None,
837
+ position_ids: Optional[torch.LongTensor] = None,
838
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
839
+ inputs_embeds: Optional[torch.FloatTensor] = None,
840
+ use_cache: Optional[bool] = None,
841
+ output_attentions: Optional[bool] = None,
842
+ output_hidden_states: Optional[bool] = None,
843
+ return_dict: Optional[bool] = None,
844
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
845
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
846
+ output_hidden_states = (
847
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
848
+ )
849
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
850
+
851
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
852
+
853
+ if self.config.attn_implementation == "flash_attention_2":
854
+ _import_flash_attn()
855
+
856
+ # retrieve input_ids and inputs_embeds
857
+ if input_ids is not None and inputs_embeds is not None:
858
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
859
+ elif input_ids is not None:
860
+ batch_size, seq_length = input_ids.shape[:2]
861
+ elif inputs_embeds is not None:
862
+ batch_size, seq_length = inputs_embeds.shape[:2]
863
+ else:
864
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
865
+
866
+ seq_length_with_past = seq_length
867
+ past_key_values_length = 0
868
+ if past_key_values is not None:
869
+ past_key_values_length = past_key_values[0][0].shape[2]
870
+ seq_length_with_past = seq_length_with_past + past_key_values_length
871
+
872
+ if position_ids is None:
873
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
874
+ position_ids = torch.arange(
875
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
876
+ )
877
+ position_ids = position_ids.unsqueeze(0)
878
+
879
+ if inputs_embeds is None:
880
+ inputs_embeds = self.tok_embeddings(input_ids)
881
+
882
+ if self.config.attn_implementation == "flash_attention_2":
883
+ # 2d mask is passed through the layers
884
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
885
+ else:
886
+ if attention_mask is None:
887
+ attention_mask = torch.ones(
888
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
889
+ )
890
+ attention_mask = self._prepare_decoder_attention_mask(
891
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
892
+ )
893
+
894
+ # embed positions
895
+ hidden_states = inputs_embeds
896
+
897
+ if self.gradient_checkpointing and self.training:
898
+ if use_cache:
899
+ logger.warning_once(
900
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
901
+ )
902
+ use_cache = False
903
+
904
+ # decoder layers
905
+ all_hidden_states = () if output_hidden_states else None
906
+ all_self_attns = () if output_attentions else None
907
+ next_decoder_cache = () if use_cache else None
908
+
909
+ for idx, decoder_layer in enumerate(self.layers):
910
+ if output_hidden_states:
911
+ all_hidden_states += (hidden_states,)
912
+
913
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
914
+
915
+ if self.gradient_checkpointing and self.training:
916
+
917
+ def create_custom_forward(module):
918
+ def custom_forward(*inputs):
919
+ # None for past_key_value
920
+ return module(*inputs, output_attentions, None)
921
+
922
+ return custom_forward
923
+
924
+ layer_outputs = torch.utils.checkpoint.checkpoint(
925
+ create_custom_forward(decoder_layer),
926
+ hidden_states,
927
+ attention_mask,
928
+ position_ids,
929
+ None,
930
+ )
931
+ else:
932
+ layer_outputs = decoder_layer(
933
+ hidden_states,
934
+ attention_mask=attention_mask,
935
+ position_ids=position_ids,
936
+ past_key_value=past_key_value,
937
+ output_attentions=output_attentions,
938
+ use_cache=use_cache,
939
+ )
940
+
941
+ hidden_states = layer_outputs[0]
942
+
943
+ if use_cache:
944
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
945
+
946
+ if output_attentions:
947
+ all_self_attns += (layer_outputs[1],)
948
+
949
+ hidden_states = self.norm(hidden_states)
950
+
951
+ # add hidden states from the last decoder layer
952
+ if output_hidden_states:
953
+ all_hidden_states += (hidden_states,)
954
+
955
+ next_cache = next_decoder_cache if use_cache else None
956
+ if not return_dict:
957
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
958
+ return BaseModelOutputWithPast(
959
+ last_hidden_state=hidden_states,
960
+ past_key_values=next_cache,
961
+ hidden_states=all_hidden_states,
962
+ attentions=all_self_attns,
963
+ )
964
+
965
+
966
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
967
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
968
+ _auto_class = "AutoModelForCausalLM"
969
+
970
+ _tied_weights_keys = ["output.weight"]
971
+
972
+ def __init__(self, config):
973
+ super().__init__(config)
974
+ self.model = InternLM2Model(config)
975
+ self.vocab_size = config.vocab_size
976
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
977
+
978
+ # Initialize weights and apply final processing
979
+ self.post_init()
980
+
981
+ def get_input_embeddings(self):
982
+ return self.model.tok_embeddings
983
+
984
+ def set_input_embeddings(self, value):
985
+ self.model.tok_embeddings = value
986
+
987
+ def get_output_embeddings(self):
988
+ return self.output
989
+
990
+ def set_output_embeddings(self, new_embeddings):
991
+ self.output = new_embeddings
992
+
993
+ def set_decoder(self, decoder):
994
+ self.model = decoder
995
+
996
+ def get_decoder(self):
997
+ return self.model
998
+
999
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1000
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1001
+ def forward(
1002
+ self,
1003
+ input_ids: torch.LongTensor = None,
1004
+ attention_mask: Optional[torch.Tensor] = None,
1005
+ position_ids: Optional[torch.LongTensor] = None,
1006
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1007
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1008
+ labels: Optional[torch.LongTensor] = None,
1009
+ use_cache: Optional[bool] = None,
1010
+ output_attentions: Optional[bool] = None,
1011
+ output_hidden_states: Optional[bool] = None,
1012
+ return_dict: Optional[bool] = None,
1013
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1014
+ r"""
1015
+ Args:
1016
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1017
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1018
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1019
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1020
+
1021
+ Returns:
1022
+
1023
+ Example:
1024
+
1025
+ ```python
1026
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1027
+
1028
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1029
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1030
+
1031
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1032
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1033
+
1034
+ >>> # Generate
1035
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1036
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1037
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1038
+ ```"""
1039
+
1040
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1041
+ output_hidden_states = (
1042
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1043
+ )
1044
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1045
+
1046
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1047
+ outputs = self.model(
1048
+ input_ids=input_ids,
1049
+ attention_mask=attention_mask,
1050
+ position_ids=position_ids,
1051
+ past_key_values=past_key_values,
1052
+ inputs_embeds=inputs_embeds,
1053
+ use_cache=use_cache,
1054
+ output_attentions=output_attentions,
1055
+ output_hidden_states=output_hidden_states,
1056
+ return_dict=return_dict,
1057
+ )
1058
+
1059
+ hidden_states = outputs[0]
1060
+ logits = self.output(hidden_states)
1061
+ logits = logits.float()
1062
+
1063
+ loss = None
1064
+ if labels is not None:
1065
+ # Shift so that tokens < n predict n
1066
+ shift_logits = logits[..., :-1, :].contiguous()
1067
+ shift_labels = labels[..., 1:].contiguous()
1068
+ # Flatten the tokens
1069
+ loss_fct = CrossEntropyLoss()
1070
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1071
+ shift_labels = shift_labels.view(-1)
1072
+ # Enable model parallelism
1073
+ shift_labels = shift_labels.to(shift_logits.device)
1074
+ loss = loss_fct(shift_logits, shift_labels)
1075
+
1076
+ if not return_dict:
1077
+ output = (logits,) + outputs[1:]
1078
+ return (loss,) + output if loss is not None else output
1079
+
1080
+ return CausalLMOutputWithPast(
1081
+ loss=loss,
1082
+ logits=logits,
1083
+ past_key_values=outputs.past_key_values,
1084
+ hidden_states=outputs.hidden_states,
1085
+ attentions=outputs.attentions,
1086
+ )
1087
+
1088
+ def prepare_inputs_for_generation(
1089
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1090
+ ):
1091
+ if past_key_values is not None:
1092
+ past_length = past_key_values[0][0].shape[2]
1093
+
1094
+ # Some generation methods already pass only the last input ID
1095
+ if input_ids.shape[1] > past_length:
1096
+ remove_prefix_length = past_length
1097
+ else:
1098
+ # Default to old behavior: keep only final ID
1099
+ remove_prefix_length = input_ids.shape[1] - 1
1100
+
1101
+ input_ids = input_ids[:, remove_prefix_length:]
1102
+
1103
+ position_ids = kwargs.get("position_ids", None)
1104
+ if attention_mask is not None and position_ids is None:
1105
+ # create position_ids on the fly for batch generation
1106
+ position_ids = attention_mask.long().cumsum(-1) - 1
1107
+ position_ids.masked_fill_(attention_mask == 0, 1)
1108
+ if past_key_values:
1109
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1110
+
1111
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1112
+ if inputs_embeds is not None and past_key_values is None:
1113
+ model_inputs = {"inputs_embeds": inputs_embeds}
1114
+ else:
1115
+ model_inputs = {"input_ids": input_ids}
1116
+
1117
+ model_inputs.update(
1118
+ {
1119
+ "position_ids": position_ids,
1120
+ "past_key_values": past_key_values,
1121
+ "use_cache": kwargs.get("use_cache"),
1122
+ "attention_mask": attention_mask,
1123
+ }
1124
+ )
1125
+ return model_inputs
1126
+
1127
+ @staticmethod
1128
+ def _reorder_cache(past_key_values, beam_idx):
1129
+ reordered_past = ()
1130
+ for layer_past in past_key_values:
1131
+ reordered_past += (
1132
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1133
+ )
1134
+ return reordered_past
1135
+
1136
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=""):
1137
+ if tokenizer.add_bos_token:
1138
+ prompt = ""
1139
+ else:
1140
+ prompt = tokenizer.bos_token
1141
+ if meta_instruction:
1142
+ prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1143
+ for record in history:
1144
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1145
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1146
+ return tokenizer([prompt], return_tensors="pt")
1147
+
1148
+ @torch.no_grad()
1149
+ def chat(
1150
+ self,
1151
+ tokenizer,
1152
+ query: str,
1153
+ history: List[Tuple[str, str]] = [],
1154
+ streamer: Optional[BaseStreamer] = None,
1155
+ max_new_tokens: int = 1024,
1156
+ do_sample: bool = True,
1157
+ temperature: float = 0.8,
1158
+ top_p: float = 0.8,
1159
+ meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
1160
+ "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
1161
+ "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.",
1162
+ **kwargs,
1163
+ ):
1164
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1165
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1166
+ # also add end-of-assistant token in eos token id to avoid unnecessary generation
1167
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["<|im_end|>"])[0]]
1168
+ outputs = self.generate(
1169
+ **inputs,
1170
+ streamer=streamer,
1171
+ max_new_tokens=max_new_tokens,
1172
+ do_sample=do_sample,
1173
+ temperature=temperature,
1174
+ top_p=top_p,
1175
+ eos_token_id=eos_token_id,
1176
+ **kwargs,
1177
+ )
1178
+ outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
1179
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1180
+ response = response.split("<|im_end|>")[0]
1181
+ history = history + [(query, response)]
1182
+ return response, history
1183
+
1184
+ @torch.no_grad()
1185
+ def stream_chat(
1186
+ self,
1187
+ tokenizer,
1188
+ query: str,
1189
+ history: List[Tuple[str, str]] = [],
1190
+ max_new_tokens: int = 1024,
1191
+ do_sample: bool = True,
1192
+ temperature: float = 0.8,
1193
+ top_p: float = 0.8,
1194
+ **kwargs,
1195
+ ):
1196
+ """
1197
+ Return a generator in format: (response, history)
1198
+ Eg.
1199
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
1200
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
1201
+ """
1202
+ if BaseStreamer is None:
1203
+ raise ModuleNotFoundError(
1204
+ "The version of `transformers` is too low. Please make sure "
1205
+ "that you have installed `transformers>=4.28.0`."
1206
+ )
1207
+
1208
+ response_queue = queue.Queue(maxsize=20)
1209
+
1210
+ class ChatStreamer(BaseStreamer):
1211
+ def __init__(self, tokenizer) -> None:
1212
+ super().__init__()
1213
+ self.tokenizer = tokenizer
1214
+ self.queue = response_queue
1215
+ self.query = query
1216
+ self.history = history
1217
+ self.response = ""
1218
+ self.cache = []
1219
+ self.received_inputs = False
1220
+ self.queue.put((self.response, history + [(self.query, self.response)]))
1221
+
1222
+ def put(self, value):
1223
+ if len(value.shape) > 1 and value.shape[0] > 1:
1224
+ raise ValueError("ChatStreamer only supports batch size 1")
1225
+ elif len(value.shape) > 1:
1226
+ value = value[0]
1227
+
1228
+ if not self.received_inputs:
1229
+ # The first received value is input_ids, ignore here
1230
+ self.received_inputs = True
1231
+ return
1232
+
1233
+ self.cache.extend(value.tolist())
1234
+ token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
1235
+ if token.strip() != "<|im_end|>":
1236
+ self.response = self.response + token
1237
+ history = self.history + [(self.query, self.response)]
1238
+ self.queue.put((self.response, history))
1239
+ self.cache = []
1240
+ else:
1241
+ self.end()
1242
+
1243
+ def end(self):
1244
+ self.queue.put(None)
1245
+
1246
+ def stream_producer():
1247
+ return self.chat(
1248
+ tokenizer=tokenizer,
1249
+ query=query,
1250
+ streamer=ChatStreamer(tokenizer=tokenizer),
1251
+ history=history,
1252
+ max_new_tokens=max_new_tokens,
1253
+ do_sample=do_sample,
1254
+ temperature=temperature,
1255
+ top_p=top_p,
1256
+ **kwargs,
1257
+ )
1258
+
1259
+ def consumer():
1260
+ producer = threading.Thread(target=stream_producer)
1261
+ producer.start()
1262
+ while True:
1263
+ res = response_queue.get()
1264
+ if res is None:
1265
+ return
1266
+ yield res
1267
+
1268
+ return consumer()
1269
+
1270
+
1271
+ # Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1272
+ @add_start_docstrings(
1273
+ """
1274
+ The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1275
+
1276
+ [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
1277
+ as other causal models (e.g. GPT-2) do.
1278
+
1279
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1280
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1281
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1282
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1283
+ each row of the batch).
1284
+ """,
1285
+ InternLM2_START_DOCSTRING,
1286
+ )
1287
+ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1288
+ def __init__(self, config):
1289
+ super().__init__(config)
1290
+ self.num_labels = config.num_labels
1291
+ self.model = InternLM2Model(config)
1292
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1293
+
1294
+ # Initialize weights and apply final processing
1295
+ self.post_init()
1296
+
1297
+ def get_input_embeddings(self):
1298
+ return self.model.tok_embeddings
1299
+
1300
+ def set_input_embeddings(self, value):
1301
+ self.model.tok_embeddings = value
1302
+
1303
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1304
+ def forward(
1305
+ self,
1306
+ input_ids: torch.LongTensor = None,
1307
+ attention_mask: Optional[torch.Tensor] = None,
1308
+ position_ids: Optional[torch.LongTensor] = None,
1309
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1310
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1311
+ labels: Optional[torch.LongTensor] = None,
1312
+ use_cache: Optional[bool] = None,
1313
+ output_attentions: Optional[bool] = None,
1314
+ output_hidden_states: Optional[bool] = None,
1315
+ return_dict: Optional[bool] = None,
1316
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1317
+ r"""
1318
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1319
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1320
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1321
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1322
+ """
1323
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1324
+
1325
+ transformer_outputs = self.model(
1326
+ input_ids,
1327
+ attention_mask=attention_mask,
1328
+ position_ids=position_ids,
1329
+ past_key_values=past_key_values,
1330
+ inputs_embeds=inputs_embeds,
1331
+ use_cache=use_cache,
1332
+ output_attentions=output_attentions,
1333
+ output_hidden_states=output_hidden_states,
1334
+ return_dict=return_dict,
1335
+ )
1336
+ hidden_states = transformer_outputs[0]
1337
+ logits = self.score(hidden_states)
1338
+
1339
+ if input_ids is not None:
1340
+ batch_size = input_ids.shape[0]
1341
+ else:
1342
+ batch_size = inputs_embeds.shape[0]
1343
+
1344
+ if self.config.pad_token_id is None and batch_size != 1:
1345
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1346
+ if self.config.pad_token_id is None:
1347
+ sequence_lengths = -1
1348
+ else:
1349
+ if input_ids is not None:
1350
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
1351
+ logits.device
1352
+ )
1353
+ else:
1354
+ sequence_lengths = -1
1355
+
1356
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1357
+
1358
+ loss = None
1359
+ if labels is not None:
1360
+ labels = labels.to(logits.device)
1361
+ if self.config.problem_type is None:
1362
+ if self.num_labels == 1:
1363
+ self.config.problem_type = "regression"
1364
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1365
+ self.config.problem_type = "single_label_classification"
1366
+ else:
1367
+ self.config.problem_type = "multi_label_classification"
1368
+
1369
+ if self.config.problem_type == "regression":
1370
+ loss_fct = MSELoss()
1371
+ if self.num_labels == 1:
1372
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1373
+ else:
1374
+ loss = loss_fct(pooled_logits, labels)
1375
+ elif self.config.problem_type == "single_label_classification":
1376
+ loss_fct = CrossEntropyLoss()
1377
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1378
+ elif self.config.problem_type == "multi_label_classification":
1379
+ loss_fct = BCEWithLogitsLoss()
1380
+ loss = loss_fct(pooled_logits, labels)
1381
+ if not return_dict:
1382
+ output = (pooled_logits,) + transformer_outputs[1:]
1383
+ return ((loss,) + output) if loss is not None else output
1384
+
1385
+ return SequenceClassifierOutputWithPast(
1386
+ loss=loss,
1387
+ logits=pooled_logits,
1388
+ past_key_values=transformer_outputs.past_key_values,
1389
+ hidden_states=transformer_outputs.hidden_states,
1390
+ attentions=transformer_outputs.attentions,
1391
+ )
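The pooling rule described in the class docstring above (take the logit at the last non-padding position when a `pad_token_id` is configured, otherwise the last position) is easiest to see in isolation. Below is a minimal sketch with plain PyTorch and made-up values, using pad id 2, the `</s>` id declared in tokenizer_config.json further down:

import torch

pad_token_id = 2
input_ids = torch.tensor([
    [5, 7, 9, 2, 2],   # three real tokens followed by padding
    [4, 6, 8, 3, 1],   # no padding anywhere in this row
])
batch_size, seq_len = input_ids.shape
logits = torch.randn(batch_size, seq_len, 3)  # (batch, seq, num_labels)

# First pad position minus one = last real token; a row without padding makes
# argmax return 0, so 0 - 1 = -1 falls back to the final position, exactly as
# in the forward pass above.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
pooled_logits = logits[torch.arange(batch_size), sequence_lengths]
print(sequence_lengths.tolist())   # [2, -1]
print(pooled_logits.shape)         # torch.Size([2, 3])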
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|im_end|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ }
10
+ ],
11
+ "bos_token": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "eos_token": {
19
+ "content": "</s>",
20
+ "lstrip": false,
21
+ "normalized": false,
22
+ "rstrip": false,
23
+ "single_word": false
24
+ },
25
+ "pad_token": {
26
+ "content": "</s>",
27
+ "lstrip": false,
28
+ "normalized": false,
29
+ "rstrip": false,
30
+ "single_word": false
31
+ },
32
+ "unk_token": {
33
+ "content": "<unk>",
34
+ "lstrip": false,
35
+ "normalized": false,
36
+ "rstrip": false,
37
+ "single_word": false
38
+ }
39
+ }
tokenization_internlm2.py ADDED
@@ -0,0 +1,236 @@
1
+ # coding=utf-8
2
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ """Tokenization classes for InternLM."""
19
+ import os
20
+ from shutil import copyfile
21
+ from typing import Any, Dict, List, Optional, Tuple
22
+
23
+ import sentencepiece as spm
24
+ from transformers.tokenization_utils import PreTrainedTokenizer
25
+ from transformers.utils import logging
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {}
32
+
33
+
34
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
35
+ class InternLM2Tokenizer(PreTrainedTokenizer):
36
+ """
37
+ Construct an InternLM2 tokenizer, based on byte-level Byte-Pair-Encoding.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ Path to the vocabulary file.
42
+ """
43
+
44
+ vocab_files_names = VOCAB_FILES_NAMES
45
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
46
+ model_input_names = ["input_ids", "attention_mask"]
47
+ _auto_class = "AutoTokenizer"
48
+
49
+ def __init__(
50
+ self,
51
+ vocab_file,
52
+ unk_token="<unk>",
53
+ bos_token="<s>",
54
+ eos_token="</s>",
55
+ pad_token="</s>",
56
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
57
+ add_bos_token=True,
58
+ add_eos_token=False,
59
+ decode_with_prefix_space=False,
60
+ clean_up_tokenization_spaces=False,
61
+ **kwargs,
62
+ ):
63
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
64
+ self.vocab_file = vocab_file
65
+ self.add_bos_token = add_bos_token
66
+ self.add_eos_token = add_eos_token
67
+ self.decode_with_prefix_space = decode_with_prefix_space
68
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
69
+ self.sp_model.Load(vocab_file)
70
+ self._no_prefix_space_tokens = None
71
+ super().__init__(
72
+ bos_token=bos_token,
73
+ eos_token=eos_token,
74
+ unk_token=unk_token,
75
+ pad_token=pad_token,
76
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
77
+ **kwargs,
78
+ )
79
+
80
+ @property
81
+ def no_prefix_space_tokens(self):
82
+ if self._no_prefix_space_tokens is None:
83
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
84
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
85
+ return self._no_prefix_space_tokens
86
+
87
+ @property
88
+ def vocab_size(self):
89
+ """Returns vocab size"""
90
+ return self.sp_model.get_piece_size()
91
+
92
+ @property
93
+ def bos_token_id(self) -> Optional[int]:
94
+ return self.sp_model.bos_id()
95
+
96
+ @property
97
+ def eos_token_id(self) -> Optional[int]:
98
+ return self.sp_model.eos_id()
99
+
100
+ def get_vocab(self):
101
+ """Returns vocab as a dict"""
102
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
103
+ vocab.update(self.added_tokens_encoder)
104
+ return vocab
105
+
106
+ def _tokenize(self, text):
107
+ """Returns a tokenized string."""
108
+ return self.sp_model.encode(text, out_type=str)
109
+
110
+ def _convert_token_to_id(self, token):
111
+ Converts a token (str) to an id using the vocab.
112
+ return self.sp_model.piece_to_id(token)
113
+
114
+ def _convert_id_to_token(self, index):
115
+ Converts an index (integer) to a token (str) using the vocab.
116
+ token = self.sp_model.IdToPiece(index)
117
+ return token
118
+
119
+ def _maybe_add_prefix_space(self, tokens, decoded):
120
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
121
+ return " " + decoded
122
+ else:
123
+ return decoded
124
+
125
+ def convert_tokens_to_string(self, tokens):
126
+ Converts a sequence of tokens (string) to a single string.
127
+ current_sub_tokens = []
128
+ out_string = ""
129
+ prev_is_special = False
130
+ for token in tokens:
131
+ # make sure that special tokens are not decoded using sentencepiece model
132
+ if token in self.all_special_tokens:
133
+ if not prev_is_special:
134
+ out_string += " "
135
+ out_string += self.sp_model.decode(current_sub_tokens) + token
136
+ prev_is_special = True
137
+ current_sub_tokens = []
138
+ else:
139
+ current_sub_tokens.append(token)
140
+ prev_is_special = False
141
+ out_string += self.sp_model.decode(current_sub_tokens)
142
+ out_string = self.clean_up_tokenization(out_string)
143
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
144
+ return out_string[1:]
145
+
146
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
147
+ """
148
+ Save the vocabulary and special tokens file to a directory.
149
+
150
+ Args:
151
+ save_directory (`str`):
152
+ The directory in which to save the vocabulary.
153
+
154
+ Returns:
155
+ `Tuple(str)`: Paths to the files saved.
156
+ """
157
+ if not os.path.isdir(save_directory):
158
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
159
+ return
160
+ out_vocab_file = os.path.join(
161
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
162
+ )
163
+
164
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
165
+ copyfile(self.vocab_file, out_vocab_file)
166
+ elif not os.path.isfile(self.vocab_file):
167
+ with open(out_vocab_file, "wb") as fi:
168
+ content_spiece_model = self.sp_model.serialized_model_proto()
169
+ fi.write(content_spiece_model)
170
+
171
+ return (out_vocab_file,)
172
+
173
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
174
+ if self.add_bos_token:
175
+ bos_token_ids = [self.bos_token_id]
176
+ else:
177
+ bos_token_ids = []
178
+
179
+ output = bos_token_ids + token_ids_0
180
+
181
+ if token_ids_1 is not None:
182
+ output = output + token_ids_1
183
+
184
+ if self.add_eos_token:
185
+ output = output + [self.eos_token_id]
186
+
187
+ return output
188
+
189
+ def get_special_tokens_mask(
190
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
191
+ ) -> List[int]:
192
+ """
193
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
194
+ special tokens using the tokenizer `prepare_for_model` method.
195
+
196
+ Args:
197
+ token_ids_0 (`List[int]`):
198
+ List of IDs.
199
+ token_ids_1 (`List[int]`, *optional*):
200
+ Optional second list of IDs for sequence pairs.
201
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
202
+ Whether or not the token list is already formatted with special tokens for the model.
203
+
204
+ Returns:
205
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
206
+ """
207
+ if already_has_special_tokens:
208
+ return super().get_special_tokens_mask(
209
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
210
+ )
211
+
212
+ if token_ids_1 is None:
213
+ return [1] + ([0] * len(token_ids_0)) + [1]
214
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
215
+
216
+ def create_token_type_ids_from_sequences(
217
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
218
+ ) -> List[int]:
219
+ """
220
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does not make
221
+ use of token type ids, therefore a list of zeros is returned.
222
+
223
+ Args:
224
+ token_ids_0 (`List[int]`):
225
+ List of IDs.
226
+ token_ids_1 (`List[int]`, *optional*):
227
+ Optional second list of IDs for sequence pairs.
228
+
229
+ Returns:
230
+ `List[int]`: List of zeros.
231
+ """
232
+ eos = [self.eos_token_id]
233
+
234
+ if token_ids_1 is None:
235
+ return len(token_ids_0 + eos) * [0]
236
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
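With the defaults above (`add_bos_token=True`, `add_eos_token=False`), encoding a single string prepends only the bos id. A minimal usage sketch, assuming a local clone of this repository (the path "." is illustrative) so that the custom tokenizer class and tokenizer.model are loaded together:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".", trust_remote_code=True)

ids = tok("How have you been sleeping lately?").input_ids
# build_inputs_with_special_tokens prepends bos and, by default, appends no eos.
assert ids[0] == tok.bos_token_id
print(tok.decode(ids, skip_special_tokens=True))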
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
3
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,95 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "92538": {
28
+ "content": "<|plugin|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "92539": {
36
+ "content": "<|interpreter|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "92540": {
44
+ "content": "<|action_end|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "92541": {
52
+ "content": "<|action_start|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "92542": {
60
+ "content": "<|im_end|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "92543": {
68
+ "content": "<|im_start|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ }
75
+ },
76
+ "additional_special_tokens": [
77
+ "<|im_end|>"
78
+ ],
79
+ "auto_map": {
80
+ "AutoTokenizer": [
81
+ "tokenization_internlm2.InternLM2Tokenizer",
82
+ null
83
+ ]
84
+ },
85
+ "bos_token": "<s>",
86
+ "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
87
+ "clean_up_tokenization_spaces": false,
88
+ "eos_token": "</s>",
89
+ "model_max_length": 1000000000000000019884624838656,
90
+ "pad_token": "</s>",
91
+ "padding_side": "right",
92
+ "split_special_tokens": false,
93
+ "tokenizer_class": "InternLM2Tokenizer",
94
+ "unk_token": "<unk>"
95
+ }
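The `chat_template` above renders the ChatML-style layout used by this checkpoint: the bos token, then one `<|im_start|>role\n...<|im_end|>` block per message, plus an `<|im_start|>assistant\n` header when a generation prompt is requested. A short sketch of rendering it, again assuming a local clone at an illustrative path ".":

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".", trust_remote_code=True)

messages = [{"role": "user", "content": "I have been feeling anxious lately."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <s><|im_start|>user
# I have been feeling anxious lately.<|im_end|>
# <|im_start|>assistant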
train_results.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "epoch": 9.0,
3
+ "train_loss": 1.4981852107577853,
4
+ "train_runtime": 2910.9748,
5
+ "train_samples_per_second": 9.69,
6
+ "train_steps_per_second": 0.022
7
+ }
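The throughput figures in train_results.json are rounded, but they can be cross-checked against one another: samples-per-second divided by steps-per-second gives the approximate effective batch size per optimizer step, and steps-per-second times the runtime recovers the step count logged in trainer_log.jsonl below.

runtime_s = 2910.9748
samples_per_sec = 9.69
steps_per_sec = 0.022

# Roughly 440 samples per optimizer step and ~64 steps overall, consistent
# (within rounding) with the 63-step run recorded in trainer_log.jsonl.
print(round(samples_per_sec / steps_per_sec))
print(round(steps_per_sec * runtime_s))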
trainer_log.jsonl ADDED
@@ -0,0 +1,7 @@
1
+ {"current_steps": 10, "total_steps": 63, "loss": 2.1176, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.607381059352038e-07, "epoch": 1.43, "percentage": 15.87, "elapsed_time": "0:06:41", "remaining_time": "0:35:29"}
2
+ {"current_steps": 20, "total_steps": 63, "loss": 1.5678, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.117449009293668e-07, "epoch": 2.86, "percentage": 31.75, "elapsed_time": "0:13:19", "remaining_time": "0:28:39"}
3
+ {"current_steps": 30, "total_steps": 63, "loss": 1.4173, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.868240888334652e-07, "epoch": 4.29, "percentage": 47.62, "elapsed_time": "0:22:11", "remaining_time": "0:24:24"}
4
+ {"current_steps": 40, "total_steps": 63, "loss": 1.342, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4075667487415785e-07, "epoch": 5.71, "percentage": 63.49, "elapsed_time": "0:28:46", "remaining_time": "0:16:32"}
5
+ {"current_steps": 50, "total_steps": 63, "loss": 1.3108, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3347406408508694e-07, "epoch": 7.14, "percentage": 79.37, "elapsed_time": "0:37:42", "remaining_time": "0:09:48"}
6
+ {"current_steps": 60, "total_steps": 63, "loss": 1.2987, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5461356885461075e-08, "epoch": 8.57, "percentage": 95.24, "elapsed_time": "0:44:19", "remaining_time": "0:02:12"}
7
+ {"current_steps": 63, "total_steps": 63, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 9.0, "percentage": 100.0, "elapsed_time": "0:46:16", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,66 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 9.0,
5
+ "eval_steps": 500,
6
+ "global_step": 63,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.43,
13
+ "learning_rate": 9.607381059352038e-07,
14
+ "loss": 2.1176,
15
+ "step": 10
16
+ },
17
+ {
18
+ "epoch": 2.86,
19
+ "learning_rate": 8.117449009293668e-07,
20
+ "loss": 1.5678,
21
+ "step": 20
22
+ },
23
+ {
24
+ "epoch": 4.29,
25
+ "learning_rate": 5.868240888334652e-07,
26
+ "loss": 1.4173,
27
+ "step": 30
28
+ },
29
+ {
30
+ "epoch": 5.71,
31
+ "learning_rate": 3.4075667487415785e-07,
32
+ "loss": 1.342,
33
+ "step": 40
34
+ },
35
+ {
36
+ "epoch": 7.14,
37
+ "learning_rate": 1.3347406408508694e-07,
38
+ "loss": 1.3108,
39
+ "step": 50
40
+ },
41
+ {
42
+ "epoch": 8.57,
43
+ "learning_rate": 1.5461356885461075e-08,
44
+ "loss": 1.2987,
45
+ "step": 60
46
+ },
47
+ {
48
+ "epoch": 9.0,
49
+ "step": 63,
50
+ "total_flos": 8.187554675294208e+17,
51
+ "train_loss": 1.4981852107577853,
52
+ "train_runtime": 2910.9748,
53
+ "train_samples_per_second": 9.69,
54
+ "train_steps_per_second": 0.022
55
+ }
56
+ ],
57
+ "logging_steps": 10,
58
+ "max_steps": 63,
59
+ "num_input_tokens_seen": 0,
60
+ "num_train_epochs": 9,
61
+ "save_steps": 21,
62
+ "total_flos": 8.187554675294208e+17,
63
+ "train_batch_size": 4,
64
+ "trial_name": null,
65
+ "trial_params": null
66
+ }
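The log_history entries above are the data behind training_loss.png; a short sketch that re-plots them from a local clone (matplotlib is assumed to be installed, and the output filename is illustrative):

import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the final summary entry reports
# train_loss rather than a per-step loss, so this filter skips it.
points = [(entry["step"], entry["loss"]) for entry in state["log_history"] if "loss" in entry]
steps, losses = zip(*points)

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_replot.png")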
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:587330976961c8bd5af75d46b680e9a44df1210ab6567118a65fa36696393e87
3
+ size 6200
training_loss.png ADDED