anilbhatt1 committed
Commit 54200b7
1 Parent(s): 20e77c8

Initial commit

Files changed (7)
  1. app.py +57 -0
  2. base.py +222 -0
  3. config.py +1175 -0
  4. model.py +345 -0
  5. requirements.txt +4 -0
  6. tokenizer.py +107 -0
  7. utils.py +358 -0
app.py ADDED
@@ -0,0 +1,57 @@
import gradio as gr

def generate_text(context, num_samples, context_length, model_name):
    from base import main
    from pathlib import Path

    # Map the selected model to the directory holding its converted lit-gpt checkpoint.
    if model_name == "pythia_160m_deduped_custom" or model_name == "pythia_160m_deduped_huggingface":
        ckpt_dir = Path('checkpoints/EleutherAI/pythia-160m-deduped')
    elif model_name == "pythia_70m_deduped":
        ckpt_dir = Path('checkpoints/EleutherAI/pythia-70m-deduped')
    elif model_name == "pythia_410m_deduped":
        ckpt_dir = Path('checkpoints/EleutherAI/pythia-410m-deduped')

    context = str(context)
    num_samples = int(num_samples)
    context_length = int(context_length)
    model_name = str(model_name)

    # context_length is forwarded to base.main as max_new_tokens.
    output_msg_list = main(prompt=context, checkpoint_dir=ckpt_dir, model_name=model_name, num_samples=num_samples, max_new_tokens=context_length)
    output_msg = str()
    for idx, msg in enumerate(output_msg_list):
        title = f"--Generated message : {idx + 1} using the model : {model_name}--\n"
        output_msg += f"{title}\n"
        output_msg += f"{msg}\n"
        output_msg += f"\n"
    return output_msg

def gradio_fn(context, num_samples, context_length, model_name):
    output_txt_msg = generate_text(context, num_samples, context_length, model_name)
    return output_txt_msg

markdown_description = """
- This is a Gradio app that generates text based on:
    - the given text context
    - the requested passage length
    - the number of samples
    - the selected GPT model
- Currently the following models are available:
    - **(a)** pythia_160m_deduped_huggingface **(b)** pythia_160m_deduped_custom \
      **(c)** pythia_410m_deduped **(d)** pythia_70m_deduped
"""
demo = gr.Interface(fn=gradio_fn,
                    inputs=[gr.Textbox(info="Start my passage with: 'I would like to'"),
                            gr.Number(value=1, minimum=1, maximum=5,
                                      info="Number of samples to be generated min=1, max=5"),
                            gr.Slider(value=50, minimum=50, maximum=250,
                                      info="Num characters for passage min=50, max=250"),
                            gr.Dropdown(["pythia_160m_deduped_huggingface", "pythia_160m_deduped_custom",
                                         "pythia_410m_deduped", "pythia_70m_deduped"],
                                        multiselect=False, label="Model-Name",
                                        info="Pretrained model to be used for text generation")],
                    outputs=gr.Textbox(),
                    title="DialogGen - Dialogue Generator",
                    description=markdown_description,
                    article=" **Credits** : https://github.com/Lightning-AI/lit-gpt ")

demo.launch(share=True)
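
Usage note (not part of the commit): `demo.launch(share=True)` additionally requests a temporary public gradio.live URL. A minimal sketch of a local-only launch using the same `demo` object; `server_name`, `server_port`, and `share` are standard Gradio launch options, not values taken from this repository:

if __name__ == "__main__":
    # Bind to localhost only and skip the temporary public gradio.live link.
    demo.launch(server_name="127.0.0.1", server_port=7860, share=False)
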
base.py ADDED
@@ -0,0 +1,222 @@
import sys
import time
from pathlib import Path
from typing import Any, Literal, Optional

import lightning as L
import torch
import torch._dynamo.config
import torch._inductor.config
from lightning.fabric.plugins import BitsandbytesPrecision
from lightning.fabric.strategies import FSDPStrategy

# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))

from model import *
from utils import *
from tokenizer import *

def multinomial_num_samples_1(probs: torch.Tensor) -> torch.Tensor:
    if torch._dynamo.is_compiling():
        # Faster alternative to `torch.multinomial(probs, num_samples=1)` that is also CUDAGraph friendly
        distribution = torch.empty_like(probs).exponential_(1)
        return torch.argmax(probs / distribution, dim=-1, keepdim=True)
    return torch.multinomial(probs, num_samples=1)


def sample(logits: torch.Tensor, temperature: float = 1.0, top_k: Optional[int] = None) -> torch.Tensor:
    logits = logits[0, -1]
    # optionally crop the logits to only the top k options
    if top_k is not None:
        v, i = torch.topk(logits, min(top_k, logits.size(-1)))
        # do not use `torch.where` as in nanogpt because it will repeat top-k collisions
        logits = torch.full_like(logits, float("-inf")).scatter_(-1, i, v)
    # optionally scale the logits and sample from a probability distribution
    if temperature > 0.0:
        probs = torch.nn.functional.softmax(logits / temperature, dim=-1)
        return multinomial_num_samples_1(probs)
    return torch.argmax(logits, dim=-1, keepdim=True)


def next_token(model: GPT, input_pos: torch.Tensor, x: torch.Tensor, **kwargs: Any) -> torch.Tensor:
    logits = model(x, input_pos)
    next = sample(logits, **kwargs)
    return next.type_as(x)


@torch.inference_mode()
def generate(
    model: GPT,
    prompt: torch.Tensor,
    max_returned_tokens: int,
    *,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    eos_id: Optional[int] = None,
) -> torch.Tensor:
    """Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.

    The implementation of this function is modified from A. Karpathy's nanoGPT.

    Args:
        model: The model to use.
        prompt: Tensor of shape (T) with indices of the prompt sequence.
        max_returned_tokens: The maximum number of tokens to return (given plus generated).
        temperature: Scales the predicted logits by 1 / temperature.
        top_k: If specified, only sample among the tokens with the k highest probabilities.
        eos_id: If specified, stop generating any more token once the <eos> token is triggered.
    """
    T = prompt.size(0)
    assert max_returned_tokens > T
    if model.max_seq_length < max_returned_tokens - 1:
        # rolling the kv cache based on the `input_pos` value would be necessary. However, doing so would introduce a
        # data dependency on the `input_pos` tensor and impact model compilation. Since this setting is uncommon, we do
        # not support it to avoid negatively impacting the overall speed
        raise NotImplementedError(f"max_seq_length {model.max_seq_length} needs to be >= {max_returned_tokens - 1}")

    device = prompt.device
    tokens = [prompt]
    input_pos = torch.tensor([T], device=device)
    token = next_token(
        model, torch.arange(0, T, device=device), prompt.view(1, -1), temperature=temperature, top_k=top_k
    ).clone()
    tokens.append(token)
    for _ in range(2, max_returned_tokens - T + 1):
        token = next_token(model, input_pos, token.view(1, -1), temperature=temperature, top_k=top_k).clone()
        tokens.append(token)
        if token == eos_id:
            break
        input_pos = input_pos.add_(1)
    return torch.cat(tokens)


def main(
    prompt: str = "What food do llamas eat?",
    *,
    num_samples: int = 1,
    max_new_tokens: int = 50,
    top_k: Optional[int] = 200,
    temperature: float = 0.8,
    checkpoint_dir: Path = Path("checkpoints/stabilityai/stablelm-base-alpha-3b"),
    quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8", "gptq.int4"]] = None,
    strategy: str = "auto",
    devices: int = 1,
    precision: Optional[str] = None,
    compile: bool = False,
    model_name: str = "pythia_160m_hf"
) -> None:
    """Generates text samples based on a pre-trained model and tokenizer.

    Args:
        prompt: The prompt string to use for generating the samples.
        num_samples: The number of text samples to generate.
        max_new_tokens: The number of generation steps to take.
        top_k: The number of top most probable tokens to consider in the sampling process.
        temperature: A value controlling the randomness of the sampling process. Higher values result in more random
            samples.
        checkpoint_dir: The checkpoint directory to load.
        quantize: Whether to quantize the model and using which method:
            - bnb.nf4, bnb.nf4-dq, bnb.fp4, bnb.fp4-dq: 4-bit quantization from bitsandbytes
            - bnb.int8: 8-bit quantization from bitsandbytes
            - gptq.int4: 4-bit quantization from GPTQ
            for more details, see https://github.com/Lightning-AI/lit-gpt/blob/main/tutorials/quantize.md
        strategy: Indicates the Fabric strategy setting to use.
        devices: How many devices to use.
        precision: Indicates the Fabric precision setting to use.
        compile: Whether to compile the model.
    """
    precision = precision or get_default_supported_precision(training=False)

    plugins = None
    if quantize is not None:
        if devices > 1:
            raise NotImplementedError(
                "Quantization is currently not supported for multi-GPU training. Please set devices=1 when using the"
                " --quantize flag."
            )
        if quantize.startswith("bnb."):
            if "mixed" in precision:
                raise ValueError("Quantization and mixed precision is not supported.")
            dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
            plugins = BitsandbytesPrecision(quantize[4:], dtype)
            precision = None

    if strategy == "fsdp":
        strategy = FSDPStrategy(auto_wrap_policy={Block}, cpu_offload=False)

    fabric = L.Fabric(devices=devices, precision=precision, strategy=strategy, plugins=plugins)
    fabric.launch()

    check_valid_checkpoint_dir(checkpoint_dir, model_name)

    config = Config.from_json(checkpoint_dir / "lit_config.json")

    if quantize == "gptq.int4":
        model_file = "lit_model_gptq.4bit.pth"
        if not (checkpoint_dir / model_file).is_file():
            raise ValueError("Please run `python quantize/gptq.py` first")
    else:
        if model_name == "pythia_160m_deduped_huggingface":
            model_file = "pythia_160m_deduped_hf.pth"
        elif model_name == "pythia_160m_deduped_custom":
            model_file = "pythia_160m_deduped_custom.pth"
        else:
            model_file = "lit_model.pth"
    checkpoint_path = checkpoint_dir / model_file

    tokenizer = Tokenizer(checkpoint_dir)
    encoded = tokenizer.encode(prompt, device=fabric.device)
    prompt_length = encoded.size(0)
    max_returned_tokens = prompt_length + max_new_tokens

    fabric.print(f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
    t0 = time.perf_counter()
    with fabric.init_module(empty_init=True), gptq_quantization(quantize == "gptq.int4"):
        model = GPT(config)
    fabric.print(f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.", file=sys.stderr)
    with fabric.init_tensor():
        # set the max_seq_length to limit the memory usage to what we need
        model.max_seq_length = max_returned_tokens
        # enable the kv cache
        model.set_kv_cache(batch_size=1)
    model.eval()

    if compile:
        torch._dynamo.config.automatic_dynamic_shapes = True
        torch._inductor.config.triton.unique_kernel_names = True
        torch._inductor.config.coordinate_descent_tuning = True
        global next_token
        next_token = torch.compile(next_token, mode="reduce-overhead")

    model = fabric.setup_module(model)

    t0 = time.perf_counter()
    load_checkpoint(fabric, model, checkpoint_path)
    fabric.print(f"Time to load the model weights: {time.perf_counter() - t0:.02f} seconds.", file=sys.stderr)

    L.seed_everything(1234)
    print(f'num_samples is {num_samples}')
    output_msg_list = []
    for i in range(num_samples):
        t0 = time.perf_counter()
        y = generate(model, encoded, max_returned_tokens, temperature=temperature, top_k=top_k)
        t = time.perf_counter() - t0
        for block in model.transformer.h:
            block.attn.kv_cache.reset_parameters()
        output_msg = tokenizer.decode(y)
        tokens_generated = y.size(0) - prompt_length
        output_msg_list.append(output_msg)
        fabric.print(
            f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr
        )
    if fabric.device.type == "cuda":
        fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)
    return output_msg_list

if __name__ == "__main__":
    from jsonargparse import CLI

    torch.set_float32_matmul_precision("high")
    output_msg_list = CLI(main)
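
Usage note (not part of the commit): `main()` is both importable (this is how app.py calls it) and exposed as a jsonargparse CLI. A minimal sketch of each path, assuming a converted lit-gpt checkpoint already exists at the directory shown (the same one app.py points to for pythia_70m_deduped):

# From Python, mirroring what app.py does:
from pathlib import Path
from base import main

messages = main(
    prompt="I would like to",
    checkpoint_dir=Path("checkpoints/EleutherAI/pythia-70m-deduped"),
    model_name="pythia_70m_deduped",
    num_samples=1,
    max_new_tokens=50,
)
print(messages[0])

# From the shell, via the jsonargparse CLI at the bottom of base.py:
# python base.py --prompt "I would like to" \
#     --checkpoint_dir checkpoints/EleutherAI/pythia-70m-deduped \
#     --model_name pythia_70m_deduped --num_samples 1 --max_new_tokens 50
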
config.py ADDED
@@ -0,0 +1,1175 @@
1
+ import json
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass, field
4
+ from pathlib import Path
5
+ from typing import Any, Literal, Optional, Type, Union
6
+
7
+ import torch
8
+ from typing_extensions import Self
9
+
10
+ import model
11
+ from utils import find_multiple
12
+
13
+
14
+ @dataclass
15
+ class Config:
16
+ name: str = ""
17
+ hf_config: dict = field(default_factory=dict)
18
+ block_size: int = 4096
19
+ vocab_size: int = 50254
20
+ padding_multiple: int = 512
21
+ padded_vocab_size: Optional[int] = None
22
+ n_layer: int = 16
23
+ n_head: int = 32
24
+ n_embd: int = 4096
25
+ rotary_percentage: float = 0.25
26
+ parallel_residual: bool = True
27
+ bias: bool = True
28
+ lm_head_bias: bool = False
29
+ # to use multi-head attention (MHA), set this to `n_head` (default)
30
+ # to use multi-query attention (MQA), set this to 1
31
+ # to use grouped-query attention (GQA), set this to a value in between
32
+ # Example with `n_head=4`
33
+ # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐
34
+ # │ v ││ v ││ v ││ v │ │ v │ │ v │ │ v │
35
+ # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘
36
+ # │ │ │ │ │ │ │
37
+ # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐
38
+ # │ k ││ k ││ k ││ k │ │ k │ │ k │ │ k │
39
+ # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘
40
+ # │ │ │ │ ┌──┴──┐ ┌──┴──┐ ┌────┬──┴─┬────┐
41
+ # ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐
42
+ # │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │
43
+ # └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘
44
+ # ◀──────────────────▶ ◀──────────────────▶ ◀──────────────────▶
45
+ # MHA GQA MQA
46
+ # n_query_groups=4 n_query_groups=2 n_query_groups=1
47
+ #
48
+ # credit https://arxiv.org/pdf/2305.13245.pdf
49
+ n_query_groups: Optional[int] = None
50
+ shared_attention_norm: bool = False
51
+ _norm_class: Literal["LayerNorm", "RMSNorm"] = "LayerNorm"
52
+ norm_eps: float = 1e-5
53
+ _mlp_class: Literal["GptNeoxMLP", "LLaMAMLP"] = "GptNeoxMLP"
54
+ gelu_approximate: str = "none"
55
+ intermediate_size: Optional[int] = None
56
+ rope_condense_ratio: int = 1
57
+ rope_base: int = 10000
58
+
59
+ def __post_init__(self):
60
+ if not self.name:
61
+ self.name = self.hf_config.get("name", self.name)
62
+
63
+ assert self.n_embd % self.n_head == 0
64
+ self.head_size = self.n_embd // self.n_head
65
+
66
+ # vocab size should be a power of 2 to be optimal on hardware. compute the closest value
67
+ if self.padded_vocab_size is None:
68
+ self.padded_vocab_size = find_multiple(self.vocab_size, self.padding_multiple)
69
+ else:
70
+ # vocab size shouldn't be larger than padded vocab size
71
+ self.vocab_size = min(self.vocab_size, self.padded_vocab_size)
72
+
73
+ # compute the number of query groups
74
+ if self.n_query_groups is not None:
75
+ assert self.n_head % self.n_query_groups == 0
76
+ else:
77
+ self.n_query_groups = self.n_head
78
+
79
+ # compute the intermediate size for MLP if not set
80
+ if self.intermediate_size is None:
81
+ if self._mlp_class == "LLaMAMLP":
82
+ raise ValueError("The config needs to set the `intermediate_size`")
83
+ self.intermediate_size = 4 * self.n_embd
84
+
85
+ self.rope_n_elem = int(self.rotary_percentage * self.head_size)
86
+
87
+ @classmethod
88
+ def from_name(cls, name: str, **kwargs: Any) -> Self:
89
+ if name not in name_to_config:
90
+ # search through all `config['hf_config']['name']`
91
+ try:
92
+ conf_dict = next(config for config in configs if name == config["hf_config"]["name"])
93
+ except StopIteration:
94
+ raise ValueError(f"{name!r} is not a supported config name")
95
+ else:
96
+ conf_dict = name_to_config[name]
97
+
98
+ conf_dict = conf_dict.copy()
99
+ if "condense_ratio" in kwargs: # legacy name
100
+ kwargs["rope_condense_ratio"] = kwargs.pop("condense_ratio")
101
+ conf_dict.update(kwargs)
102
+ return cls(**conf_dict)
103
+
104
+ @classmethod
105
+ def from_json(cls, path: Union[str, Path], **kwargs: Any) -> Self:
106
+ with open(path, encoding="utf-8") as fp:
107
+ json_kwargs = json.load(fp)
108
+ if "condense_ratio" in json_kwargs: # legacy name
109
+ json_kwargs["rope_condense_ratio"] = json_kwargs.pop("condense_ratio")
110
+ if "condense_ratio" in kwargs: # legacy name
111
+ kwargs["rope_condense_ratio"] = kwargs.pop("condense_ratio")
112
+ if "org" in json_kwargs: # legacy name
113
+ json_kwargs["hf_config"] = {"name": json_kwargs["name"], "org": json_kwargs.pop("org")}
114
+ if "org" in kwargs: # legacy name
115
+ kwargs["hf_config"] = {"name": kwargs.get("name", json_kwargs["name"]), "org": kwargs.pop("org")}
116
+ json_kwargs.update(kwargs)
117
+ return cls(**json_kwargs)
118
+
119
+ @classmethod
120
+ def from_checkpoint(cls, path: Path, **kwargs: Any) -> Self:
121
+ """Automatically load `lit_config.json` and if it doesn't exist - a matching config from `lit_gpt/config.py`."""
122
+ if (config_path := path / "lit_config.json").is_file():
123
+ return cls.from_json(config_path, **kwargs)
124
+ if (model_name := path.name) in name_to_config:
125
+ return cls.from_name(model_name, **kwargs)
126
+ raise FileNotFoundError(f"For {str(path)!r} neither 'lit_config.json' nor matching config exists.")
127
+
128
+ @property
129
+ def mlp_class(self) -> Type:
130
+ # `self._mlp_class` cannot be the type to keep the config json serializable
131
+ return getattr(model, self._mlp_class)
132
+
133
+ @property
134
+ def norm_class(self) -> Type:
135
+ # `self._norm_class` cannot be the type to keep the config json serializable
136
+ if self._norm_class == "RMSNorm":
137
+ from lit_gpt.rmsnorm import RMSNorm
138
+
139
+ return RMSNorm
140
+ return getattr(torch.nn, self._norm_class)
141
+
142
+
143
+ ########################
144
+ # Stability AI StableLM
145
+ ########################
146
+ configs = [
147
+ # https://huggingface.co/stabilityai/stablelm-base-alpha-3b/blob/main/config.json
148
+ dict(name="stablelm-base-alpha-3b", hf_config=dict(org="stabilityai", name="stablelm-base-alpha-3b")),
149
+ # https://huggingface.co/stabilityai/stablelm-base-alpha-7b/blob/main/config.json
150
+ dict(
151
+ name="stablelm-base-alpha-7b",
152
+ hf_config=dict(org="stabilityai", name="stablelm-base-alpha-7b"),
153
+ n_head=48,
154
+ n_embd=6144,
155
+ padding_multiple=256,
156
+ ),
157
+ # https://huggingface.co/stabilityai/stablelm-tuned-alpha-3b/blob/main/config.json
158
+ dict(name="stablelm-tuned-alpha-3b", hf_config=dict(org="stabilityai", name="stablelm-tuned-alpha-3b"), n_head=32),
159
+ # https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b/blob/main/config.json
160
+ dict(
161
+ name="stablelm-tuned-alpha-7b",
162
+ hf_config=dict(org="stabilityai", name="stablelm-tuned-alpha-7b"),
163
+ n_head=48,
164
+ n_embd=6144,
165
+ padding_multiple=256,
166
+ ),
167
+ ]
168
+
169
+ ####################
170
+ # EleutherAI Pythia
171
+ ####################
172
+ pythia = [
173
+ # https://huggingface.co/EleutherAI/pythia-70m/blob/main/config.json
174
+ dict(
175
+ name="pythia-70m",
176
+ hf_config=dict(org="EleutherAI", name="pythia-70m"),
177
+ block_size=2048,
178
+ n_layer=6,
179
+ n_embd=512,
180
+ n_head=8,
181
+ padding_multiple=128,
182
+ ),
183
+ # https://huggingface.co/EleutherAI/pythia-160m/blob/main/config.json
184
+ dict(
185
+ name="pythia-160m",
186
+ hf_config=dict(org="EleutherAI", name="pythia-160m"),
187
+ block_size=2048,
188
+ n_layer=12,
189
+ n_embd=768,
190
+ n_head=12,
191
+ padding_multiple=128,
192
+ ),
193
+ # https://huggingface.co/EleutherAI/pythia-410m/blob/main/config.json
194
+ dict(
195
+ name="pythia-410m",
196
+ hf_config=dict(org="EleutherAI", name="pythia-410m"),
197
+ block_size=2048,
198
+ n_layer=24,
199
+ n_embd=1024,
200
+ n_head=16,
201
+ padding_multiple=128,
202
+ ),
203
+ # https://huggingface.co/EleutherAI/pythia-1b/blob/main/config.json
204
+ dict(
205
+ name="pythia-1b",
206
+ hf_config=dict(org="EleutherAI", name="pythia-1b"),
207
+ block_size=2048,
208
+ n_embd=2048,
209
+ n_head=8,
210
+ padding_multiple=128,
211
+ ),
212
+ # https://huggingface.co/EleutherAI/pythia-1.4b/blob/main/config.json
213
+ dict(
214
+ name="pythia-1.4b",
215
+ hf_config=dict(org="EleutherAI", name="pythia-1.4b"),
216
+ block_size=2048,
217
+ n_layer=24,
218
+ n_embd=2048,
219
+ n_head=16,
220
+ padding_multiple=128,
221
+ ),
222
+ # https://huggingface.co/EleutherAI/pythia-2.8b/blob/main/config.json
223
+ dict(
224
+ name="pythia-2.8b",
225
+ hf_config=dict(org="EleutherAI", name="pythia-2.8b"),
226
+ block_size=2048,
227
+ n_layer=32,
228
+ n_embd=2560,
229
+ padding_multiple=128,
230
+ ),
231
+ # https://huggingface.co/EleutherAI/pythia-6.9b/blob/main/config.json
232
+ dict(
233
+ name="pythia-6.9b",
234
+ hf_config=dict(org="EleutherAI", name="pythia-6.9b"),
235
+ block_size=2048,
236
+ n_layer=32,
237
+ padding_multiple=256,
238
+ ),
239
+ # https://huggingface.co/EleutherAI/pythia-12b/blob/main/config.json
240
+ dict(
241
+ name="pythia-12b",
242
+ hf_config=dict(org="EleutherAI", name="pythia-12b"),
243
+ block_size=2048,
244
+ n_layer=36,
245
+ n_embd=5120,
246
+ n_head=40,
247
+ ),
248
+ ]
249
+ configs.extend(pythia)
250
+ for c in pythia:
251
+ copy = deepcopy(c)
252
+ copy["name"] = f"{c['name']}-deduped"
253
+ copy["hf_config"]["name"] = f"{c['hf_config']['name']}-deduped"
254
+ configs.append(copy)
255
+
256
+
257
+ ####################################
258
+ # togethercomputer RedPajama INCITE
259
+ ####################################
260
+ redpajama_incite = [
261
+ # https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-3B-v1/blob/main/config.json
262
+ dict(
263
+ name="RedPajama-INCITE-{}-3B-v1",
264
+ hf_config=dict(org="togethercomputer", name="RedPajama-INCITE-{}-3B-v1"),
265
+ block_size=2048,
266
+ n_layer=32,
267
+ n_embd=2560,
268
+ padding_multiple=256,
269
+ rotary_percentage=1.0,
270
+ parallel_residual=False,
271
+ ),
272
+ # https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Base/blob/main/config.json
273
+ dict(
274
+ name="RedPajama-INCITE-7B-{}",
275
+ hf_config=dict(org="togethercomputer", name="RedPajama-INCITE-7B-{}"),
276
+ block_size=2048,
277
+ n_layer=32,
278
+ padding_multiple=256,
279
+ rotary_percentage=1.0,
280
+ parallel_residual=False,
281
+ ),
282
+ # this redirects to the checkpoint above. kept for those who had the old weights already downloaded
283
+ dict(
284
+ name="RedPajama-INCITE-{}-7B-v0.1",
285
+ hf_config=dict(org="togethercomputer", name="RedPajama-INCITE-{}-7B-v0.1"),
286
+ block_size=2048,
287
+ n_layer=32,
288
+ padding_multiple=256,
289
+ rotary_percentage=1.0,
290
+ parallel_residual=False,
291
+ ),
292
+ ]
293
+ for c in redpajama_incite:
294
+ for kind in ("Base", "Chat", "Instruct"):
295
+ copy = deepcopy(c)
296
+ copy["name"] = c["name"].format(kind)
297
+ copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
298
+ configs.append(copy)
299
+
300
+
301
+ #################
302
+ # TII UAE Falcon
303
+ #################
304
+ falcon = [
305
+ # https://huggingface.co/tiiuae/falcon-7b/blob/main/config.json
306
+ dict(
307
+ name="falcon-7b{}",
308
+ hf_config=dict(org="tiiuae", name="falcon-7b{}"),
309
+ block_size=2048,
310
+ vocab_size=65024,
311
+ padded_vocab_size=65024,
312
+ n_layer=32,
313
+ n_head=71,
314
+ n_embd=4544,
315
+ rotary_percentage=1.0,
316
+ n_query_groups=1,
317
+ bias=False,
318
+ # this is not in the config, but in the original model implementation, only for this config
319
+ shared_attention_norm=True,
320
+ ),
321
+ # https://huggingface.co/tiiuae/falcon-40b/blob/main/config.json
322
+ dict(
323
+ name="falcon-40b{}",
324
+ hf_config=dict(org="tiiuae", name="falcon-40b{}"),
325
+ block_size=2048,
326
+ vocab_size=65024,
327
+ padded_vocab_size=65024,
328
+ n_layer=60,
329
+ n_head=128,
330
+ n_embd=8192,
331
+ rotary_percentage=1.0,
332
+ n_query_groups=8,
333
+ bias=False,
334
+ ),
335
+ ]
336
+ for c in falcon:
337
+ for kind in ("", "-instruct"):
338
+ copy = deepcopy(c)
339
+ copy["name"] = c["name"].format(kind)
340
+ copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
341
+ configs.append(copy)
342
+
343
+ # https://huggingface.co/tiiuae/falcon-180b/blob/main/config.json
344
+ falcon180b = dict(
345
+ name="falcon-180B{}",
346
+ hf_config=dict(org="tiiuae", name="falcon-180B{}"),
347
+ block_size=2048,
348
+ vocab_size=65024,
349
+ padded_vocab_size=65024,
350
+ n_layer=80,
351
+ n_head=232,
352
+ n_embd=14848,
353
+ rotary_percentage=1.0,
354
+ n_query_groups=8,
355
+ bias=False,
356
+ )
357
+
358
+ for kind in ("", "-chat"):
359
+ copy = deepcopy(falcon180b)
360
+ copy["name"] = falcon180b["name"].format(kind)
361
+ copy["hf_config"]["name"] = falcon180b["hf_config"]["name"].format(kind)
362
+ configs.append(copy)
363
+
364
+
365
+ #############################
366
+ # OpenLM Research Open LLaMA
367
+ #############################
368
+ open_LLaMA = [
369
+ # https://huggingface.co/openlm-research/open_llama_3b/blob/main/config.json
370
+ dict(
371
+ name="open_llama_3b",
372
+ hf_config=dict(org="openlm-research", name="open_llama_3b"),
373
+ block_size=2048,
374
+ vocab_size=32000,
375
+ padding_multiple=64,
376
+ n_layer=26,
377
+ n_embd=3200,
378
+ rotary_percentage=1.0,
379
+ parallel_residual=False,
380
+ bias=False,
381
+ _norm_class="RMSNorm",
382
+ norm_eps=1e-6,
383
+ _mlp_class="LLaMAMLP",
384
+ intermediate_size=8640,
385
+ ),
386
+ # https://huggingface.co/openlm-research/open_llama_7b/blob/main/config.json
387
+ dict(
388
+ name="open_llama_7b",
389
+ hf_config=dict(org="openlm-research", name="open_llama_7b"),
390
+ block_size=2048,
391
+ vocab_size=32000,
392
+ padding_multiple=64,
393
+ n_layer=32,
394
+ rotary_percentage=1.0,
395
+ parallel_residual=False,
396
+ bias=False,
397
+ _norm_class="RMSNorm",
398
+ norm_eps=1e-6,
399
+ _mlp_class="LLaMAMLP",
400
+ intermediate_size=11008,
401
+ ),
402
+ # https://huggingface.co/openlm-research/open_llama_13b/blob/main/config.json
403
+ dict(
404
+ name="open_llama_13b",
405
+ hf_config=dict(org="openlm-research", name="open_llama_13b"),
406
+ block_size=2048,
407
+ vocab_size=32000,
408
+ padding_multiple=64,
409
+ n_layer=40,
410
+ n_head=40,
411
+ n_embd=5120,
412
+ rotary_percentage=1.0,
413
+ parallel_residual=False,
414
+ bias=False,
415
+ _norm_class="RMSNorm",
416
+ norm_eps=1e-6,
417
+ _mlp_class="LLaMAMLP",
418
+ intermediate_size=13824,
419
+ ),
420
+ ]
421
+ configs.extend(open_LLaMA)
422
+
423
+
424
+ ###############
425
+ # LMSYS Vicuna
426
+ ###############
427
+ vicuna = [
428
+ # https://huggingface.co/lmsys/vicuna-7b-v1.3/blob/main/config.json
429
+ dict(
430
+ name="vicuna-7b-v1.3",
431
+ hf_config=dict(org="lmsys", name="vicuna-7b-v1.3"),
432
+ block_size=2048,
433
+ vocab_size=32000,
434
+ padding_multiple=64,
435
+ n_layer=32,
436
+ rotary_percentage=1.0,
437
+ parallel_residual=False,
438
+ bias=False,
439
+ _norm_class="RMSNorm",
440
+ norm_eps=1e-6,
441
+ _mlp_class="LLaMAMLP",
442
+ intermediate_size=11008,
443
+ ),
444
+ # https://huggingface.co/lmsys/vicuna-13b-v1.3/blob/main/config.json
445
+ dict(
446
+ name="vicuna-13b-v1.3",
447
+ hf_config=dict(org="lmsys", name="vicuna-13b-v1.3"),
448
+ block_size=2048,
449
+ vocab_size=32000,
450
+ padding_multiple=64,
451
+ n_layer=40,
452
+ n_head=40,
453
+ n_embd=5120,
454
+ rotary_percentage=1.0,
455
+ parallel_residual=False,
456
+ bias=False,
457
+ _norm_class="RMSNorm",
458
+ norm_eps=1e-6,
459
+ _mlp_class="LLaMAMLP",
460
+ intermediate_size=13824,
461
+ ),
462
+ # https://huggingface.co/lmsys/vicuna-33b-v1.3/blob/main/config.json
463
+ dict(
464
+ name="vicuna-33b-v1.3",
465
+ hf_config=dict(org="lmsys", name="vicuna-33b-v1.3"),
466
+ block_size=2048,
467
+ vocab_size=32000,
468
+ padding_multiple=64,
469
+ n_layer=60,
470
+ n_head=52,
471
+ n_embd=6656,
472
+ rotary_percentage=1.0,
473
+ parallel_residual=False,
474
+ bias=False,
475
+ _norm_class="RMSNorm",
476
+ norm_eps=1e-6,
477
+ _mlp_class="LLaMAMLP",
478
+ intermediate_size=17920,
479
+ ),
480
+ # https://huggingface.co/lmsys/vicuna-7b-v1.5/blob/main/config.json
481
+ dict(
482
+ name="vicuna-7b-v1.5",
483
+ hf_config=dict(org="lmsys", name="vicuna-7b-v1.5"),
484
+ vocab_size=32000,
485
+ padding_multiple=64,
486
+ n_layer=32,
487
+ rotary_percentage=1.0,
488
+ parallel_residual=False,
489
+ bias=False,
490
+ _norm_class="RMSNorm",
491
+ _mlp_class="LLaMAMLP",
492
+ intermediate_size=11008,
493
+ ),
494
+ # https://huggingface.co/lmsys/vicuna-7b-v1.5-16k/blob/main/config.json
495
+ dict(
496
+ name="vicuna-7b-v1.5-16k",
497
+ hf_config=dict(org="lmsys", name="vicuna-7b-v1.5-16k"),
498
+ block_size=16384,
499
+ vocab_size=32000,
500
+ padding_multiple=64,
501
+ n_layer=32,
502
+ rotary_percentage=1.0,
503
+ parallel_residual=False,
504
+ bias=False,
505
+ _norm_class="RMSNorm",
506
+ _mlp_class="LLaMAMLP",
507
+ intermediate_size=11008,
508
+ rope_condense_ratio=4,
509
+ ),
510
+ # https://huggingface.co/lmsys/vicuna-13b-v1.5/blob/main/config.json
511
+ dict(
512
+ name="vicuna-13b-v1.5",
513
+ hf_config=dict(org="lmsys", name="vicuna-13b-v1.5"),
514
+ vocab_size=32000,
515
+ padding_multiple=64,
516
+ n_layer=40,
517
+ n_head=40,
518
+ n_embd=5120,
519
+ rotary_percentage=1.0,
520
+ parallel_residual=False,
521
+ bias=False,
522
+ _norm_class="RMSNorm",
523
+ _mlp_class="LLaMAMLP",
524
+ intermediate_size=13824,
525
+ ),
526
+ # https://huggingface.co/lmsys/vicuna-13b-v1.5-16k/blob/main/config.json
527
+ dict(
528
+ name="vicuna-13b-v1.5-16k",
529
+ hf_config=dict(org="lmsys", name="vicuna-13b-v1.5-16k"),
530
+ block_size=16384,
531
+ vocab_size=32000,
532
+ padding_multiple=64,
533
+ n_layer=40,
534
+ n_head=40,
535
+ n_embd=5120,
536
+ rotary_percentage=1.0,
537
+ parallel_residual=False,
538
+ bias=False,
539
+ _norm_class="RMSNorm",
540
+ _mlp_class="LLaMAMLP",
541
+ intermediate_size=13824,
542
+ rope_condense_ratio=4,
543
+ ),
544
+ ]
545
+ configs.extend(vicuna)
546
+
547
+
548
+ #################
549
+ # LMSYS LongChat
550
+ #################
551
+ long_chat = [
552
+ # https://huggingface.co/lmsys/longchat-7b-16k/blob/main/config.json
553
+ dict(
554
+ name="longchat-7b-16k",
555
+ hf_config=dict(org="lmsys", name="longchat-7b-16k"),
556
+ block_size=16384,
557
+ vocab_size=32000,
558
+ padding_multiple=64,
559
+ n_layer=32,
560
+ rotary_percentage=1.0,
561
+ parallel_residual=False,
562
+ bias=False,
563
+ _norm_class="RMSNorm",
564
+ norm_eps=1e-6,
565
+ _mlp_class="LLaMAMLP",
566
+ intermediate_size=11008,
567
+ rope_condense_ratio=8,
568
+ ),
569
+ # https://huggingface.co/lmsys/longchat-13b-16k/blob/main/config.json
570
+ dict(
571
+ name="longchat-13b-16k",
572
+ hf_config=dict(org="lmsys", name="longchat-13b-16k"),
573
+ block_size=16384,
574
+ vocab_size=32000,
575
+ padding_multiple=64,
576
+ n_layer=40,
577
+ n_head=40,
578
+ n_embd=5120,
579
+ rotary_percentage=1.0,
580
+ parallel_residual=False,
581
+ bias=False,
582
+ _norm_class="RMSNorm",
583
+ norm_eps=1e-6,
584
+ _mlp_class="LLaMAMLP",
585
+ intermediate_size=13824,
586
+ rope_condense_ratio=8,
587
+ ),
588
+ ]
589
+ configs.extend(long_chat)
590
+
591
+
592
+ ######################
593
+ # NousResearch Hermes
594
+ ######################
595
+ nous_research = [
596
+ # https://huggingface.co/NousResearch/Nous-Hermes-llama-2-7b/blob/main/config.json
597
+ dict(
598
+ name="Nous-Hermes-llama-2-7b",
599
+ hf_config=dict(org="NousResearch", name="Nous-Hermes-llama-2-7b"),
600
+ padded_vocab_size=32000,
601
+ n_layer=32,
602
+ rotary_percentage=1.0,
603
+ parallel_residual=False,
604
+ bias=False,
605
+ _norm_class="RMSNorm",
606
+ norm_eps=1e-05,
607
+ _mlp_class="LLaMAMLP",
608
+ intermediate_size=11008,
609
+ ),
610
+ # https://huggingface.co/NousResearch/Nous-Hermes-13B/blob/main/config.json
611
+ dict(
612
+ name="Nous-Hermes-13b",
613
+ hf_config=dict(org="NousResearch", name="Nous-Hermes-13b"),
614
+ block_size=2048,
615
+ vocab_size=32000,
616
+ padded_vocab_size=32001,
617
+ n_layer=40,
618
+ n_head=40,
619
+ n_embd=5120,
620
+ rotary_percentage=1.0,
621
+ parallel_residual=False,
622
+ bias=False,
623
+ _norm_class="RMSNorm",
624
+ norm_eps=1e-6,
625
+ _mlp_class="LLaMAMLP",
626
+ intermediate_size=13824,
627
+ ),
628
+ # https://huggingface.co/NousResearch/Nous-Hermes-Llama2-13b
629
+ dict(
630
+ name="Nous-Hermes-Llama2-13b",
631
+ hf_config=dict(org="NousResearch", name="Nous-Hermes-Llama2-13b"),
632
+ vocab_size=32000,
633
+ padded_vocab_size=32032,
634
+ n_layer=40,
635
+ n_head=40,
636
+ n_embd=5120,
637
+ rotary_percentage=1.0,
638
+ parallel_residual=False,
639
+ bias=False,
640
+ _norm_class="RMSNorm",
641
+ norm_eps=1e-05,
642
+ _mlp_class="LLaMAMLP",
643
+ intermediate_size=13824,
644
+ ),
645
+ ]
646
+ configs.extend(nous_research)
647
+
648
+
649
+ ###############
650
+ # Meta LLaMA 2
651
+ ###############
652
+ llama_2 = [
653
+ # https://huggingface.co/meta-llama/Llama-2-7b-hf/blob/main/config.json
654
+ dict(
655
+ name="Llama-2-7b{}-hf",
656
+ hf_config=dict(org="meta-llama", name="Llama-2-7b{}-hf"),
657
+ vocab_size=32000,
658
+ padding_multiple=64,
659
+ n_layer=32,
660
+ rotary_percentage=1.0,
661
+ parallel_residual=False,
662
+ bias=False,
663
+ _norm_class="RMSNorm",
664
+ _mlp_class="LLaMAMLP",
665
+ intermediate_size=11008,
666
+ ),
667
+ # https://huggingface.co/meta-llama/Llama-2-13b-hf/blob/main/config.json
668
+ dict(
669
+ name="Llama-2-13b{}-hf",
670
+ hf_config=dict(org="meta-llama", name="Llama-2-13b{}-hf"),
671
+ vocab_size=32000,
672
+ padding_multiple=64,
673
+ n_layer=40,
674
+ n_head=40,
675
+ n_embd=5120,
676
+ rotary_percentage=1.0,
677
+ parallel_residual=False,
678
+ bias=False,
679
+ _norm_class="RMSNorm",
680
+ _mlp_class="LLaMAMLP",
681
+ intermediate_size=13824,
682
+ ),
683
+ # https://huggingface.co/meta-llama/Llama-2-70b-hf/blob/main/config.json
684
+ dict(
685
+ name="Llama-2-70b{}-hf",
686
+ hf_config=dict(org="meta-llama", name="Llama-2-70b{}-hf"),
687
+ vocab_size=32000,
688
+ padding_multiple=64,
689
+ n_layer=80,
690
+ n_head=64,
691
+ n_embd=8192,
692
+ n_query_groups=8,
693
+ rotary_percentage=1.0,
694
+ parallel_residual=False,
695
+ bias=False,
696
+ _norm_class="RMSNorm",
697
+ _mlp_class="LLaMAMLP",
698
+ intermediate_size=28672,
699
+ ),
700
+ ]
701
+ for c in llama_2:
702
+ for kind in ("", "-chat"):
703
+ copy = deepcopy(c)
704
+ copy["name"] = c["name"].format(kind)
705
+ copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
706
+ configs.append(copy)
707
+
708
+
709
+ ##########################
710
+ # Stability AI FreeWilly2
711
+ ##########################
712
+ freewilly_2 = [
713
+ # https://huggingface.co/stabilityai/FreeWilly2/blob/main/config.json
714
+ dict(
715
+ name="FreeWilly2",
716
+ hf_config=dict(org="stabilityai", name="FreeWilly2"),
717
+ vocab_size=32000,
718
+ padding_multiple=64,
719
+ n_layer=80,
720
+ n_head=64,
721
+ n_embd=8192,
722
+ n_query_groups=8,
723
+ rotary_percentage=1.0,
724
+ parallel_residual=False,
725
+ bias=False,
726
+ _norm_class="RMSNorm",
727
+ _mlp_class="LLaMAMLP",
728
+ intermediate_size=28672,
729
+ )
730
+ ]
731
+ configs.extend(freewilly_2)
732
+
733
+
734
+ ##################
735
+ # Meta Code Llama
736
+ ##################
737
+ code_llama = [
738
+ # https://huggingface.co/codellama/CodeLlama-7b-hf/blob/main/config.json
739
+ dict(
740
+ name="CodeLlama-7b-hf",
741
+ hf_config=dict(org="codellama", name="CodeLlama-7b-hf"),
742
+ block_size=16384,
743
+ vocab_size=32016,
744
+ padding_multiple=16,
745
+ n_layer=32,
746
+ rotary_percentage=1.0,
747
+ parallel_residual=False,
748
+ bias=False,
749
+ _norm_class="RMSNorm",
750
+ norm_eps=1e-05,
751
+ _mlp_class="LLaMAMLP",
752
+ intermediate_size=11008,
753
+ rope_base=1000000,
754
+ ),
755
+ # https://huggingface.co/codellama/CodeLlama-13b-hf/blob/main/config.json
756
+ dict(
757
+ name="CodeLlama-13b-hf",
758
+ hf_config=dict(org="codellama", name="CodeLlama-13b-hf"),
759
+ block_size=16384,
760
+ vocab_size=32016,
761
+ padding_multiple=16,
762
+ n_layer=40,
763
+ n_head=40,
764
+ n_embd=5120,
765
+ rotary_percentage=1.0,
766
+ parallel_residual=False,
767
+ bias=False,
768
+ _norm_class="RMSNorm",
769
+ norm_eps=1e-05,
770
+ _mlp_class="LLaMAMLP",
771
+ intermediate_size=13824,
772
+ rope_base=1000000,
773
+ ),
774
+ # https://huggingface.co/codellama/CodeLlama-34b-hf/blob/main/config.json
775
+ dict(
776
+ name="CodeLlama-34b-hf",
777
+ hf_config=dict(org="codellama", name="CodeLlama-34b-hf"),
778
+ block_size=16384,
779
+ vocab_size=32000,
780
+ padding_multiple=64,
781
+ n_layer=48,
782
+ n_head=64,
783
+ n_embd=8192,
784
+ n_query_groups=8,
785
+ rotary_percentage=1.0,
786
+ parallel_residual=False,
787
+ bias=False,
788
+ _norm_class="RMSNorm",
789
+ norm_eps=1e-05,
790
+ _mlp_class="LLaMAMLP",
791
+ intermediate_size=22016,
792
+ rope_base=1000000,
793
+ ),
794
+ # https://huggingface.co/codellama/CodeLlama-7b-Python-hf/blob/main/config.json
795
+ dict(
796
+ name="CodeLlama-7b-Python-hf",
797
+ hf_config=dict(org="codellama", name="CodeLlama-7b-Python-hf"),
798
+ block_size=16384,
799
+ vocab_size=32000,
800
+ padding_multiple=64,
801
+ n_layer=32,
802
+ rotary_percentage=1.0,
803
+ parallel_residual=False,
804
+ bias=False,
805
+ _norm_class="RMSNorm",
806
+ norm_eps=1e-05,
807
+ _mlp_class="LLaMAMLP",
808
+ intermediate_size=11008,
809
+ rope_base=1000000,
810
+ ),
811
+ # https://huggingface.co/codellama/CodeLlama-13b-Python-hf/blob/main/config.json
812
+ dict(
813
+ name="CodeLlama-13b-Python-hf",
814
+ hf_config=dict(org="codellama", name="CodeLlama-13b-Python-hf"),
815
+ block_size=16384,
816
+ vocab_size=32000,
817
+ padding_multiple=64,
818
+ n_layer=40,
819
+ n_head=40,
820
+ n_embd=5120,
821
+ rotary_percentage=1.0,
822
+ parallel_residual=False,
823
+ bias=False,
824
+ _norm_class="RMSNorm",
825
+ norm_eps=1e-05,
826
+ _mlp_class="LLaMAMLP",
827
+ intermediate_size=13824,
828
+ rope_base=1000000,
829
+ ),
830
+ # https://huggingface.co/codellama/CodeLlama-34b-Python-hf/blob/main/config.json
831
+ dict(
832
+ name="CodeLlama-34b-Python-hf",
833
+ hf_config=dict(org="codellama", name="CodeLlama-34b-Python-hf"),
834
+ block_size=16384,
835
+ vocab_size=32000,
836
+ padding_multiple=64,
837
+ n_layer=48,
838
+ n_head=64,
839
+ n_embd=8192,
840
+ n_query_groups=8,
841
+ rotary_percentage=1.0,
842
+ parallel_residual=False,
843
+ bias=False,
844
+ _norm_class="RMSNorm",
845
+ norm_eps=1e-05,
846
+ _mlp_class="LLaMAMLP",
847
+ intermediate_size=22016,
848
+ rope_base=1000000,
849
+ ),
850
+ # https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/tree/main/config.json
851
+ dict(
852
+ name="CodeLlama-7b-Instruct-hf",
853
+ hf_config=dict(org="codellama", name="CodeLlama-7b-Instruct-hf"),
854
+ block_size=16384,
855
+ vocab_size=32016,
856
+ padding_multiple=16,
857
+ n_layer=32,
858
+ rotary_percentage=1.0,
859
+ parallel_residual=False,
860
+ bias=False,
861
+ _norm_class="RMSNorm",
862
+ norm_eps=1e-05,
863
+ _mlp_class="LLaMAMLP",
864
+ intermediate_size=11008,
865
+ rope_base=1000000,
866
+ ),
867
+ # https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf/blob/main/config.json
868
+ dict(
869
+ name="CodeLlama-13b-Instruct-hf",
870
+ hf_config=dict(org="codellama", name="CodeLlama-13b-Instruct-hf"),
871
+ block_size=2048,
872
+ vocab_size=32016,
873
+ padding_multiple=16,
874
+ n_layer=40,
875
+ n_head=40,
876
+ n_embd=5120,
877
+ rotary_percentage=1.0,
878
+ parallel_residual=False,
879
+ bias=False,
880
+ _norm_class="RMSNorm",
881
+ norm_eps=1e-05,
882
+ _mlp_class="LLaMAMLP",
883
+ intermediate_size=13824,
884
+ rope_base=1000000,
885
+ ),
886
+ # https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf/blob/main/config.json
887
+ dict(
888
+ name="CodeLlama-34b-Instruct-hf",
889
+ hf_config=dict(org="codellama", name="CodeLlama-34b-Instruct-hf"),
890
+ block_size=16384,
891
+ vocab_size=32000,
892
+ padding_multiple=64,
893
+ n_layer=48,
894
+ n_head=64,
895
+ n_embd=8192,
896
+ n_query_groups=8,
897
+ rotary_percentage=1.0,
898
+ parallel_residual=False,
899
+ bias=False,
900
+ _norm_class="RMSNorm",
901
+ norm_eps=1e-05,
902
+ _mlp_class="LLaMAMLP",
903
+ intermediate_size=22016,
904
+ rope_base=1000000,
905
+ ),
906
+ ]
907
+ configs.extend(code_llama)
908
+
909
+
910
+ ########################
911
+ # garage-bAInd Platypus
912
+ ########################
913
+ platypus = [
914
+ # https://huggingface.co/garage-bAInd/Platypus-30B/blob/main/config.json
915
+ dict(
916
+ name="Platypus-30B",
917
+ hf_config=dict(org="garage-bAInd", name="Platypus-30B"),
918
+ block_size=2048,
919
+ padded_vocab_size=32000,
920
+ n_layer=60,
921
+ n_head=52,
922
+ n_embd=6656,
923
+ rotary_percentage=1.0,
924
+ parallel_residual=False,
925
+ bias=False,
926
+ _norm_class="RMSNorm",
927
+ norm_eps=1e-06,
928
+ _mlp_class="LLaMAMLP",
929
+ intermediate_size=17920,
930
+ ),
931
+ # https://huggingface.co/garage-bAInd/Platypus2-7B/blob/main/config.json
932
+ dict(
933
+ name="Platypus2-7B",
934
+ hf_config=dict(org="garage-bAInd", name="Platypus2-7B"),
935
+ padded_vocab_size=32000,
936
+ n_layer=32,
937
+ rotary_percentage=1.0,
938
+ parallel_residual=False,
939
+ bias=False,
940
+ _norm_class="RMSNorm",
941
+ norm_eps=1e-05,
942
+ _mlp_class="LLaMAMLP",
943
+ intermediate_size=11008,
944
+ ),
945
+ # https://huggingface.co/garage-bAInd/Platypus2-13B/blob/main/config.json
946
+ dict(
947
+ name="Platypus2-13B",
948
+ hf_config=dict(org="garage-bAInd", name="Platypus2-13B"),
949
+ padded_vocab_size=32000,
950
+ n_layer=40,
951
+ n_head=40,
952
+ n_embd=5120,
953
+ rotary_percentage=1.0,
954
+ parallel_residual=False,
955
+ bias=False,
956
+ _norm_class="RMSNorm",
957
+ norm_eps=1e-05,
958
+ _mlp_class="LLaMAMLP",
959
+ intermediate_size=13824,
960
+ ),
961
+ # https://huggingface.co/garage-bAInd/Platypus2-70B/blob/main/config.json
962
+ dict(
963
+ name="Platypus2-70B",
964
+ hf_config=dict(org="garage-bAInd", name="Platypus2-70B"),
965
+ padded_vocab_size=32000,
966
+ n_layer=80,
967
+ n_head=64,
968
+ n_embd=8192,
969
+ rotary_percentage=1.0,
970
+ parallel_residual=False,
971
+ bias=False,
972
+ _norm_class="RMSNorm",
973
+ _mlp_class="LLaMAMLP",
974
+ intermediate_size=28672,
975
+ ),
976
+ # https://huggingface.co/garage-bAInd/Camel-Platypus2-13B/blob/main/config.json
977
+ dict(
978
+ name="Camel-Platypus2-13B",
979
+ hf_config=dict(org="garage-bAInd", name="Camel-Platypus2-13B"),
980
+ padded_vocab_size=32000,
981
+ n_layer=40,
982
+ n_head=40,
983
+ n_embd=5120,
984
+ rotary_percentage=1.0,
985
+ parallel_residual=False,
986
+ bias=False,
987
+ _norm_class="RMSNorm",
988
+ _mlp_class="LLaMAMLP",
989
+ intermediate_size=13824,
990
+ ),
991
+ # https://huggingface.co/garage-bAInd/Camel-Platypus2-70B/blob/main/config.json
992
+ dict(
993
+ name="Camel-Platypus2-70B",
994
+ hf_config=dict(org="garage-bAInd", name="Camel-Platypus2-70B"),
995
+ padded_vocab_size=32000,
996
+ n_layer=80,
997
+ n_head=64,
998
+ n_embd=8192,
999
+ n_query_groups=8,
1000
+ rotary_percentage=1.0,
1001
+ parallel_residual=False,
1002
+ bias=False,
1003
+ _norm_class="RMSNorm",
1004
+ _mlp_class="LLaMAMLP",
1005
+ intermediate_size=28672,
1006
+ ),
1007
+ # https://huggingface.co/garage-bAInd/Stable-Platypus2-13B/blob/main/config.json
1008
+ dict(
1009
+ name="Stable-Platypus2-13B",
1010
+ hf_config=dict(org="garage-bAInd", name="Stable-Platypus2-13B"),
1011
+ padded_vocab_size=32000,
1012
+ n_layer=40,
1013
+ n_head=40,
1014
+ n_embd=5120,
1015
+ rotary_percentage=1.0,
1016
+ parallel_residual=False,
1017
+ bias=False,
1018
+ _norm_class="RMSNorm",
1019
+ _mlp_class="LLaMAMLP",
1020
+ intermediate_size=13824,
1021
+ ),
1022
+ # https://huggingface.co/garage-bAInd/Platypus2-70B-instruct/blob/main/config.json
1023
+ dict(
1024
+ name="Platypus2-70B-instruct",
1025
+ hf_config=dict(org="garage-bAInd", name="Platypus2-70B-instruct"),
1026
+ padded_vocab_size=32000,
1027
+ n_layer=80,
1028
+ n_head=64,
1029
+ n_embd=8192,
1030
+ n_query_groups=8,
1031
+ rotary_percentage=1.0,
1032
+ parallel_residual=False,
1033
+ bias=False,
1034
+ _norm_class="RMSNorm",
1035
+ _mlp_class="LLaMAMLP",
1036
+ intermediate_size=28672,
1037
+ ),
1038
+ ]
1039
+ configs.extend(platypus)
1040
+
1041
+
1042
+ ##########################
1043
+ # Stability AI StableCode
1044
+ ##########################
1045
+ stablecode = [
1046
+ # https://huggingface.co/stabilityai/stablecode-completion-alpha-3b/blob/main/config.json
1047
+ dict(
1048
+ name="stablecode-completion-alpha-3b",
1049
+ hf_config=dict(org="stabilityai", name="stablecode-completion-alpha-3b"),
1050
+ block_size=16384,
1051
+ vocab_size=49152,
1052
+ n_layer=32,
1053
+ n_embd=2560,
1054
+ ),
1055
+ # https://huggingface.co/stabilityai/stablecode-completion-alpha-3b-4k/blob/main/config.json
1056
+ dict(
1057
+ name="stablecode-completion-alpha-3b-4k",
1058
+ hf_config=dict(org="stabilityai", name="stablecode-completion-alpha-3b-4k"),
1059
+ vocab_size=49152,
1060
+ n_layer=32,
1061
+ n_embd=2560,
1062
+ ),
1063
+ # https://huggingface.co/stabilityai/stablecode-instruct-alpha-3b/blob/main/config.json
1064
+ dict(
1065
+ name="stablecode-instruct-alpha-3b",
1066
+ hf_config=dict(org="stabilityai", name="stablecode-instruct-alpha-3b"),
1067
+ vocab_size=49152,
1068
+ n_layer=32,
1069
+ n_embd=2560,
1070
+ ),
1071
+ ]
1072
+ configs.extend(stablecode)
1073
+
1074
+
1075
+ ##################################
1076
+ # togethercomputer LLaMA-2-7B-32K
1077
+ ##################################
1078
+ together_llama2_32k = [
1079
+ # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/blob/main/config.json
1080
+ dict(
1081
+ name="LLaMA-2-7B-32K",
1082
+ hf_config=dict(org="togethercomputer", name="LLaMA-2-7B-32K"),
1083
+ vocab_size=32000,
1084
+ padding_multiple=64,
1085
+ n_layer=32,
1086
+ rotary_percentage=1.0,
1087
+ parallel_residual=False,
1088
+ bias=False,
1089
+ _norm_class="RMSNorm",
1090
+ _mlp_class="LLaMAMLP",
1091
+ intermediate_size=11008,
1092
+ rope_condense_ratio=8,
1093
+ )
1094
+ ]
1095
+ configs.extend(together_llama2_32k)
1096
+
1097
+
1098
+ ################
1099
+ # Microsoft Phi
1100
+ ################
1101
+ phi = [
1102
+ # https://huggingface.co/microsoft/phi-1_5/blob/main/config.json
1103
+ dict(
1104
+ name="phi-1_5",
1105
+ hf_config=dict(org="microsoft", name="phi-1_5"),
1106
+ vocab_size=50257,
1107
+ padded_vocab_size=51200,
1108
+ block_size=2048,
1109
+ n_embd=2048,
1110
+ n_layer=24,
1111
+ rotary_percentage=0.5, # 32 / (n_embd / n_head) = 32 / 64
1112
+ shared_attention_norm=True,
1113
+ lm_head_bias=True,
1114
+ gelu_approximate="tanh",
1115
+ )
1116
+ ]
1117
+ configs.extend(phi)
1118
+
1119
+
1120
+ #############
1121
+ # Mistral AI
1122
+ #############
1123
+ mistral = [
1124
+ # https://huggingface.co/mistralai/Mistral-7B-v0.1/blob/main/config.json
1125
+ dict(
1126
+ name="Mistral-7B-{}v0.1",
1127
+ hf_config=dict(org="mistralai", name="Mistral-7B-{}v0.1"),
1128
+ padded_vocab_size=32000,
1129
+ block_size=4096, # should be 32768 but sliding window attention is not implemented
1130
+ n_layer=32,
1131
+ n_query_groups=8,
1132
+ rotary_percentage=1.0,
1133
+ parallel_residual=False,
1134
+ bias=False,
1135
+ _norm_class="RMSNorm",
1136
+ norm_eps=1e-05,
1137
+ _mlp_class="LLaMAMLP",
1138
+ intermediate_size=14336,
1139
+ )
1140
+ ]
1141
+ for c in mistral:
1142
+ for kind in ("", "Instruct-"):
1143
+ copy = deepcopy(c)
1144
+ copy["name"] = c["name"].format(kind)
1145
+ copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
1146
+ configs.append(copy)
1147
+
1148
+
1149
+ ############
1150
+ # TinyLlama
1151
+ ############
1152
+ tiny_llama = [
1153
+ dict(
1154
+ name="tiny-llama-1.1b",
1155
+ hf_config=dict(org="TinyLlama", name="TinyLlama-1.1B-intermediate-step-955k-token-2T"),
1156
+ block_size=2048,
1157
+ vocab_size=32000,
1158
+ padding_multiple=64,
1159
+ n_layer=22,
1160
+ n_head=32,
1161
+ n_embd=2048,
1162
+ rotary_percentage=1.0,
1163
+ parallel_residual=False,
1164
+ bias=False,
1165
+ _norm_class="RMSNorm", # original TinyLlama uses FusedRMSNorm
1166
+ norm_eps=1e-5,
1167
+ _mlp_class="LLaMAMLP",
1168
+ intermediate_size=5632,
1169
+ n_query_groups=4,
1170
+ )
1171
+ ]
1172
+ configs.extend(tiny_llama)
1173
+
1174
+
1175
+ name_to_config = {config["name"]: config for config in configs}
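
Usage note (not part of the commit): a `Config` can be resolved by name from the registry above or loaded from a checkpoint's `lit_config.json` (as base.py does). A small sketch of the fields derived in `__post_init__`, assuming `utils.find_multiple` rounds up to the next multiple as in upstream lit-gpt; the printed values follow from the pythia-160m entry above:

from config import Config

cfg = Config.from_name("pythia-160m-deduped")   # resolved via name_to_config
print(cfg.n_layer, cfg.n_head, cfg.n_embd)      # 12 12 768 (from the pythia-160m entry)
print(cfg.head_size)                            # 64  = n_embd // n_head
print(cfg.rope_n_elem)                          # 16  = int(0.25 * head_size)
print(cfg.padded_vocab_size)                    # 50304: 50254 rounded up to a multiple of 128

# Loading from a converted checkpoint directory instead, as base.py does:
# cfg = Config.from_json("checkpoints/EleutherAI/pythia-160m-deduped/lit_config.json")
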
model.py ADDED
@@ -0,0 +1,345 @@
1
+ """Full definition of a GPT NeoX Language Model, all of it in this single file.
2
+
3
+ Based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT and
4
+ https://github.com/EleutherAI/gpt-neox/tree/main/megatron/model.
5
+ """
6
+ import math
7
+ from typing import Any, Optional, Tuple
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ from typing_extensions import Self
12
+
13
+ from config import *
14
+
15
+
16
+ class GPT(nn.Module):
17
+ def __init__(self, config: Config) -> None:
18
+ super().__init__()
19
+ assert config.padded_vocab_size is not None
20
+ self.config = config
21
+
22
+ self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias)
23
+ self.transformer = nn.ModuleDict(
24
+ dict(
25
+ wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
26
+ h=nn.ModuleList(Block(config) for _ in range(config.n_layer)),
27
+ ln_f=config.norm_class(config.n_embd, eps=config.norm_eps),
28
+ )
29
+ )
30
+ self.max_seq_length = self.config.block_size
31
+ self.mask_cache: Optional[torch.Tensor] = None
32
+
33
+ @property
34
+ def max_seq_length(self) -> int:
35
+ return self._max_seq_length
36
+
37
+ @max_seq_length.setter
38
+ def max_seq_length(self, value: int) -> None:
39
+ """
40
+ When doing inference, the sequences used might be shorter than the model's context length.
41
+ This allows setting a smaller number to avoid allocating unused memory
42
+ """
43
+ if value > self.config.block_size:
44
+ raise ValueError(f"Cannot attend to {value}, block size is only {self.config.block_size}")
45
+ self._max_seq_length = value
46
+ if not hasattr(self, "cos"):
47
+ # first call
48
+ cos, sin = self.rope_cache()
49
+ self.register_buffer("cos", cos, persistent=False)
50
+ self.register_buffer("sin", sin, persistent=False)
51
+ elif value != self.cos.size(0):
52
+ # override
53
+ self.cos, self.sin = self.rope_cache(device=self.cos.device)
54
+ # the mask and kv cache size will get updated on `set_kv_cache`. we cannot update it here because we don't know
55
+ # if the kv cache is expected
56
+
57
+ def reset_parameters(self) -> None:
58
+ # Trigger resetting the rope-cache
59
+ self.max_seq_length = self.config.block_size
60
+
61
+ def _init_weights(self, module: nn.Module) -> None:
62
+ """Meant to be used with `gpt.apply(gpt._init_weights)`."""
63
+ if isinstance(module, nn.Linear):
64
+ torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
65
+ if module.bias is not None:
66
+ torch.nn.init.zeros_(module.bias)
67
+ elif isinstance(module, nn.Embedding):
68
+ torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
69
+
70
+ def forward(self, idx: torch.Tensor, input_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
71
+ T = idx.size(1)
72
+ if self.max_seq_length < T:
73
+ raise ValueError(f"Cannot forward sequence of length {T}, max seq length is only {self.max_seq_length}.")
74
+
75
+ if input_pos is not None: # use the kv cache
76
+ cos = self.cos.index_select(0, input_pos)
77
+ sin = self.sin.index_select(0, input_pos)
78
+ if self.mask_cache is None:
79
+ raise TypeError("You need to call `gpt.set_kv_cache()`")
80
+ mask = self.mask_cache.index_select(2, input_pos)
81
+ else:
82
+ cos = self.cos[:T]
83
+ sin = self.sin[:T]
84
+ mask = None
85
+
86
+ x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
87
+ for block in self.transformer.h:
88
+ x = block(x, cos, sin, mask, input_pos)
89
+ x = self.transformer.ln_f(x)
90
+ return self.lm_head(x) # (b, t, vocab_size)
91
+
92
+ @classmethod
93
+ def from_name(cls, name: str, **kwargs: Any) -> Self:
94
+ return cls(Config.from_name(name, **kwargs))
95
+
96
+ def rope_cache(self, device: Optional[torch.device] = None) -> Tuple[torch.Tensor, torch.Tensor]:
97
+ return build_rope_cache(
98
+ seq_len=self.max_seq_length,
99
+ n_elem=self.config.rope_n_elem,
100
+ device=device,
101
+ condense_ratio=self.config.rope_condense_ratio,
102
+ base=self.config.rope_base,
103
+ )
104
+
105
+ def set_kv_cache(
106
+ self,
107
+ batch_size: int,
108
+ rope_cache_length: Optional[int] = None,
109
+ device: Optional[torch.device] = None,
110
+ dtype: Optional[torch.dtype] = None,
111
+ ) -> None:
112
+ if rope_cache_length is None:
113
+ rope_cache_length = self.cos.size(-1)
114
+ max_seq_length = self.max_seq_length
115
+
116
+ # initialize the kv cache for all blocks
117
+ for block in self.transformer.h:
118
+ block.attn.kv_cache = block.attn.build_kv_cache(
119
+ batch_size, max_seq_length, rope_cache_length, device, dtype
120
+ )
121
+
122
+ if self.mask_cache is None or self.mask_cache.size(3) != max_seq_length:
123
+ # passing `attn_mask` to SDPA downgrades it to use the inefficient implementation. since we only need the mask
124
+ # for the kv-cache support (only during inference), we only create it in that situation
125
+ # this will be resolved by https://github.com/pytorch/pytorch/issues/96099
126
+ ones = torch.ones((max_seq_length, max_seq_length), device=device, dtype=torch.bool)
127
+ self.mask_cache = torch.tril(ones).unsqueeze(0).unsqueeze(0)
128
+
129
+ def clear_kv_cache(self) -> None:
130
+ self.mask_cache = None
131
+ for block in self.transformer.h:
132
+ block.attn.kv_cache = None
133
+
134
+
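Editorial note (not part of the commit): a minimal sketch of how the kv-cache path above might be exercised for incremental decoding, assuming this repo's model.py exposes the `GPT` class defined here, config.py provides `Config.from_name`, a "pythia-70m" config name exists, and the config carries a `padded_vocab_size` field:

    import torch
    from config import Config   # assumption: Config with from_name lives in config.py
    from model import GPT       # assumption: the class these methods belong to

    config = Config.from_name("pythia-70m")
    gpt = GPT(config).eval()
    gpt.max_seq_length = 128            # smaller than block_size to avoid allocating unused memory
    gpt.set_kv_cache(batch_size=1)      # allocates per-block kv caches and the causal mask

    prompt = torch.randint(0, config.padded_vocab_size, (1, 8))  # assumption: padded_vocab_size field
    with torch.no_grad():
        # prefill: pass the whole prompt with its positions so the cache is populated
        logits = gpt(prompt, input_pos=torch.arange(prompt.size(1)))
        next_token = logits[:, -1].argmax(dim=-1, keepdim=True)
        # decode: feed only the newly generated token and its single position
        for pos in range(prompt.size(1), 16):
            logits = gpt(next_token, input_pos=torch.tensor([pos]))
            next_token = logits[:, -1].argmax(dim=-1, keepdim=True)
    gpt.clear_kv_cache()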
135
+ class Block(nn.Module):
136
+ def __init__(self, config: Config) -> None:
137
+ super().__init__()
138
+ self.norm_1 = config.norm_class(config.n_embd, eps=config.norm_eps)
139
+ self.attn = CausalSelfAttention(config)
140
+ self.norm_2 = None if config.shared_attention_norm else config.norm_class(config.n_embd, eps=config.norm_eps)
141
+ self.mlp = config.mlp_class(config)
142
+
143
+ self.config = config
144
+
145
+ def forward(
146
+ self,
147
+ x: torch.Tensor,
148
+ cos: torch.Tensor,
149
+ sin: torch.Tensor,
150
+ mask: Optional[torch.Tensor] = None,
151
+ input_pos: Optional[torch.Tensor] = None,
152
+ ) -> torch.Tensor:
153
+ n_1 = self.norm_1(x)
154
+ h = self.attn(n_1, cos, sin, mask, input_pos)
155
+ if self.config.parallel_residual:
156
+ n_2 = n_1 if self.config.shared_attention_norm else self.norm_2(x)
157
+ x = self.mlp(n_2) + h + x
158
+ else:
159
+ if self.config.shared_attention_norm:
160
+ raise NotImplementedError(
161
+ "No checkpoint amongst the ones we support uses this configuration"
162
+ " (non-parallel residual and shared attention norm)."
163
+ )
164
+ x = h + x
165
+ x = self.mlp(self.norm_2(x)) + x
166
+ return x
167
+
168
+
169
+ class CausalSelfAttention(nn.Module):
170
+ def __init__(self, config: Config) -> None:
171
+ super().__init__()
172
+ shape = (config.n_head + 2 * config.n_query_groups) * config.head_size
173
+ # key, query, value projections for all heads, but in a batch
174
+ self.attn = nn.Linear(config.n_embd, shape, bias=config.bias)
175
+ # output projection
176
+ self.proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
177
+ # disabled by default
178
+ self.kv_cache: Optional[KVCache] = None
179
+
180
+ self.config = config
181
+
182
+ def forward(
183
+ self,
184
+ x: torch.Tensor,
185
+ cos: torch.Tensor,
186
+ sin: torch.Tensor,
187
+ mask: Optional[torch.Tensor] = None,
188
+ input_pos: Optional[torch.Tensor] = None,
189
+ ) -> torch.Tensor:
190
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
191
+
192
+ qkv = self.attn(x)
193
+
194
+ # assemble into a number of query groups to support MHA, MQA and GQA together (see `config.n_query_groups`)
195
+ q_per_kv = self.config.n_head // self.config.n_query_groups
196
+ total_qkv = q_per_kv + 2 # each group has 1+ queries, 1 key, and 1 value
197
+ qkv = qkv.view(B, T, self.config.n_query_groups, total_qkv, self.config.head_size)
198
+ qkv = qkv.permute(0, 2, 3, 1, 4) # (B, n_query_groups, total_qkv, T, hs)
199
+
200
+ # split batched computation into three
201
+ q, k, v = qkv.split((q_per_kv, 1, 1), dim=2)
202
+
203
+ # maybe repeat k and v for the non multi-head attention cases
204
+ # training: flash attention requires it
205
+ # inference: multi-query would require a full kv cache so avoid it to limit its memory usage
206
+ if self.config.n_query_groups != self.config.n_head and (input_pos is None or self.config.n_query_groups != 1):
207
+ k = k.expand(B, self.config.n_query_groups, q_per_kv, T, self.config.head_size)
208
+ v = v.expand(B, self.config.n_query_groups, q_per_kv, T, self.config.head_size)
209
+
210
+ q = q.reshape(B, -1, T, self.config.head_size) # (B, nh_q, T, hs)
211
+ k = k.reshape(B, -1, T, self.config.head_size) # (B, nh_k, T, hs)
212
+ v = v.reshape(B, -1, T, self.config.head_size) # (B, nh_v, T, hs)
213
+
214
+ q_roped = apply_rope(q[..., : self.config.rope_n_elem], cos, sin)
215
+ k_roped = apply_rope(k[..., : self.config.rope_n_elem], cos, sin)
216
+ q = torch.cat((q_roped, q[..., self.config.rope_n_elem :]), dim=-1)
217
+ k = torch.cat((k_roped, k[..., self.config.rope_n_elem :]), dim=-1)
218
+
219
+ if input_pos is not None:
220
+ if not isinstance(self.kv_cache, KVCache):
221
+ raise TypeError("You need to call `gpt.set_kv_cache()`")
222
+ k, v = self.kv_cache(input_pos, k, v)
223
+
224
+ y = self.scaled_dot_product_attention(q, k, v, mask)
225
+
226
+ y = y.reshape(B, T, C) # re-assemble all head outputs side by side
227
+
228
+ # output projection
229
+ return self.proj(y)
230
+
231
+ def scaled_dot_product_attention(
232
+ self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: Optional[torch.Tensor] = None
233
+ ) -> torch.Tensor:
234
+ scale = 1.0 / math.sqrt(self.config.head_size)
235
+ y = torch.nn.functional.scaled_dot_product_attention(
236
+ q, k, v, attn_mask=mask, dropout_p=0.0, scale=scale, is_causal=mask is None
237
+ )
238
+ return y.transpose(1, 2)
239
+
240
+ def build_kv_cache(
241
+ self,
242
+ batch_size: int,
243
+ max_seq_length: int,
244
+ rope_cache_length: Optional[int] = None,
245
+ device: Optional[torch.device] = None,
246
+ dtype: Optional[torch.dtype] = None,
247
+ ) -> "KVCache":
248
+ heads = 1 if self.config.n_query_groups == 1 else self.config.n_head
249
+ v_shape = (batch_size, heads, max_seq_length, self.config.head_size)
250
+ if rope_cache_length is None:
251
+ if self.config.rotary_percentage != 1.0:
252
+ raise TypeError("Please pass the `rope_cache_length=gpt.cos.size(-1)` value")
253
+ k_shape = v_shape
254
+ else:
255
+ k_shape = (
256
+ batch_size,
257
+ heads,
258
+ max_seq_length,
259
+ rope_cache_length + self.config.head_size - self.config.rope_n_elem,
260
+ )
261
+ return KVCache(k_shape, v_shape, device=device, dtype=dtype)
262
+
263
+
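The grouped projection above packs queries, keys and values per query group so MHA, MQA and GQA share one code path; a small standalone shape check (illustrative sizes, not one of the Pythia configurations):

    import torch

    B, T = 2, 5
    n_head, n_query_groups, head_size = 12, 4, 64   # GQA: 3 query heads share each k/v head
    q_per_kv = n_head // n_query_groups             # 3
    total_qkv = q_per_kv + 2                        # 3 queries + 1 key + 1 value per group

    qkv = torch.randn(B, T, (n_head + 2 * n_query_groups) * head_size)
    qkv = qkv.view(B, T, n_query_groups, total_qkv, head_size).permute(0, 2, 3, 1, 4)
    q, k, v = qkv.split((q_per_kv, 1, 1), dim=2)
    print(q.shape)   # torch.Size([2, 4, 3, 5, 64])
    print(k.shape)   # torch.Size([2, 4, 1, 5, 64]); n_query_groups == n_head gives MHA,
                     # n_query_groups == 1 gives MQA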
264
+ class GptNeoxMLP(nn.Module):
265
+ def __init__(self, config: Config) -> None:
266
+ super().__init__()
267
+ self.fc = nn.Linear(config.n_embd, config.intermediate_size, bias=config.bias)
268
+ self.proj = nn.Linear(config.intermediate_size, config.n_embd, bias=config.bias)
269
+
270
+ self.config = config
271
+
272
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
273
+ x = self.fc(x)
274
+ x = torch.nn.functional.gelu(x, approximate=self.config.gelu_approximate)
275
+ return self.proj(x)
276
+
277
+
278
+ class LLaMAMLP(nn.Module):
279
+ def __init__(self, config: Config) -> None:
280
+ super().__init__()
281
+ self.fc_1 = nn.Linear(config.n_embd, config.intermediate_size, bias=config.bias)
282
+ self.fc_2 = nn.Linear(config.n_embd, config.intermediate_size, bias=config.bias)
283
+ self.proj = nn.Linear(config.intermediate_size, config.n_embd, bias=config.bias)
284
+
285
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
286
+ x_fc_1 = self.fc_1(x)
287
+ x_fc_2 = self.fc_2(x)
288
+ x = torch.nn.functional.silu(x_fc_1) * x_fc_2
289
+ return self.proj(x)
290
+
291
+
292
+ def build_rope_cache(
293
+ seq_len: int, n_elem: int, device: Optional[torch.device] = None, base: int = 10000, condense_ratio: int = 1
294
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
295
+ """Enhanced Transformer with Rotary Position Embedding.
296
+
297
+ Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
298
+ transformers/rope/__init__.py. MIT License:
299
+ https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
300
+ """
301
+ # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
302
+ theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, device=device).float() / n_elem))
303
+
304
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
305
+ seq_idx = torch.arange(seq_len, device=device) / condense_ratio
306
+
307
+ # Calculate the product of position index and $\theta_i$
308
+ idx_theta = torch.outer(seq_idx, theta).repeat(1, 2)
309
+
310
+ return torch.cos(idx_theta), torch.sin(idx_theta)
311
+
312
+
313
+ def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
314
+ head_size = x.size(-1)
315
+ x1 = x[..., : head_size // 2] # (B, nh, T, hs/2)
316
+ x2 = x[..., head_size // 2 :] # (B, nh, T, hs/2)
317
+ rotated = torch.cat((-x2, x1), dim=-1) # (B, nh, T, hs)
318
+ roped = (x * cos) + (rotated * sin)
319
+ return roped.type_as(x)
320
+
321
+
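A quick standalone check of the two rotary helpers above (a sketch, assuming this file is importable as model.py):

    import torch
    from model import build_rope_cache, apply_rope

    B, n_heads, T, rope_n_elem = 1, 2, 8, 4
    cos, sin = build_rope_cache(seq_len=T, n_elem=rope_n_elem)   # each of shape (T, rope_n_elem)
    q = torch.randn(B, n_heads, T, rope_n_elem)
    q_roped = apply_rope(q, cos, sin)                            # cos/sin broadcast over batch and heads
    assert q_roped.shape == q.shape
    # position 0 is unchanged because cos(0) = 1 and sin(0) = 0
    assert torch.allclose(q_roped[:, :, 0], q[:, :, 0], atol=1e-6)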
322
+ class KVCache(nn.Module):
323
+ def __init__(
324
+ self,
325
+ k_shape: Tuple[int, int, int, int],
326
+ v_shape: Tuple[int, int, int, int],
327
+ device: Optional[torch.device] = None,
328
+ dtype: Optional[torch.dtype] = None,
329
+ ) -> None:
330
+ super().__init__()
331
+ self.register_buffer("k", torch.zeros(k_shape, device=device, dtype=dtype), persistent=False)
332
+ self.register_buffer("v", torch.zeros(v_shape, device=device, dtype=dtype), persistent=False)
333
+
334
+ def forward(self, input_pos: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
335
+ # move the buffer to the activation dtype for when AMP is used
336
+ self.k = self.k.to(k.dtype)
337
+ self.v = self.v.to(v.dtype)
338
+ # update the cache
339
+ k = self.k.index_copy_(2, input_pos, k)
340
+ v = self.v.index_copy_(2, input_pos, v)
341
+ return k, v
342
+
343
+ def reset_parameters(self) -> None:
344
+ torch.nn.init.zeros_(self.k)
345
+ torch.nn.init.zeros_(self.v)
requirements.txt ADDED
@@ -0,0 +1,4 @@
1
+ gradio==3.50.2
2
+ torch>=2.1.0
3
+ lightning @ git+https://github.com/Lightning-AI/lightning@6cbe9ceb560d798892bdae9186291acf9bf5d2e3
4
+ jsonargparse[signatures] # CLI
tokenizer.py ADDED
@@ -0,0 +1,107 @@
1
+ import json
2
+ from pathlib import Path
3
+ from typing import Optional, Union
4
+
5
+ import torch
6
+
7
+
8
+ class Tokenizer:
9
+ def __init__(self, checkpoint_dir: Union[Path, str]) -> None:
10
+ checkpoint_dir = Path(checkpoint_dir)
11
+ if not checkpoint_dir.exists():
12
+ raise NotADirectoryError(f"The checkpoint directory does not exist: {str(checkpoint_dir)}")
13
+
14
+ self.use_bos = self.check_if_bos_token_used(checkpoint_dir)
15
+ self.bos_id = None
16
+ self.eos_id = None
17
+
18
+ # some checkpoints have both files, `.model` takes precedence
19
+ if (vocabulary_path := checkpoint_dir / "tokenizer.model").is_file():
20
+ from sentencepiece import SentencePieceProcessor
21
+
22
+ self.processor = SentencePieceProcessor(model_file=str(vocabulary_path))
23
+ self.backend = "sentencepiece"
24
+ self.bos_id = self.processor.bos_id()
25
+ self.eos_id = self.processor.eos_id()
26
+
27
+ elif (vocabulary_path := checkpoint_dir / "tokenizer.json").is_file():
28
+ from tokenizers import Tokenizer as HFTokenizer
29
+
30
+ self.processor = HFTokenizer.from_file(str(vocabulary_path))
31
+ self.backend = "huggingface"
32
+
33
+ if (special_tokens_path := checkpoint_dir / "tokenizer_config.json").is_file():
34
+ with open(special_tokens_path) as fp:
35
+ config = json.load(fp)
36
+ bos_token = config.get("bos_token")
37
+ self.bos_id = self.token_to_id(bos_token) if bos_token is not None else None
38
+ eos_token = config.get("eos_token")
39
+ self.eos_id = self.token_to_id(eos_token) if eos_token is not None else None
40
+ if (special_tokens_path := checkpoint_dir / "generation_config.json").is_file():
41
+ with open(special_tokens_path) as fp:
42
+ config = json.load(fp)
43
+ if self.bos_id is None:
44
+ self.bos_id = config.get("bos_token_id")
45
+ if self.eos_id is None:
46
+ self.eos_id = config.get("eos_token_id")
47
+ else:
48
+ raise NotImplementedError
49
+
50
+ @property
51
+ def vocab_size(self) -> int:
52
+ if self.backend == "huggingface":
53
+ return self.processor.get_vocab_size(with_added_tokens=False)
54
+ if self.backend == "sentencepiece":
55
+ return self.processor.vocab_size()
56
+ raise RuntimeError
57
+
58
+ def token_to_id(self, token: str) -> int:
59
+ if self.backend == "huggingface":
60
+ id_ = self.processor.token_to_id(token)
61
+ elif self.backend == "sentencepiece":
62
+ id_ = self.processor.piece_to_id(token)
63
+ else:
64
+ raise RuntimeError
65
+ if id_ is None:
66
+ raise ValueError(f"token {token!r} not found in the collection.")
67
+ return id_
68
+
69
+ def check_if_bos_token_used(self, checkpoint_dir: Path) -> bool:
70
+ if not (tokenizer_config_path := checkpoint_dir / "tokenizer_config.json").is_file():
71
+ return False
72
+ with open(tokenizer_config_path) as fp:
73
+ config = json.load(fp)
74
+ if any(config.get(check, False) for check in ("add_bos_token", "add_prefix_space")):
75
+ return True
76
+ # for examples that also use the Llama tokenizer, but do not have or set add_bos_token to True.
77
+ # ex: https://huggingface.co/stabilityai/StableBeluga2/blob/main/tokenizer_config.json#L2
78
+ return config.get("add_bos_token") is None and config.get("tokenizer_class") == "LlamaTokenizer"
79
+
80
+ def encode(
81
+ self,
82
+ string: str,
83
+ device: Optional[torch.device] = None,
84
+ bos: Optional[bool] = None,
85
+ eos: bool = False,
86
+ max_length: int = -1,
87
+ ) -> torch.Tensor:
88
+ if self.backend == "huggingface":
89
+ tokens = self.processor.encode(string).ids
90
+ elif self.backend == "sentencepiece":
91
+ tokens = self.processor.encode(string)
92
+ else:
93
+ raise RuntimeError
94
+ if bos or (bos is None and self.use_bos):
95
+ bos_id = self.bos_id
96
+ if bos_id is None:
97
+ raise NotImplementedError("This tokenizer does not have a defined a bos token")
98
+ tokens = [bos_id] + tokens
99
+ if eos:
100
+ tokens = tokens + [self.eos_id]
101
+ if max_length > 0:
102
+ tokens = tokens[:max_length]
103
+ return torch.tensor(tokens, dtype=torch.int, device=device)
104
+
105
+ def decode(self, tensor: torch.Tensor) -> str:
106
+ tokens = [tensor.item()] if tensor.ndim == 0 else tensor.tolist()
107
+ return self.processor.decode(tokens)
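A usage sketch for the tokenizer wrapper above (editorial, not part of the commit), assuming a Pythia checkpoint with tokenizer.json and tokenizer_config.json has been downloaded locally:

    from pathlib import Path
    import torch
    from tokenizer import Tokenizer

    checkpoint_dir = Path("checkpoints/EleutherAI/pythia-70m-deduped")  # assumed download location
    tokenizer = Tokenizer(checkpoint_dir)

    ids = tokenizer.encode("I would like to", device=torch.device("cpu"))
    print(ids)                      # int tensor, one id per token (plus BOS if the config requests it)
    print(tokenizer.decode(ids))    # round-trips back to the original string
    print(tokenizer.vocab_size)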
utils.py ADDED
@@ -0,0 +1,358 @@
1
+ """Utility functions for training and inference."""
2
+ import math
3
+ import pickle
4
+ import sys
5
+ from contextlib import nullcontext
6
+ from io import BytesIO
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING, ContextManager, Dict, List, Mapping, Optional, TypeVar, Union
9
+
10
+ import lightning as L
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.utils._device
14
+ from lightning.fabric.strategies import FSDPStrategy
15
+ from lightning.fabric.utilities.load import _lazy_load as lazy_load
16
+ from torch.serialization import normalize_storage_type
17
+
18
+ if TYPE_CHECKING:
19
+ from lit_gpt import GPT
20
+
21
+
22
+ def find_multiple(n: int, k: int) -> int:
23
+ assert k > 0
24
+ if n % k == 0:
25
+ return n
26
+ return n + k - (n % k)
27
+
28
+
29
+ def num_parameters(module: nn.Module, requires_grad: Optional[bool] = None) -> int:
30
+ total = 0
31
+ for p in module.parameters():
32
+ if requires_grad is None or p.requires_grad == requires_grad:
33
+ if hasattr(p, "quant_state"):
34
+ # bitsandbytes 4bit layer support
35
+ total += math.prod(p.quant_state[1])
36
+ else:
37
+ total += p.numel()
38
+ return total
39
+
40
+
41
+ def gptq_quantization(enabled: bool = False) -> ContextManager:
42
+ if not enabled:
43
+ return nullcontext()
44
+
45
+ from lightning.fabric.plugins.precision.utils import _ClassReplacementContextManager
46
+
47
+ from quantize.gptq import ColBlockQuantizedLinear
48
+
49
+ class QuantizedLinear(ColBlockQuantizedLinear):
50
+ def __init__(self, *args, **kwargs):
51
+ super().__init__(*args, bits=4, tile_cols=-1, **kwargs)
52
+
53
+ return _ClassReplacementContextManager({"torch.nn.Linear": QuantizedLinear})
54
+
55
+
56
+ def check_valid_checkpoint_dir(checkpoint_dir: Path, model_name: str) -> None:
57
+ if model_name == "pythia_160m_deduped_huggingface":
58
+ selected_model_name = "pythia_160m_deduped_hf.pth"
59
+ elif model_name == "pythia_160m_deduped_custom":
60
+ selected_model_name = "pythia_160m_deduped_custom.pth"
61
+ else:
62
+ selected_model_name = "lit_model.pth"
63
+
64
+ files = {
65
+ "lit_model.pth": (checkpoint_dir / selected_model_name).is_file(),
66
+ "lit_config.json": (checkpoint_dir / "lit_config.json").is_file(),
67
+ "tokenizer.json OR tokenizer.model": (checkpoint_dir / "tokenizer.json").is_file() or (
68
+ checkpoint_dir / "tokenizer.model"
69
+ ).is_file(),
70
+ "tokenizer_config.json": (checkpoint_dir / "tokenizer_config.json").is_file(),
71
+ }
72
+ if checkpoint_dir.is_dir():
73
+ if all(files.values()):
74
+ # we're good
75
+ return
76
+ problem = f" is missing the files: {[f for f, exists in files.items() if not exists]!r}"
77
+ else:
78
+ problem = " is not a checkpoint directory"
79
+
80
+ # list locally available checkpoints
81
+ available = list(Path("checkpoints").glob("*/*"))
82
+ if available:
83
+ options = "\n --checkpoint_dir ".join([""] + [repr(str(p.resolve())) for p in available])
84
+ extra = f"\nYou have downloaded locally:{options}\n"
85
+ else:
86
+ extra = ""
87
+
88
+ error_message = (
89
+ f"--checkpoint_dir {str(checkpoint_dir.absolute())!r}{problem}."
90
+ "\nFind download instructions at https://github.com/Lightning-AI/lit-gpt/blob/main/tutorials\n"
91
+ f"{extra}\nSee all download options by running:\n python scripts/download.py"
92
+ )
93
+ print(error_message, file=sys.stderr)
94
+ raise SystemExit(1)
95
+
96
+
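For reference, the directory layout the check above expects (a sketch; the weights filename varies with `model_name` as handled above):

    checkpoints/EleutherAI/pythia-70m-deduped/
    ├── lit_config.json
    ├── lit_model.pth            # or pythia_160m_deduped_hf.pth / pythia_160m_deduped_custom.pth
    ├── tokenizer.json           # or tokenizer.model for sentencepiece-based checkpoints
    └── tokenizer_config.json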
97
+ class SavingProxyForStorage:
98
+ def __init__(self, obj, saver, protocol_version=5):
99
+ self.protocol_version = protocol_version
100
+ self.saver = saver
101
+ if not (isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj)):
102
+ raise TypeError(f"expected storage, not {type(obj)}")
103
+
104
+ # this logic is taken from PyTorch 2.0+ torch/serialization.py
105
+ if isinstance(obj, torch.storage.TypedStorage):
106
+ # PT upstream wants to deprecate this eventually...
107
+ storage = obj._untyped_storage
108
+ storage_type_str = obj._pickle_storage_type()
109
+ storage_type = getattr(torch, storage_type_str)
110
+ storage_numel = obj._size()
111
+ else:
112
+ storage = obj
113
+ storage_type = normalize_storage_type(type(obj))
114
+ storage_numel = storage.nbytes()
115
+
116
+ storage_key = saver._write_storage_and_return_key(storage)
117
+ location = torch.serialization.location_tag(storage)
118
+
119
+ self.storage_info = ("storage", storage_type, storage_key, location, storage_numel)
120
+
121
+ def __reduce_ex__(self, protocol_version):
122
+ assert False, "this should be handled out of band"
123
+
124
+
125
+ class SavingProxyForTensor:
126
+ def __init__(self, tensor, saver, protocol_version=5):
127
+ self.protocol_version = protocol_version
128
+ self.reduce_ret_fn, reduce_args = tensor.__reduce_ex__(protocol_version)
129
+ if reduce_args[0] == torch._utils._rebuild_tensor_v2:
130
+ # for Tensors with Python attributes
131
+ (a0, a1, (storage, *a2_other), *other_reduce_args) = reduce_args
132
+ assert isinstance(storage, torch.storage.TypedStorage), "Please check for updates"
133
+ storage_proxy = SavingProxyForStorage(storage, saver, protocol_version=protocol_version)
134
+ self.reduce_args = (a0, a1, (storage_proxy, *a2_other), *other_reduce_args)
135
+ else:
136
+ (storage, *other_reduce_args) = reduce_args
137
+ assert isinstance(storage, torch.storage.TypedStorage), "Please check for updates"
138
+ storage_proxy = SavingProxyForStorage(storage, saver, protocol_version=protocol_version)
139
+ self.reduce_args = (storage_proxy, *other_reduce_args)
140
+
141
+ def __reduce_ex__(self, protocol_version):
142
+ if protocol_version != self.protocol_version:
143
+ raise RuntimeError(f"Unexpected protocol version: expected {self.protocol_version}, got {protocol_version}")
144
+ return self.reduce_ret_fn, self.reduce_args
145
+
146
+
147
+ class IncrementalPyTorchPickler(pickle.Pickler):
148
+ def __init__(self, saver, *args, **kwargs):
149
+ super().__init__(*args, **kwargs)
150
+ self.storage_dtypes = {}
151
+ self.saver = saver
152
+ self.id_map = {}
153
+
154
+ # this logic is taken from PyTorch 2.0+ torch/serialization.py
155
+ def persistent_id(self, obj):
156
+ # FIXME: the docs say that persistent_id should only return a string
157
+ # but torch storage serialization returns tuples. This works only in the binary protocol
158
+ # see
159
+ # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
160
+ # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
161
+ if isinstance(obj, SavingProxyForStorage):
162
+ return obj.storage_info
163
+
164
+ if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
165
+ if isinstance(obj, torch.storage.TypedStorage):
166
+ # TODO: Once we decide to break serialization FC, this case
167
+ # can be deleted
168
+ storage = obj._untyped_storage
169
+ storage_dtype = obj.dtype
170
+ storage_type_str = obj._pickle_storage_type()
171
+ storage_type = getattr(torch, storage_type_str)
172
+ storage_numel = obj._size()
173
+
174
+ else:
175
+ storage = obj
176
+ storage_dtype = torch.uint8
177
+ storage_type = normalize_storage_type(type(obj))
178
+ storage_numel = storage.nbytes()
179
+
180
+ # If storage is allocated, ensure that any other saved storages
181
+ # pointing to the same data all have the same dtype. If storage is
182
+ # not allocated, don't perform this check
183
+ if storage.data_ptr() != 0:
184
+ if storage.data_ptr() in self.storage_dtypes:
185
+ if storage_dtype != self.storage_dtypes[storage.data_ptr()]:
186
+ raise RuntimeError(
187
+ "Cannot save multiple tensors or storages that view the same data as different types"
188
+ )
189
+ else:
190
+ self.storage_dtypes[storage.data_ptr()] = storage_dtype
191
+
192
+ storage_key = self.id_map.get(storage._cdata)
193
+ if storage_key is None:
194
+ storage_key = self.saver._write_storage_and_return_key(storage)
195
+ self.id_map[storage._cdata] = storage_key
196
+ location = torch.serialization.location_tag(storage)
197
+
198
+ return ("storage", storage_type, storage_key, location, storage_numel)
199
+
200
+ return None
201
+
202
+
203
+ class incremental_save:
204
+ def __init__(self, name):
205
+ self.name = name
206
+ self.zipfile = torch._C.PyTorchFileWriter(str(name))
207
+ self.has_saved = False
208
+ self.next_key = 0
209
+
210
+ def __enter__(self):
211
+ return self
212
+
213
+ def store_early(self, tensor):
214
+ if isinstance(tensor, torch.Tensor):
215
+ return SavingProxyForTensor(tensor, self)
216
+ raise TypeError(f"can only store tensors early, not {type(tensor)}")
217
+
218
+ def save(self, obj):
219
+ if self.has_saved:
220
+ raise RuntimeError("have already saved")
221
+ # Write the pickle data for `obj`
222
+ data_buf = BytesIO()
223
+ pickler = IncrementalPyTorchPickler(self, data_buf, protocol=5)
224
+ pickler.dump(obj)
225
+ data_value = data_buf.getvalue()
226
+ self.zipfile.write_record("data.pkl", data_value, len(data_value))
227
+ self.has_saved = True
228
+
229
+ def _write_storage_and_return_key(self, storage):
230
+ if self.has_saved:
231
+ raise RuntimeError("have already saved")
232
+ key = self.next_key
233
+ self.next_key += 1
234
+ name = f"data/{key}"
235
+ if storage.device.type != "cpu":
236
+ storage = storage.cpu()
237
+ num_bytes = storage.nbytes()
238
+ self.zipfile.write_record(name, storage.data_ptr(), num_bytes)
239
+ return key
240
+
241
+ def __exit__(self, type, value, traceback):
242
+ self.zipfile.write_end_of_file()
243
+
244
+
245
+ T = TypeVar("T")
246
+
247
+
248
+ def chunked_cross_entropy(
249
+ logits: Union[torch.Tensor, List[torch.Tensor]], targets: torch.Tensor, chunk_size: int = 128
250
+ ) -> torch.Tensor:
251
+ # with large max_sequence_lengths, the beginning of `backward` allocates a large memory chunk which can dominate
252
+ # the memory usage in fine-tuning settings with a low number of parameters.
253
+ # as a workaround hack, the cross entropy computation is chunked to force it to deallocate on the go, reducing
254
+ # the memory spike's magnitude
255
+
256
+ # lm_head was chunked (we are fine-tuning)
257
+ if isinstance(logits, list):
258
+ # don't want to chunk cross entropy
259
+ if chunk_size == 0:
260
+ logits = torch.cat(logits, dim=1)
261
+ logits = logits.reshape(-1, logits.size(-1))
262
+ targets = targets.reshape(-1)
263
+ return torch.nn.functional.cross_entropy(logits, targets, ignore_index=-1)
264
+
265
+ # chunk cross entropy
266
+ logit_chunks = [logit_chunk.reshape(-1, logit_chunk.size(-1)) for logit_chunk in logits]
267
+ target_chunks = [target_chunk.reshape(-1) for target_chunk in targets.split(logits[0].size(1), dim=1)]
268
+ loss_chunks = [
269
+ torch.nn.functional.cross_entropy(logit_chunk, target_chunk, ignore_index=-1, reduction="none")
270
+ for logit_chunk, target_chunk in zip(logit_chunks, target_chunks)
271
+ ]
272
+ non_masked_elems = (targets != -1).sum()
273
+ mean_loss = torch.cat(loss_chunks).sum() / max(1, non_masked_elems)
274
+ return mean_loss
275
+
276
+ # no chunking at all
277
+ logits = logits.reshape(-1, logits.size(-1))
278
+ targets = targets.reshape(-1)
279
+ if chunk_size == 0:
280
+ return torch.nn.functional.cross_entropy(logits, targets, ignore_index=-1)
281
+
282
+ # lm_head wasn't chunked, chunk cross entropy
283
+ logit_chunks = logits.split(chunk_size)
284
+ target_chunks = targets.split(chunk_size)
285
+ loss_chunks = [
286
+ torch.nn.functional.cross_entropy(logit_chunk, target_chunk, ignore_index=-1, reduction="none")
287
+ for logit_chunk, target_chunk in zip(logit_chunks, target_chunks)
288
+ ]
289
+ non_masked_elems = (targets != -1).sum()
290
+ mean_loss = torch.cat(loss_chunks).sum() / max(1, non_masked_elems)
291
+ return mean_loss
292
+
293
+
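A small sanity check (editorial) that the chunked path above matches plain cross entropy, assuming utils.py is importable:

    import torch
    from utils import chunked_cross_entropy

    vocab, B, T = 50, 2, 12
    logits = torch.randn(B, T, vocab)
    targets = torch.randint(0, vocab, (B, T))
    targets[0, -3:] = -1                      # masked positions are ignored

    chunked = chunked_cross_entropy(logits, targets, chunk_size=4)
    reference = torch.nn.functional.cross_entropy(
        logits.reshape(-1, vocab), targets.reshape(-1), ignore_index=-1
    )
    assert torch.allclose(chunked, reference, atol=1e-5)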
294
+ def map_old_state_dict_weights(state_dict: Dict, mapping: Mapping, prefix: str) -> Dict:
295
+ for checkpoint_name, attribute_name in mapping.items():
296
+ full_checkpoint_name = prefix + checkpoint_name
297
+ if full_checkpoint_name in state_dict:
298
+ full_attribute_name = prefix + attribute_name
299
+ state_dict[full_attribute_name] = state_dict.pop(full_checkpoint_name)
300
+ return state_dict
301
+
302
+
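A tiny illustration of the key-renaming helper above; the old and new key names here are hypothetical, not actual lit-gpt checkpoint keys:

    import torch
    from utils import map_old_state_dict_weights

    state_dict = {"transformer.h.0.attn.old_proj.weight": torch.zeros(2, 2)}
    mapping = {"attn.old_proj.weight": "attn.proj.weight"}       # hypothetical old -> new mapping
    state_dict = map_old_state_dict_weights(state_dict, mapping, prefix="transformer.h.0.")
    print(list(state_dict))                                      # ['transformer.h.0.attn.proj.weight']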
303
+ def get_default_supported_precision(training: bool) -> str:
304
+ """Return default precision that is supported by the hardware: either `bf16` or `16`.
305
+
306
+ Args:
307
+ training: `-mixed` or `-true` version of the precision to use
308
+
309
+ Returns:
310
+ default precision that is suitable for the task and is supported by the hardware
311
+ """
312
+ from lightning.fabric.accelerators import MPSAccelerator
313
+
314
+ if MPSAccelerator.is_available() or (torch.cuda.is_available() and not torch.cuda.is_bf16_supported()):
315
+ return "16-mixed" if training else "16-true"
316
+ return "bf16-mixed" if training else "bf16-true"
317
+
318
+
319
+ def load_checkpoint(fabric: L.Fabric, model: nn.Module, checkpoint_path: Path, strict: bool = True) -> None:
320
+ if isinstance(fabric.strategy, FSDPStrategy):
321
+ fabric.load_raw(checkpoint_path, model, strict=strict)
322
+ else:
323
+ state_dict = lazy_load(checkpoint_path)
324
+ state_dict = state_dict.get("model", state_dict)
325
+ model.load_state_dict(state_dict, strict=strict)
326
+
327
+
328
+ def flops_per_param(max_seq_length: int, n_layer: int, n_embd: int, n_params: int) -> int:
329
+ flops_per_token = 2 * n_params # each parameter is used for a MAC (2 FLOPS) per network operation
330
+ # this assumes that all samples have a fixed length equal to the block size
331
+ # which is most likely false during finetuning
332
+ flops_per_seq = flops_per_token * max_seq_length
333
+ attn_flops_per_seq = n_layer * 2 * 2 * (n_embd * (max_seq_length**2))
334
+ return flops_per_seq + attn_flops_per_seq
335
+
336
+
337
+ def estimate_flops(model: "GPT", training: bool) -> int:
338
+ """Measures estimated FLOPs for MFU.
339
+
340
+ Refs:
341
+ * https://ar5iv.labs.arxiv.org/html/2205.05198#A1
342
+ * https://ar5iv.labs.arxiv.org/html/2204.02311#A2
343
+ """
344
+ # using all parameters for this is a naive overestimation because not all model parameters actually contribute to
345
+ # this FLOP computation (e.g. embedding, norm). For this reason, the result will be higher by a fixed percentage
346
+ # (~10%) compared to the measured FLOPs; the measured value is lower but more realistic.
347
+ # For a proper estimate, this needs a more fine-grained calculation as in Appendix A of the paper.
348
+ n_trainable_params = num_parameters(model, requires_grad=True)
349
+ trainable_flops = flops_per_param(
350
+ model.max_seq_length, model.config.n_layer, model.config.n_embd, n_trainable_params
351
+ )
352
+ # forward + backward + gradients (assumes no gradient accumulation)
353
+ ops_per_step = 3 if training else 1
354
+ n_frozen_params = num_parameters(model, requires_grad=False)
355
+ frozen_flops = flops_per_param(model.max_seq_length, model.config.n_layer, model.config.n_embd, n_frozen_params)
356
+ # forward + backward
357
+ frozen_ops_per_step = 2 if training else 1
358
+ return ops_per_step * trainable_flops + frozen_ops_per_step * frozen_flops
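A worked example of the per-parameter FLOPs estimate above with illustrative sizes (hypothetical, roughly 160M-parameter scale; not an exact Pythia configuration), assuming utils.py is importable:

    from utils import flops_per_param

    n_params, n_layer, n_embd, max_seq_length = 160_000_000, 12, 768, 2048
    dense = 2 * n_params * max_seq_length                          # ~6.6e11 FLOPs per sequence
    attn = n_layer * 2 * 2 * (n_embd * max_seq_length**2)          # ~1.5e11 FLOPs per sequence
    print(flops_per_param(max_seq_length, n_layer, n_embd, n_params) == dense + attn)  # True
    # estimate_flops(model, training=True) scales this by 3 for trainable parameters
    # (forward + backward + gradients) and by 2 for frozen parameters (forward + backward only)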