Commit f9cc63a · verified · TRM-coding committed · 1 parent: d84ddb6

Upload 7 files

Files changed (6)
  1. .gitignore +1 -0
  2. config.json +39 -0
  3. generation_config.json +6 -0
  4. model.py +12 -0
  5. model.safetensors +3 -0
  6. train.py +205 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ ./wandb/latest-run/
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "_name_or_path": "TRM-coding/codeparrot-small",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.1",
+ "use_cache": true,
+ "vocab_size": 32768
+ }
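
The config describes a GPT-2-small architecture (12 layers, 12 heads, 768-dim embeddings) with a reduced 32,768-token vocabulary. A minimal sketch, not part of the commit, of instantiating that architecture from a local clone of this repo and counting its parameters; it assumes transformers is installed and the repo files above are in the working directory:

    # Sketch only: builds the architecture described by config.json (random init)
    # and reports its size. Assumes this repo is cloned into the working directory.
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("./")          # reads config.json
    model = AutoModelForCausalLM.from_config(config)   # GPT2LMHeadModel, randomly initialized

    n_params = sum(p.numel() for p in model.parameters())
    print(f"{n_params/1e6:.1f}M parameters")        # roughly 111M for this config
    print(f"~{n_params*4/1e6:.0f} MB in float32")   # consistent with model.safetensors (~444 MB)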
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 50256,
+ "eos_token_id": 50256,
+ "transformers_version": "4.41.1"
+ }
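
These values are the defaults that model.generate() falls back to when no explicit generation arguments are given. A minimal sketch, not part of the commit, of loading and overriding them; it assumes a local clone of the repo with the file above present:

    # Sketch only: load the generation defaults shipped in this commit.
    # Assumes the repo has been cloned into the working directory.
    from transformers import GenerationConfig

    gen_config = GenerationConfig.from_pretrained("./")      # reads generation_config.json
    print(gen_config.bos_token_id, gen_config.eos_token_id)  # 50256, 50256

    # Per-call arguments take precedence over the file's defaults, e.g.:
    # model.generate(**inputs, generation_config=gen_config, do_sample=True, max_length=50)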
model.py ADDED
@@ -0,0 +1,12 @@
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+ model_ckpt = "codeparrot"
+ org = "transformersbook"
+ def model_size(model):
+     return sum(t.numel() for t in model.parameters())
+ tokenizer = AutoTokenizer.from_pretrained(org + "/" + model_ckpt)
+ config_small = AutoConfig.from_pretrained("gpt2", vocab_size=len(tokenizer))
+ model_small = AutoModelForCausalLM.from_config(config_small)
+
+ print(f'GPT-2 size: {model_size(model_small)/1000**2:.1f}M parameters')
+
+ model_small.save_pretrained("models/" + model_ckpt + "-small", push_to_hub=True)
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6427cf51827993c3202509bb9f9ec11b2d86fc836130d91d00a85fdcd473a82
+ size 444048000
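
The weights themselves live in Git LFS; this pointer records a ~444 MB payload, consistent with a float32 GPT-2-small of the size implied by config.json. A minimal sketch, not part of the commit, of inspecting the downloaded file without loading the whole model; it assumes the safetensors package is installed and `git lfs pull` has replaced the pointer with the real blob:

    # Sketch only: peek at the tensors stored in model.safetensors.
    from safetensors import safe_open

    with safe_open("model.safetensors", framework="pt") as f:
        names = list(f.keys())
        print(len(names), "tensors")  # embeddings, 12 transformer blocks, final layer norm
        print(f.get_tensor("transformer.wte.weight").shape)  # expected (32768, 768) token embedding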
train.py ADDED
@@ -0,0 +1,205 @@
+ from transformers import GPT2LMHeadModel, AutoTokenizer
+ from transformers import AdamW, get_scheduler, set_seed
+ from datasets import load_dataset
+ from accelerate import Accelerator
+ import datasets, transformers
+ from huggingface_hub import Repository
+
+ from torch.utils.data import IterableDataset
+ from torch.utils.data.dataloader import DataLoader
+ from torch.utils.tensorboard import SummaryWriter
+ from argparse import Namespace
+ import torch
+ import logging
+ import wandb
+
+ class ConstantLengthDataset(IterableDataset):
+
+     def __init__(self, tokenizer, dataset, seq_length=1024,
+                  num_of_sequences=1024, chars_per_token=3.6):
+         self.tokenizer = tokenizer
+         self.concat_token_id = tokenizer.bos_token_id
+         self.dataset = dataset
+         self.seq_length = seq_length
+         self.input_characters = seq_length * chars_per_token * num_of_sequences
+
+     def __iter__(self):
+         iterator = iter(self.dataset)
+         more_examples = True
+         while more_examples:
+             buffer, buffer_len = [], 0
+             while True:
+                 if buffer_len >= self.input_characters:
+                     break
+                 try:
+                     buffer.append(next(iterator)['content'])
+                     buffer_len += len(buffer[-1])
+                 except StopIteration:
+                     more_examples = False
+                     break
+             tokenized_inputs = tokenizer(buffer, truncation=False)['input_ids']
+             all_token_ids = []
+             for tokenized_input in tokenized_inputs:
+                 all_token_ids.extend(tokenized_input + [self.concat_token_id])
+             for i in range(0, len(all_token_ids), self.seq_length):
+                 input_ids = all_token_ids[i : i + self.seq_length]
+                 if len(input_ids) == self.seq_length:
+                     yield torch.tensor(input_ids)
+
+ def setup_logging(project_name):
+     logger = logging.getLogger(__name__)
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, handlers=[
+             logging.FileHandler(f"log/debug_{accelerator.process_index}.log"),
+             logging.StreamHandler()])
+     if accelerator.is_main_process: # we only want to setup logging once
+         wandb.init(project=project_name, config=args, settings=wandb.Settings(_disable_stats=True))
+         run_name = wandb.run.name
+         tb_writer = SummaryWriter()
+         tb_writer.add_hparams(vars(args), {'0': 0})
+         logger.setLevel(logging.INFO)
+         datasets.utils.logging.set_verbosity_info()
+         transformers.utils.logging.set_verbosity_info()
+     else:
+         tb_writer = None
+         run_name = ''
+         logger.setLevel(logging.ERROR)
+         datasets.utils.logging.set_verbosity_error()
+         transformers.utils.logging.set_verbosity_error()
+     return logger, tb_writer, run_name
+
+ def create_dataloaders(dataset_name, args):
+     ds_kwargs = {"streaming": True, "chunksize": 40<<20, "error_bad_chunk": False}
+     train_data = load_dataset(dataset_name+'-train', split='train', streaming=True)
+     train_data = train_data.shuffle(buffer_size=args.shuffle_buffer,
+                                     seed=args.seed)
+     valid_data = load_dataset(dataset_name+'-valid', split="validation", streaming=True)
+     train_dataset = ConstantLengthDataset(tokenizer, train_data,
+                                           seq_length=args.seq_length)
+     valid_dataset = ConstantLengthDataset(tokenizer, valid_data,
+                                           seq_length=args.seq_length)
+     train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
+     eval_dataloader = DataLoader(valid_dataset, batch_size=args.valid_batch_size)
+     return train_dataloader, eval_dataloader
+
+ def get_grouped_params(model, args, no_decay=["bias", "LayerNorm.weight"]):
+     params_with_wd, params_without_wd = [], []
+     for n, p in model.named_parameters():
+         if any(nd in n for nd in no_decay): params_without_wd.append(p)
+         else: params_with_wd.append(p)
+     return [{'params': params_with_wd, 'weight_decay': args.weight_decay},
+             {'params': params_without_wd, 'weight_decay': 0.0}]
+
+ def log_metrics(step, metrics):
+     logger.info(f"Step {step}: {metrics}")
+     if accelerator.is_main_process:
+         wandb.log(metrics)
+         [tb_writer.add_scalar(k, v, step) for k, v in metrics.items()]
+
+ def evaluate(args):
+     model.eval()
+     losses = []
+     for step, batch in enumerate(eval_dataloader):
+         print(f'step:{step}', end='\r')
+         with torch.no_grad():
+             outputs = model(batch, labels=batch)
+         loss = outputs.loss.repeat(args.valid_batch_size)
+         losses.append(accelerator.gather(loss))
+         if args.max_eval_steps > 0 and step >= args.max_eval_steps: break
+     loss = torch.mean(torch.cat(losses))
+     try: perplexity = torch.exp(loss)
+     except OverflowError: perplexity = float("inf")
+     return loss.item(), perplexity.item()
+
+ # Accelerator
+ accelerator = Accelerator(dispatch_batches=True)
+ acc_state = {str(k): str(v) for k, v in accelerator.state.__dict__.items()}
+ # Hyperparameters
+ project_name = 'TRM-coding/PythonCopilot'
+ dataset_name = 'transformersbook/codeparrot'
+ config = {"train_batch_size": 2,
+           "valid_batch_size": 2,
+           "weight_decay": 0.1,
+           "shuffle_buffer": 1000,
+           "learning_rate": 5e-4,
+           "lr_scheduler_type": "cosine",
+           "num_warmup_steps": 0,
+           "gradient_accumulation_steps": 1,
+           "max_train_steps": 15,
+           "max_eval_steps": 15,
+           "seq_length": 1024,
+           "seed": 1,
+           "save_checkpoint_steps": 10}
+ args = Namespace(**config, **acc_state)
+ samples_per_step = accelerator.state.num_processes * args.train_batch_size
+ set_seed(args.seed)
+
+ # Logging
+ logger, tb_writer, run_name = setup_logging(project_name.split("/")[1])
+ logger.info(accelerator.state)
+
+ # Load model and tokenizer
+ if accelerator.is_main_process:
+     hf_repo = Repository("./", clone_from=project_name, revision=run_name, git_email='22301131@bjtu.edu.cn')
+ model = GPT2LMHeadModel.from_pretrained("TRM-coding/codeparrot-small")
+ model.gradient_checkpointing = True
+ tokenizer = AutoTokenizer.from_pretrained("TRM-coding/Origin-vocab")
+
+ # Load dataset and dataloader
+ train_dataloader, eval_dataloader = create_dataloaders(dataset_name, args)
+
+ # Prepare the optimizer and learning rate scheduler
+ optimizer = AdamW(get_grouped_params(model, args), lr=args.learning_rate)
+ lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer,
+                              num_warmup_steps=args.num_warmup_steps,
+                              num_training_steps=args.max_train_steps,)
+ def get_lr(): return optimizer.param_groups[0]['lr']
+
+ # Prepare everything with our `accelerator`.
+ model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
+     model, optimizer, train_dataloader, eval_dataloader)
+
+ # Train model
+ model.train()
+ completed_steps = 0
+ for step, batch in enumerate(train_dataloader, start=1):
+     loss = model(batch, labels=batch, use_cache=False).loss
+     log_metrics(step, {'lr': get_lr(), 'samples': step*samples_per_step,
+                        'steps': completed_steps, 'loss/train': loss.item()})
+     loss = loss / args.gradient_accumulation_steps
+     accelerator.backward(loss)
+     if step % args.gradient_accumulation_steps == 0:
+         accelerator.clip_grad_norm_(model.parameters(), 1.0)
+         optimizer.step()
+         lr_scheduler.step()
+         optimizer.zero_grad()
+         completed_steps += 1
+     if step % args.save_checkpoint_steps == 0:
+         logger.info('Evaluating and saving model checkpoint')
+         eval_loss, perplexity = evaluate(args)
+         log_metrics(step, {'loss/eval': eval_loss, 'perplexity': perplexity})
+         accelerator.wait_for_everyone()
+         unwrapped_model = accelerator.unwrap_model(model)
+         if accelerator.is_main_process:
+             unwrapped_model.save_pretrained("./")
+             hf_repo.push_to_hub(commit_message=f'step {step}')
+             # hf_repo.git_add(pattern="**", auto_lfs_track=True)
+             # hf_repo.git_commit(commit_message=f'step {step}')
+             # hf_repo.git_push()
+         model.train()
+     if completed_steps >= args.max_train_steps:
+         break
+
+ # Evaluate and save the last checkpoint
+ logger.info('Evaluating and saving model after training')
+ eval_loss, perplexity = evaluate(args)
+ log_metrics(step, {'loss/eval': eval_loss, 'perplexity': perplexity})
+ accelerator.wait_for_everyone()
+ unwrapped_model = accelerator.unwrap_model(model)
+ if accelerator.is_main_process:
+     unwrapped_model.save_pretrained("./")
+     hf_repo.push_to_hub(commit_message=f'final model')
+     # hf_repo.git_add(pattern="**", auto_lfs_track=True)
+     # hf_repo.git_commit(commit_message=f'step {step}')
+     # hf_repo.git_push()
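
The heart of the script is ConstantLengthDataset: it streams raw 'content' strings, joins them with the BOS token, and slices the result into fixed-length blocks of 1024 tokens so every training batch is fully packed. Below is a minimal standalone sketch of that packing step, not part of the commit; it uses the public "gpt2" tokenizer as a stand-in for the tokenizer the script loads and a tiny in-memory dataset:

    # Sketch only: the packing strategy used by ConstantLengthDataset, on toy data.
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer
    seq_length = 8                                     # the script uses 1024
    samples = [{"content": "def add(a, b):\n    return a + b\n"},
               {"content": "print('hello world')\n"}]

    # Tokenize every sample and concatenate them, separated by the BOS token id.
    all_token_ids = []
    for sample in samples:
        ids = tokenizer(sample["content"], truncation=False)["input_ids"]
        all_token_ids.extend(ids + [tokenizer.bos_token_id])

    # Slice into fixed-length blocks; an incomplete trailing block is dropped.
    blocks = [torch.tensor(all_token_ids[i:i + seq_length])
              for i in range(0, len(all_token_ids), seq_length)
              if len(all_token_ids[i:i + seq_length]) == seq_length]
    print(len(all_token_ids), "tokens ->", len(blocks), "packed block(s) of", seq_length)

The script itself runs top to bottom under Accelerate (e.g. `accelerate launch train.py`), with all hyperparameters hard-coded in the `config` dict near the top rather than taken from the command line.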