lixiangchun committed on
Commit 56377c9 · verified · 1 Parent(s): ce3043b

initial upload


A very tiny OPT model trained on the top gene rankings of more than 10 million cells.
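Below is a minimal usage sketch (an illustration, not part of the commit): it assumes the checkpoint directory added in this commit is loaded from a local clone, and the gene symbols in the prompt are hypothetical placeholders rather than entries from the training data.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tiny OPT checkpoint and its word-level tokenizer from this repo.
ckpt = "checkpoint/checkpoint-208000"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt).eval()

# Input is a whitespace-separated gene ranking; the tokenizer wraps it in
# [CLS] ... [SEP], and genes outside the 21,051-token vocabulary map to [UNK].
prompt = "GENE_A GENE_B GENE_C"  # placeholder gene symbols
enc = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    out = model.generate(input_ids=enc["input_ids"],
                         attention_mask=enc["attention_mask"],
                         max_new_tokens=10, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))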

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint/checkpoint-208000/trainer_state.json filter=lfs diff=lfs merge=lfs -text
checkpoint/checkpoint-208000/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "config.json",
+   "_remove_final_layer_norm": false,
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPTForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "do_layer_norm_before": true,
+   "dropout": 0.1,
+   "enable_bias": true,
+   "eos_token_id": 3,
+   "ffn_dim": 1024,
+   "hidden_size": 256,
+   "init_std": 0.02,
+   "layer_norm_elementwise_affine": true,
+   "layerdrop": 0.0,
+   "mask_token_id": 4,
+   "max_position_embeddings": 512,
+   "model_type": "opt",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 6,
+   "pad_token_id": 0,
+   "prefix": "</s>",
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2",
+   "unk_token_id": 1,
+   "use_cache": true,
+   "vocab_size": 21051,
+   "word_embed_proj_dim": 256
+ }
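For orientation, a rough parameter-count check derived from the config above (a sketch: the OPTConfig fields mirror the JSON, all other fields are library defaults):

from transformers import OPTConfig, OPTForCausalLM

config = OPTConfig(
    vocab_size=21051,
    hidden_size=256,
    word_embed_proj_dim=256,
    ffn_dim=1024,
    num_hidden_layers=6,
    num_attention_heads=8,
    max_position_embeddings=512,
)
model = OPTForCausalLM(config)              # randomly initialized, embeddings tied to lm_head
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # roughly 10M

At float32 (4 bytes per parameter) this is consistent with the ~41 MB model.safetensors recorded below.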
checkpoint/checkpoint-208000/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "pad_token_id": 0,
+   "transformers_version": "4.41.2"
+ }
checkpoint/checkpoint-208000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4e418c83bf108189fd81285f0ed7954ae2921b2ae7432420b165a7fc5b04b1d
+ size 41050072
checkpoint/checkpoint-208000/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint/checkpoint-208000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint/checkpoint-208000/tokenizer_config.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "[UNK]"
+ }
checkpoint/checkpoint-208000/trainer_state.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48bb3e6ae87b4e708aa7396c7c4e098f8e649fae204ce499a5e241f8f15f1d4d
+ size 18288878
checkpoint/runs/Oct22_09-55-27_localhost.tmu/events.out.tfevents.1729562691.localhost.tmu.78271.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05488e1afed78267b4632b91dfe5a6b2d74248d6738ff8e5b708eb3459211abf
+ size 22337712
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "facebook/opt-125m",
+   "_attn_implementation": "flash_attention_2",
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPTForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "do_layer_norm_before": true,
+   "dropout": 0.1,
+   "pad_token_id": 0,
+   "unk_token_id": 1,
+   "bos_token_id": 2,
+   "eos_token_id": 3,
+   "mask_token_id": 4,
+   "ffn_dim": 1024,
+   "hidden_size": 256,
+   "init_std": 0.02,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 512,
+   "model_type": "opt",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 6,
+   "prefix": "</s>",
+   "torch_dtype": "float16",
+   "transformers_version": "4.21.0.dev0",
+   "use_cache": true,
+   "vocab_size": 21051,
+   "word_embed_proj_dim": 256
+ }
tokenizer/bertbuildtokenizer.py ADDED
@@ -0,0 +1,79 @@
+ from tokenizers import Tokenizer
+ from tokenizers.models import WordLevel
+ from tokenizers.trainers import WordLevelTrainer
+ from tokenizers.pre_tokenizers import Whitespace
+ from transformers import PreTrainedTokenizerFast
+ from tokenizers.processors import TemplateProcessing
+ import os
+ import json
+
+ def build_tokenizer(files):
+     assert type(files) == list and len(files) > 0
+
+     # Build a word-level tokenizer, i.e. tokenize sentences by whitespace.
+     tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
+     trainer = WordLevelTrainer(special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"])
+     tokenizer.pre_tokenizer = Whitespace()
+     tokenizer.train(files, trainer)
+
+     return tokenizer
+
+
+ def tokenizer_from_file(tokenizer_file):
+     tokenizer = Tokenizer.from_file(tokenizer_file)
+
+     #sentinel_tokens = [(f"<extra_id_{i}>", tokenizer.token_to_id(f"<extra_id_{i}>")) for i in range(100)]
+     # For BERT, we want the tokenizer to automatically add special tokens such as "[CLS]" or "[SEP]".
+     # GPT does not require [CLS] and [SEP] at pretraining time, while BERT does.
+     # https://swethatanamala.github.io/2018/12/24/summary-of-bert-paper/
+     # GPT converges faster with [BOS] and [EOS] added than without them.
+     tokenizer.post_processor = TemplateProcessing(
+         single="[CLS] $A [SEP]",  # BERT
+         ##single="[BOS] $A [EOS]",  # GPT
+         ##single="$A </s>",
+         pair="[CLS] $A [SEP] $B:1 [SEP]:1",
+         special_tokens=[
+             ("[PAD]", tokenizer.token_to_id("[PAD]")),
+             ("[UNK]", tokenizer.token_to_id("[UNK]")),
+             ("[CLS]", tokenizer.token_to_id("[CLS]")),
+             ("[SEP]", tokenizer.token_to_id("[SEP]")),
+             ("[MASK]", tokenizer.token_to_id("[MASK]")),
+         ],
+     )
+
+     # Instantiate a transformers tokenizer from the tokenizers object.
+     tokenizer = PreTrainedTokenizerFast(
+         tokenizer_object=tokenizer, model_max_length=512,
+         pad_token='[PAD]', unk_token='[UNK]', cls_token='[CLS]',
+         sep_token='[SEP]', mask_token='[MASK]')
+
+     return tokenizer
+
+
+ if not os.path.exists("tmp.json"):
+     tokenizer = build_tokenizer(files=["gene_rank_merge_2021Aug25.txt", "../t5/t5finetune_data_flat.csv"])
+     tokenizer.save("tmp.json")
+
+ d = json.load(open("tmp.json"))
+
+ #for i in range(7, 107):
+ #    d['added_tokens'].append({'id': i, 'special': True, 'content': f"<extra_id_{i-7}>", 'single_word': False, 'lstrip': False, 'rstrip': False, 'normalized': False})
+
+ # Largest id currently present in the trained vocabulary.
+ vmax = 0
+ for k, v in d['model']['vocab'].items():
+     if v > vmax:
+         vmax = v
+
+ assert vmax + 1 == len(d['model']['vocab'])
+
+ # Append 100 reserved "unusedN" tokens at the end of the vocabulary.
+ for i in range(0, 100):
+     ##d['model']['vocab'][f"extra_id_{i}"] = vmax + 1 + i
+     d['model']['vocab'][f"unused{i}"] = vmax + 1 + i
+
+ with open('bert.json', 'w') as f:
+     json.dump(d, f)
+
+
+ tk = tokenizer_from_file("bert.json")
+ tk.save_pretrained("berttokenizer")
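A quick sanity check of the resulting vocabulary layout (a sketch, assuming bert.json was produced by the script above): the five special tokens occupy ids 0-4, gene symbols follow, and 100 reserved "unusedN" entries are appended at the end.

import json

vocab = json.load(open("bert.json"))["model"]["vocab"]
print(len(vocab))                        # expected to match vocab_size in config.json (21051)
print(vocab["[PAD]"], vocab["[MASK]"])   # 0 and 4
print(vocab["unused0"])                  # first of the 100 appended placeholder ids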
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 512, "pad_token": "[PAD]", "unk_token": "[UNK]", "cls_token": "[CLS]", "sep_token": "[SEP]", "mask_token": "[MASK]", "tokenizer_class": "PreTrainedTokenizerFast"}
train.py ADDED
@@ -0,0 +1,139 @@
+ # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import copy
+ import logging
+ from dataclasses import dataclass, field
+ import pathlib
+ from typing import Dict, Optional, Sequence
+
+ import torch
+ import transformers
+ from torch.utils.data import Dataset
+ from transformers import Trainer
+ import json
+
+ IGNORE_INDEX = -100
+
+ @dataclass
+ class ModelArguments:
+     model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
+
+
+ @dataclass
+ class DataArguments:
+     data_path: str = field(default=None, metadata={"help": "Path to the training data."})
+
+
+ @dataclass
+ class TrainingArguments(transformers.TrainingArguments):
+     cache_dir: Optional[str] = field(default=None)
+     optim: str = field(default="adamw_torch")
+     model_max_length: int = field(
+         default=8192,
+         metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
+     )
+
+ local_rank = None
+
+ def rank0_print(*args):
+     if local_rank == 0:
+         print(*args)
+
+ class SupervisedDataset(Dataset):
+     """Dataset for supervised fine-tuning."""
+
+     def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizerFast):
+         super(SupervisedDataset, self).__init__()
+         logging.warning("Loading data...")
+         self.tokenizer = tokenizer
+         self.max_length = 64  # max number of genes
+         # Keep only lines with at least max_length genes, truncated to max_length.
+         with open(data_path) as f:
+             self.list_data = [line.split()[0: self.max_length] for line in f if len(line.split()) >= self.max_length]
+
+         self.cached_input_ids = {}
+
+     def __len__(self):
+         return len(self.list_data)
+
+     def __getitem__(self, i) -> Dict[str, torch.Tensor]:
+         if i in self.cached_input_ids:
+             input_ids = self.cached_input_ids[i]
+         else:
+             input_ids = self.tokenizer(self.list_data[i], is_split_into_words=True)["input_ids"]
+             input_ids = torch.tensor(input_ids)
+             self.cached_input_ids[i] = input_ids
+
+         return dict(input_ids=input_ids, labels=input_ids)
+
+ @dataclass
+ class DataCollatorForSupervisedDataset(object):
+     """Collate examples for supervised fine-tuning."""
+
+     tokenizer: transformers.PreTrainedTokenizerFast
+
+     def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
+         input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
+         input_ids = torch.nn.utils.rnn.pad_sequence(
+             input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
+         )
+         labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
+         return dict(
+             input_ids=input_ids,
+             labels=labels,
+             attention_mask=(input_ids.ne(self.tokenizer.pad_token_id)).long(),
+         )
+
+
+ def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizerFast, data_args) -> Dict:
+     """Make dataset and collator for supervised fine-tuning."""
+     train_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path)
+     data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
+     ##data_collator = transformers.DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
+     return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
+
+
+ def train():
+     global local_rank
+
+     parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
+     model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+     local_rank = training_args.local_rank  # so rank0_print() only prints on rank 0
+
+     #model = transformers.AutoModelForCausalLM.from_pretrained(
+     #    model_args.model_name_or_path,
+     #    cache_dir=training_args.cache_dir,
+     #)
+     config = transformers.AutoConfig.from_pretrained('config.json')
+     model = transformers.OPTForCausalLM(config)
+     #model = transformers.BertForMaskedLM(config)
+
+     model_size = sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e+6
+     rank0_print(model)
+     rank0_print(f"model_size: {model_size:.3f}M parameters")
+
+     tokenizer = transformers.PreTrainedTokenizerFast.from_pretrained("tokenizer")
+
+     data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
+     trainer = Trainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
+
+     # Resume from the latest checkpoint if one already exists in output_dir.
+     if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
+         trainer.train(resume_from_checkpoint=True)
+     else:
+         trainer.train()
+
+     trainer.save_state()
+     trainer.save_model(output_dir=training_args.output_dir)
+
+
+ if __name__ == "__main__":
+     train()
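A small sketch of the per-line data format SupervisedDataset above expects (the file name and gene symbols are placeholders; the real file is passed via --data_path):

# One cell per line: a whitespace-separated, ranked list of gene symbols.
# Only lines with at least 64 genes are kept, each truncated to its first 64.
line = " ".join(f"GENE{i}" for i in range(64))     # hypothetical gene symbols
with open("gene_ranking_example.txt", "w") as f:   # hypothetical file name
    f.write(line + "\n")

Since labels are the input_ids themselves, training is plain causal language modeling over the ranking; the collator right-pads input_ids with pad_token_id (0), pads labels with IGNORE_INDEX (-100), and derives attention_mask from the non-pad positions.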
train.sh ADDED
@@ -0,0 +1,27 @@
+ export CUDA_VISIBLE_DEVICES=2,3
+ ##--fsdp "full_shard auto_wrap" --fsdp_transformer_layer_cls_to_wrap 'OPTDecoderLayer' \
+ torchrun --nproc_per_node=2 --master_port=8081 train.py \
+     --data_path ../downstream_data/gene_ranking_20220803.txt \
+     --bf16 True \
+     --output_dir checkpoint \
+     --num_train_epochs 40 \
+     --per_device_train_batch_size 512 \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps 2 \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 2000 \
+     --save_total_limit 1 \
+     --learning_rate 3e-4 \
+     --weight_decay 0.0 \
+     --warmup_ratio 0.03 \
+     --adam_beta1 0.90 \
+     --adam_beta2 0.95 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 2 \
+     --report_to tensorboard \
+     --tf32 True \
+     --dataloader_num_workers 1 \
+     --dataloader_persistent_workers True
+
+ #--data_path ../downstream_data/val.txt \
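For reference: with CUDA_VISIBLE_DEVICES=2,3 and --nproc_per_node=2, the effective batch size is 2 GPUs × 512 sequences per device × 2 gradient-accumulation steps = 2,048 sequences per optimizer step (assuming both visible GPUs are actually used).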