import torch
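# Allow TF32 tensor-core matmuls on Ampere+ GPUs (faster matmul at slightly
# reduced precision); note the tf32 flag in TrainingArguments below also
# controls this setting.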
torch.backends.cuda.matmul.allow_tf32 = True
import random
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer
from peft import LoraConfig


import time
random_seed = 42
torch.manual_seed(random_seed)
random.seed(random_seed)

dataset = load_dataset("Crystalcareai/Self-Discover-MM-Instruct-openai", split="train_sft")
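# Only the "train_sft" split is loaded; no separate evaluation split is configured.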

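# Default Quiet-Star thought hyperparameters; model_init() falls back to these
# when no overrides are supplied.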
n_ahead_talk_global = 4
n_passes_global = 2
n_ahead_global = 2
n_examples = 0



def model_init(params):
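    """Build and configure the Quiet-Star model for training.

    `params` is either None (fall back to the module-level defaults above) or
    an object exposing a `.params` dict of hyperparameter overrides, e.g. a
    trial from a hyperparameter search.
    """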
    original = False
    if params is None:
        params = {}
    else:
        params = params.params
    # save params to file
    n_ahead = params.get("n_ahead", n_ahead_global if not original else 1)
    n_ahead_talk = params.get("n_ahead_talk", n_ahead_talk_global if not original else 1)
    n_passes = params.get("n_passes", n_passes_global if not original else 1)
    gumbel_temperature = params.get("gumbel_temperature", 1)
    use_start_thought_token = params.get("use_start_thought_token", True)
    use_end_thought_token = params.get("use_end_thought_token", True)
    include_policy_loss = params.get("include_policy_loss", True)
    gumbel_detach = params.get("gumbel_detach", True)
    merged_talk_heads = params.get("merged_talk_heads", True)
    residual_think_head = params.get("residual_think_head", False)
    optimize_lm_head_only_at_start = params.get("optimize_lm_head_only_at_start", False)

    model_id = "Crystalcareai/Quiet-Star-Custom"
    tokenizer_id = model_id
    print("Loading model")
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        max_thoughts=n_ahead + n_ahead_talk + 1,
        merged_talk_heads=merged_talk_heads,
        merged_lm_and_talk_heads=False,
        merged_lm_and_think_heads=True,
        use_concat_talk_head=True,
        use_shallow_think=True,
        use_shallow_talk=False,
        use_complex_think_head=False,
        use_complex_talk_head=True,
        use_weighted_talk_head=True,
        trust_remote_code=True,  
        device_map="auto",
        # load_in_4bit=True,
        # attn_implementation="flash_attention_2",
    )
    print("Loaded model")
   
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
    tokenizer.padding_side = 'left'  # Adjust padding side to 'left' to avoid batch generation issues with Flash Attention
    tokenizer.pad_token_id = tokenizer.eos_token_id

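    # Register the <|startthought|>/<|endthought|> delimiters as special tokens
    # and resize the embedding matrix so they map to trainable rows.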
    special_tokens_to_add = []
    if model.use_start_thought_token:
        special_tokens_to_add.append("<|startthought|>")
    if model.use_end_thought_token:
        special_tokens_to_add.append("<|endthought|>")
    if special_tokens_to_add:
        tokenizer.add_special_tokens({"additional_special_tokens": special_tokens_to_add})
        model.resize_token_embeddings(len(tokenizer))
    model.tokenizer = tokenizer
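    # Print the embedding modules as a quick sanity check on the resized vocabulary.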
    for name, module in model.named_modules():
        if "embed" in name:
            print(module, flush=True)
   
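    # Expose the resolved hyperparameters as attributes on the model instance,
    # where the custom Quiet-Star modeling code is expected to read them.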
    model.gumbel_detach = gumbel_detach
    model.include_policy_loss = include_policy_loss
    model.use_end_thought_token = use_end_thought_token
    model.use_start_thought_token = use_start_thought_token
    model.n_ahead = n_ahead
    model.n_ahead_talk = n_ahead_talk
    model.n_passes = n_passes
    model.residual_think_head = residual_think_head
    model.optimize_lm_head_only_at_start = optimize_lm_head_only_at_start
    model.gumbel_temperature = gumbel_temperature
    model.original_mode = original
    model.config_params = params
    model.run_start = int(time.time())
    model.train()
    return model

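# Training configuration: per-device batch size 1 with 16 gradient-accumulation
# steps gives an effective batch size of 16 per device, trained in bf16.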
max_seq_length = 2048
run_id = int(time.time())
training_args = TrainingArguments(
    output_dir="./out",
    num_train_epochs=3,
    per_device_train_batch_size=1,
    gradient_checkpointing=False,
    gradient_accumulation_steps=16,
    optim="adamw_torch_fused",
    logging_steps=1,
    save_strategy="steps",
    save_steps=300,
    bf16=True,
    tf32=False,
    # epsilon=1e-05,
    # beta1=0.9,
    # beta2=0.95,
    # auto_find_batch_size=True
    learning_rate=2e-07,
    max_grad_norm=1.0,  # Gradient clipping with a maximum gradient norm of 1.0
    warmup_steps=10,
    lr_scheduler_type="cosine",
    push_to_hub=False,
    report_to="wandb"
    
)

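# Optional LoRA/DoRA adapter config, currently disabled; the full model is
# fine-tuned instead (peft_config is also left commented out in the trainer below).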
# peft_config = LoraConfig(
#           r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
#     target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
#                       "gate_proj", "up_proj", "down_proj",],
#     lora_alpha = 16,
#     lora_dropout = 0, # Supports any, but = 0 is optimized
#     bias = "none",
#     use_dora=True, # Enable the DoRA method
# )

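# Anomaly detection pinpoints the op that produced a NaN/Inf gradient, at the
# cost of a noticeably slower backward pass; consider disabling it for full runs.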
torch.autograd.set_detect_anomaly(True)
model = model_init(None)  # Initialize the model

tokenizer = model.tokenizer  
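# Hand the model, tokenizer, and dataset to TRL's SFTTrainer and start training.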
        
trainer = SFTTrainer(
    args=training_args,
    train_dataset=dataset,
    model=model,
    # peft_config=peft_config,
    tokenizer=tokenizer,  
    max_seq_length=max_seq_length,
)

trainer.train()