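# Fine-tunes "Crystalcareai/Quiet-Star-Custom" (a Quiet-STaR-style model with explicit
# thought tokens) on the HuggingFaceH4/deita-10k-v0-sft dataset using TRL's SFTTrainer.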
import torch
torch.backends.cuda.matmul.allow_tf32 = True
import random
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer
from peft import LoraConfig
import time
random_seed = 42
torch.manual_seed(random_seed)
random.seed(random_seed)
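# Training data: the "train_sft" split, passed to SFTTrainer below.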
dataset = load_dataset("HuggingFaceH4/deita-10k-v0-sft", split="train_sft")
n_ahead_talk_global = 2
n_passes_global = 2
n_ahead_global = 2
n_examples = 0
full_batch_size = 2
eval_and_logging_steps = 2
save_steps = 100
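# model_init builds and configures the model. It accepts an optional params object
# (e.g. a hyperparameter-search trial, hence the params.params unwrap) and otherwise
# falls back to the module-level defaults above. The attributes set on the returned
# model (n_ahead, n_passes, thought tokens, ...) are Quiet-STaR-style settings that the
# custom modeling code loaded via trust_remote_code=True is expected to read.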
def model_init(params):
    original = False
    if params is None:
        params = {}
    else:
        params = params.params
    n_ahead = params.get("n_ahead", n_ahead_global if not original else 1)
    n_ahead_talk = params.get("n_ahead_talk", n_ahead_talk_global if not original else 1)
    n_passes = params.get("n_passes", n_passes_global if not original else 1)
    gumbel_temperature = params.get("gumbel_temperature", 1)
    use_start_thought_token = params.get("use_start_thought_token", True)
    use_end_thought_token = params.get("use_end_thought_token", True)
    include_policy_loss = params.get("include_policy_loss", True)
    gumbel_detach = params.get("gumbel_detach", True)
    merged_talk_heads = params.get("merged_talk_heads", True)
    gradient_accumulation_steps = params.get("gradient_accumulation_steps", global_gradient_accumulation_steps)
    residual_think_head = params.get("residual_think_head", False)
    optimize_lm_head_only_at_start = params.get("optimize_lm_head_only_at_start", False)

    model_id = "Crystalcareai/Quiet-Star-Custom"
    tokenizer_id = model_id
    print("Loading model")

    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        max_thoughts=n_ahead + n_ahead_talk + 1,
        merged_talk_heads=merged_talk_heads,
        merged_lm_and_talk_heads=False,
        merged_lm_and_think_heads=True,
        use_concat_talk_head=True,
        use_shallow_think=True,
        use_shallow_talk=False,
        use_complex_think_head=False,
        use_complex_talk_head=True,
        use_weighted_talk_head=True,
        trust_remote_code=True,
        device_map="auto",
    )
    print("Loaded model")

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, padding=False, truncation=True)
    tokenizer.pad_token_id = tokenizer.eos_token_id

    special_tokens_to_add = []
    if model.use_start_thought_token:
        special_tokens_to_add.append("<|startthought|>")
    if model.use_end_thought_token:
        special_tokens_to_add.append("<|endthought|>")
    if special_tokens_to_add:
        tokenizer.add_special_tokens({"additional_special_tokens": special_tokens_to_add})
        model.resize_token_embeddings(len(tokenizer))
    model.tokenizer = tokenizer

    model.gumbel_detach = gumbel_detach
    model.include_policy_loss = include_policy_loss
    model.use_end_thought_token = use_end_thought_token
    model.use_start_thought_token = use_start_thought_token
    model.n_ahead = n_ahead
    model.n_ahead_talk = n_ahead_talk
    model.n_passes = n_passes
    model.n_tokens_print = gradient_accumulation_steps
    model.gradient_accumulation_steps = gradient_accumulation_steps
    model.residual_think_head = residual_think_head
    model.optimize_lm_head_only_at_start = optimize_lm_head_only_at_start
    model.gumbel_temperature = gumbel_temperature
    model.original_mode = original
    model.config_params = params
    model.run_start = int(time.time())
    model.kill_after = 100
    model.train()
    return model
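# Derive the per-device batch size and gradient accumulation from the full batch size:
# with full_batch_size=2 and n_passes_global=2 this yields batch_size=1 and
# global_gradient_accumulation_steps=2 (the default used inside model_init).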
batch_size = full_batch_size // n_passes_global
global_gradient_accumulation_steps = full_batch_size // batch_size
run_id = int(time.time())
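# Trainer configuration. per_device_train_batch_size and gradient_accumulation_steps are
# hard-coded here rather than derived from the values computed above, and the
# module-level save_steps / eval_and_logging_steps variables are not referenced.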
training_args = TrainingArguments(
    output_dir="./out",
    num_train_epochs=3,
    per_device_train_batch_size=1,
    gradient_checkpointing=False,
    gradient_accumulation_steps=4,
    optim="adamw_torch_fused",
    logging_steps=1,
    save_strategy="steps",
    save_steps=300,
    bf16=True,
    tf32=False,
    # auto_find_batch_size=True,
    learning_rate=2e-07,
    max_grad_norm=1.0,  # gradient clipping with a maximum gradient norm of 1.0
    warmup_steps=100,
    lr_scheduler_type="cosine",
    push_to_hub=False,
)
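# Optional LoRA/DoRA adapter configuration, currently disabled. Uncommenting this block
# and the peft_config argument in SFTTrainer below would switch from full fine-tuning to
# parameter-efficient fine-tuning.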
# peft_config = LoraConfig(
#     r=16,  # Choose any number > 0! Suggested: 8, 16, 32, 64, 128
#     target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
#                     "gate_proj", "up_proj", "down_proj"],
#     lora_alpha=16,
#     lora_dropout=0,  # Supports any value, but 0 is optimized
#     bias="none",
#     use_dora=True,  # Enable the DoRA method
# )
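# Anomaly detection helps pinpoint the operation producing NaN/inf gradients, but it adds
# significant overhead; it is usually turned off once the run is stable.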
torch.autograd.set_detect_anomaly(True)
model = model_init(None) # Initialize the model
tokenizer = model.tokenizer
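# Assemble the trainer with the configured model, tokenizer, and dataset, then train.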
trainer = SFTTrainer(
    args=training_args,
    train_dataset=dataset,
    model=model,
    # peft_config=peft_config,
    tokenizer=tokenizer,
)
trainer.train()