Upload openlem-finetuning.py
openlem-finetuning.py (ADDED, +273 -0)
# Full training script: projection-only tuning (using explicit head variables)
import os, json, numpy as np, tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm
from tqdm import tqdm

# ========== Config ==========
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "dataset_shuffled.jsonl"
MODEL_PATH = "encoder_fit.weights.h5"
MAX_LEN = 384
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 5e-4
TEMPERATURE = 0.05
SEED = 42

np.random.seed(SEED)
tf.random.set_seed(SEED)
tf.get_logger().setLevel("ERROR")

# ========== TPU / distribution strategy ==========
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    ON_TPU = True
    print("✅ TPU initialized")
except Exception as e:
    strategy = tf.distribute.get_strategy()
    ON_TPU = False
    print("⚠️ TPU not available, continuing on GPU/CPU:", e)

from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if ON_TPU else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision policy:", policy)

# ========== Tokenizer ==========
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)

def encode_sentence_np(s: str, max_len=MAX_LEN):
    # Tokenize, truncate to max_len, then right-pad with pad_id.
    ids = sp.encode(s, out_type=int)[:max_len]
    if len(ids) < max_len:
        ids = ids + [pad_id] * (max_len - len(ids))
    return np.array(ids, dtype=np.int32)

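# Illustrative example (the ids below are hypothetical; actual values depend
# on the bpe.model vocabulary):
#   encode_sentence_np("two words")
#   -> array([412, 907, pad_id, pad_id, ..., pad_id], dtype=int32)  # shape (MAX_LEN,)
# Longer inputs are truncated to MAX_LEN; shorter ones are right-padded with pad_id.
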
# ========== Model definition (original architecture kept) ==========
class DynamicConv(layers.Layer):
    """Dynamic convolution: a softmax-normalized kernel of width k is predicted
    per position and applied over the local window, shared across channels."""
    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1
        self.k = k
        self.dense = layers.Dense(d_model, activation='silu')
        self.proj = layers.Dense(d_model)
        self.generator = layers.Dense(k, dtype='float32')  # kernel logits kept in float32

    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)
        B = tf.shape(x)[0]; L = tf.shape(x)[1]; D = tf.shape(x)[2]
        # Predict a k-tap kernel per position and normalize it with softmax.
        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)
        # Same-length convolution: pad, extract sliding windows, weight and sum.
        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])
        x_pad_4d = tf.expand_dims(x_pad, axis=1)
        patches = tf.image.extract_patches(images=x_pad_4d,
                                           sizes=[1, 1, self.k, 1],
                                           strides=[1, 1, 1, 1],
                                           rates=[1, 1, 1, 1],
                                           padding='VALID')
        patches = tf.reshape(patches, [B, L, self.k, D])
        out = tf.reduce_sum(patches * tf.expand_dims(kernels, -1), axis=2)
        out = self.proj(out)
        return tf.cast(out, x_in.dtype)

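# Shape walkthrough for DynamicConv (illustrative, using the defaults above
# with an assumed batch of B=2 sequences of length L=384 and D=512 channels):
#   x         : (2, 384, 512)
#   kernels   : (2, 384, 7)       one softmax-normalized 7-tap kernel per position
#   x_pad_4d  : (2, 1, 390, 512)  padded by (k-1)//2 = 3 on each side
#   patches   : (2, 384, 7, 512)  the 7-token window around each position
#   out       : (2, 384, 512)     window weighted by the kernel, then projected
# The same 7 kernel weights are shared across all 512 channels at a position.
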
class EncoderBlock(layers.Layer):
    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, num_conv_layers=2):
        super().__init__()
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(d_model=embed_dim, k=7) for _ in range(num_conv_layers)]
        self.ln = layers.LayerNormalization(epsilon=1e-5)
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)

    def call(self, x, training=None):
        # Convolution branch on the pre-normalized input.
        x_norm = self.ln(x)
        out = x_norm
        for block in self.blocks:
            out = block(out)
        x = x_norm + self.ln1(out)
        # Gated feed-forward branch (SwiGLU-style): split the fc1 output into a
        # gate half and a value half, multiply, then project back to embed_dim.
        v = out
        h = self.fc1(v)
        g, v_split = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v_split
        h = self.fc2(h)
        x = x + self.ln2(h)
        return x

class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10):
        super().__init__()
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

class SentenceEncoder(Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=0.1):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)
        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)
        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)
        h = self.ln_f(h)
        # Masked attention pooling: padding positions get -1e9 before the softmax.
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)
        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)
        return tf.cast(latent, tf.float32)

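# Pooling sketch (illustrative, default sizes): for a batch of token ids
# shaped (B, MAX_LEN), the encoder produces
#   h      : (B, 384, 512)  contextual token states after the two blocks
#   scores : (B, 384, 1)    softmax over the sequence axis, pads pushed to ~0
#   pooled : (B, 512)       attention-weighted sum of token states
#   latent : (B, 512)       Dense projection, then L2-normalized, so each row
#                           has unit norm and dot products act as cosine similarities.
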
# ========== Build model, load weights, and collect head variables explicitly ==========
with strategy.scope():
    encoder = SentenceEncoder(vocab_size=vocab_size)
    # 1) build (required before loading weights)
    encoder(np.zeros((1, MAX_LEN), dtype=np.int32))
    # 2) load weights if they exist
    if os.path.exists(MODEL_PATH):
        try:
            encoder.load_weights(MODEL_PATH)
            print("Loaded weights from", MODEL_PATH)
        except Exception as e:
            print("Warning: load_weights failed:", e)
    # 3) freeze everything first (simplest)
    encoder.trainable = False
    # 4) ensure the head layers exist and mark them trainable (layer-level)
    head_layers = []
    for name in ("attn_pool", "ln_f", "latent"):
        layer = getattr(encoder, name, None)
        if layer is None:
            print(f"Warning: encoder has no attribute '{name}'")
        else:
            layer.trainable = True
            head_layers.append(layer)
    # 5) call once more so any lazy variable creation runs
    encoder(np.zeros((1, MAX_LEN), dtype=np.int32))
    # 6) collect trainable variables explicitly from the head layers
    trainable_vars = []
    for layer in head_layers:
        # layer.trainable_weights lists that layer's variables that are trainable
        for v in layer.trainable_weights:
            trainable_vars.append(v)
    # safety: if still empty, dump info and raise
    if len(trainable_vars) == 0:
        print("ERROR: no head trainable vars found. Dumping all variables:")
        for v in encoder.variables:
            print(v.name, "shape", v.shape, "trainable:", v.trainable)
        raise RuntimeError("No trainable head variables found - aborting.")
    total_trainable = sum(int(np.prod(v.shape)) for v in trainable_vars)
    print("Collected head layers:", [l.name for l in head_layers])
    print("Trainable var count (head):", len(trainable_vars), "params:", total_trainable)
    # 7) the optimizer must be created inside the strategy scope
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)

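# What "projection-only" should leave trainable with the defaults above
# (illustrative; exact variable names depend on the Keras version):
#   attn_pool : kernel (512, 1) + bias (1,)      =     513 params
#   ln_f      : gamma (512,) + beta (512,)       =   1,024 params
#   latent    : kernel (512, 512) + bias (512,)  = 262,656 params
# i.e. roughly 264k trainable parameters, while the embeddings and both
# EncoderBlocks stay frozen. Setting encoder.trainable = False and then
# re-enabling these three layers relies on tf.keras propagating the trainable
# flag to nested layers when it is assigned (true for recent tf.keras versions).
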
# ========== tf.data parsing ==========
AUTOTUNE = tf.data.AUTOTUNE

def _py_encode_line(line):
    raw = line.numpy()
    if isinstance(raw, bytes):
        s = raw.decode("utf-8")
    else:
        s = str(raw)
    j = json.loads(s)
    q = encode_sentence_np(j.get("query", ""))
    d = encode_sentence_np(j.get("document", ""))
    n = encode_sentence_np(j.get("hard_negative", ""))
    return q, d, n

def parse_line(line):
    q, d, n = tf.py_function(_py_encode_line, [line], [tf.int32, tf.int32, tf.int32])
    q.set_shape([MAX_LEN]); d.set_shape([MAX_LEN]); n.set_shape([MAX_LEN])
    return q, d, n

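# Each line of dataset_shuffled.jsonl is expected to be one JSON object with
# the three keys read above; missing keys fall back to "". For example:
#   {"query": "how do I reset my password?",
#    "document": "To reset your password, open Settings > Account ...",
#    "hard_negative": "Passwords must contain at least eight characters."}
# (the text shown is made up; only the key names come from the script)
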
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))
ds = ds.map(parse_line, num_parallel_calls=AUTOTUNE)
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
ds = ds.prefetch(AUTOTUNE)

# sample check: each batch should be three (BATCH_SIZE, MAX_LEN) int32 tensors
try:
    sample = next(iter(ds.take(1)))
    print("Sample batch shapes:", [t.shape for t in sample])
except Exception as e:
    print("Warning: sample extraction failed:", e)

# ========== loss function ==========
@tf.function
def compute_loss_and_logits(q_emb, p_emb, n_emb, temperature):
    docs = tf.concat([p_emb, n_emb], axis=0)            # (2B, D)
    logits = tf.matmul(q_emb, docs, transpose_b=True)   # (B, 2B)
    logits = logits / tf.cast(temperature, logits.dtype)
    labels = tf.range(tf.shape(q_emb)[0], dtype=tf.int32)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    return tf.reduce_mean(loss), logits

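# Worked example of the in-batch contrastive loss (illustrative, B = 4):
#   docs   = [p0, p1, p2, p3, n0, n1, n2, n3]        # 8 rows
#   logits = q @ docs.T / TEMPERATURE                # shape (4, 8)
#   labels = [0, 1, 2, 3]
# Query i is trained to score its own positive p_i highest; the other three
# positives and all four hard negatives act as negatives for that query.
# Because the embeddings are L2-normalized, the logits are cosine similarities
# scaled by 1 / TEMPERATURE = 20.
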
# ========== train step (explicit trainable_vars) ==========
@tf.function
def train_step(q_batch, p_batch, n_batch):
    def step_fn(q, p, n):
        with tf.GradientTape() as tape:
            q_emb = encoder(q, training=True)
            p_emb = encoder(p, training=True)
            n_emb = encoder(n, training=True)
            loss, _ = compute_loss_and_logits(q_emb, p_emb, n_emb, TEMPERATURE)
            reg_loss = tf.add_n(encoder.losses) if encoder.losses else 0.0
            total_loss = loss + reg_loss
        # Gradients only for the explicitly collected head variables;
        # replace None grads with zeros so apply_gradients never fails.
        grads = tape.gradient(total_loss, trainable_vars)
        grads = [tf.zeros_like(v) if g is None else g for g, v in zip(grads, trainable_vars)]
        optimizer.apply_gradients(zip(grads, trainable_vars))
        return total_loss
    # Note: the dataset is not passed through strategy.experimental_distribute_dataset,
    # so on a multi-replica TPU each replica runs on the same full batch.
    per_replica_loss = strategy.run(step_fn, args=(q_batch, p_batch, n_batch))
    return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=None)

# ========== training loop ==========
with open(DATA_PATH, "r", encoding="utf-8") as f:
    num_lines = sum(1 for _ in f)
steps_per_epoch = max(1, num_lines // BATCH_SIZE)
print("num_lines:", num_lines, "steps_per_epoch:", steps_per_epoch)

it = iter(ds)
global_step = 0
for epoch in range(EPOCHS):
    print(f"\nEpoch {epoch+1}/{EPOCHS}")
    pbar = tqdm(range(steps_per_epoch), desc="training", ncols=120)
    for step in pbar:
        batch = next(it)
        loss = train_step(batch[0], batch[1], batch[2])
        global_step += 1
        pbar.set_postfix({"loss": f"{float(loss.numpy()):.4f}"})
    encoder.save_weights(MODEL_PATH)
    print("Saved weights:", MODEL_PATH)

print("Training finished.")
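
# Optional sanity check (a sketch, not part of the original script): embed two
# strings with the tuned encoder and compare them. Rows are unit-norm, so the
# dot product is the cosine similarity. Uncomment to run after training.
# a = encode_sentence_np("first example sentence")[np.newaxis, :]
# b = encode_sentence_np("a completely different topic")[np.newaxis, :]
# emb_a = encoder(a, training=False)   # (1, LATENT_DIM)
# emb_b = encoder(b, training=False)
# print("cosine similarity:", float(tf.reduce_sum(emb_a * emb_b)))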