import os
import math
import requests

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm

# Paths and hyperparameters
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "shuffled_corpus.txt"
MAX_LEN = 384          # maximum sequence length in tokens
EMBED_DIM = 768        # token embedding dimension
LATENT_DIM = 768       # sentence embedding (latent) dimension
BATCH_SIZE = 768
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05     # NT-Xent temperature
DROPOUT_AUG = 0.1      # token-dropout probability used to build augmented views
EMBED_DROPOUT = 0.1    # dropout applied after the embedding layer
SEED = 42

# Reduce TensorFlow log noise and fix random seeds for reproducibility
tf.get_logger().setLevel("ERROR")
tf.random.set_seed(SEED)
np.random.seed(SEED)

# Try to initialize a TPU; otherwise fall back to the default (GPU/CPU) strategy
on_tpu = False
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    print("TPU initialized:", resolver.cluster_spec().as_dict())
    on_tpu = True
except Exception as e:
    print("TPU not available, running on GPU/CPU:", e)
    strategy = tf.distribute.get_strategy()

# Use bfloat16 mixed precision on TPU, plain float32 elsewhere
from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if on_tpu else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision policy:", policy)


def download_file(url, save_path):
    """Download url to save_path, skipping the download if the file already exists."""
    if os.path.exists(save_path):
        print(f"exists: {save_path}")
        return
    print(f"Downloading {save_path} ...")
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(chunk_size=8192 * 2):
            # Skip empty keep-alive chunks instead of aborting the download
            if chunk:
                f.write(chunk)
    print(f"{save_path} saved")


download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/bpe.model?download=true",
    TOKENIZER_PATH,
)
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/shuffled_corpus%20(1).txt?download=true",
    DATA_PATH,
)


# Load the SentencePiece tokenizer and resolve the padding id
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:
    # Fall back to id 0 if the model defines no <pad> piece
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)


def encode_sentence_py(s: str):
    """Encode one sentence to a fixed-length int32 array, truncated/padded to MAX_LEN."""
    ids = sp.encode(s, out_type=int)[:MAX_LEN]
    if len(ids) < MAX_LEN:
        ids = ids + [pad_id] * (MAX_LEN - len(ids))
    return np.array(ids, dtype=np.int32)


def tf_encode(line):
    """Wrap the Python tokenizer so it can run inside the tf.data pipeline."""
    def _encode_py(s_tensor):
        s = s_tensor.numpy().decode("utf-8")
        return encode_sentence_py(s)
    ids = tf.py_function(func=_encode_py, inp=[line], Tout=tf.int32)
    ids.set_shape([MAX_LEN])
    return ids


def token_dropout(tokens, drop_prob=DROPOUT_AUG):
    """Randomly replace tokens with the pad id as a simple corruption-based augmentation."""
    rnd = tf.random.uniform(tf.shape(tokens), 0, 1)
    keep_mask = rnd > drop_prob
    return tf.where(keep_mask, tokens, tf.cast(pad_id, tf.int32))


def make_views(tokens):
    """Build two independently corrupted views of the same sentence for contrastive learning."""
    v1 = token_dropout(tokens)
    v2 = token_dropout(tokens)
    return v1, v2


# Input pipeline: read lines, drop empties, tokenize, shuffle, build two views, batch
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))

ds = ds.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.repeat()
ds = ds.map(lambda t: make_views(t), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(BATCH_SIZE, drop_remainder=True)

# Keras fit() expects (inputs, targets); the targets are dummy zeros because nt_xent_loss ignores y_true
ds = ds.map(lambda v1, v2: ((v1, v2), tf.zeros([BATCH_SIZE], dtype=tf.float32)),
            num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.prefetch(tf.data.AUTOTUNE)


class DynamicConv(layers.Layer):
    """Lightweight dynamic convolution: each position predicts its own k-tap kernel,
    which is softmax-normalized and applied to a local window of the sequence."""

    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1, "kernel size must be odd so the window is centered"
        self.k = k
        self.dense = layers.Dense(d_model, activation="gelu")
        self.proj = layers.Dense(d_model)
        # Kernel generator is kept in float32 for a numerically stable softmax
        self.generator = layers.Dense(k, dtype="float32")

    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)

        # Pad so the output length matches the input length
        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])
        x_pad_4d = tf.expand_dims(x_pad, axis=1)

        # Extract sliding windows of k positions: [B, 1, L, k * D]
        patches = tf.image.extract_patches(
            images=x_pad_4d,
            sizes=[1, 1, self.k, 1],
            strides=[1, 1, 1, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )

        B = tf.shape(patches)[0]
        L = tf.shape(patches)[2]
        D = tf.shape(x)[2]
        patches = tf.reshape(patches, [B, L, self.k, D])

        # Predict a per-position kernel over the k window offsets
        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)

        # Weighted sum over the window, then project back to d_model
        kernels_exp = tf.expand_dims(kernels, axis=-1)
        out = tf.reduce_sum(patches * kernels_exp, axis=2)
        out = self.proj(out)

        return tf.cast(out, x_in.dtype)

    def compute_output_shape(self, input_shape):
        return input_shape
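
# Illustrative shape check (not part of training, assuming the float32 policy): the layer
# preserves the sequence layout, mapping [batch, length, d_model] -> [batch, length, d_model], e.g.
#   y = DynamicConv(d_model=8, k=5)(tf.zeros([2, 10, 8]))   # y.shape == (2, 10, 8)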


class MixerBlock(layers.Layer):
    """Mixer-style block: local dynamic convolution, gated token (sequence) mixing, then gated
    channel mixing, each with a pre-LayerNorm residual connection.
    token_mlp_dim, channel_mlp_dim and dropout are accepted for API compatibility but unused."""

    def __init__(self, seq_len, dim, token_mlp_dim=None, channel_mlp_dim=None, dropout=0.0):
        super().__init__()
        self.dim = dim

        self.ln_token = layers.LayerNormalization(epsilon=1e-6)
        self.ln_local = layers.LayerNormalization(epsilon=1e-6)
        self.ln_channel = layers.LayerNormalization(epsilon=1e-6)

        # Token (sequence) mixing MLP with a GEGLU-style gate
        self.token_fc1 = layers.Dense(seq_len * 2)
        self.token_fc2 = layers.Dense(seq_len)

        # Channel mixing MLP with a GEGLU-style gate
        self.ch_fc1 = layers.Dense(self.dim * 4)
        self.ch_fc2 = layers.Dense(self.dim)

        # Local context via dynamic convolution
        self.conv1 = DynamicConv(d_model=dim, k=5)

    def call(self, x, training=None):
        # Local mixing
        y = self.ln_local(x)
        y = self.conv1(y)
        x = x + y

        # Token mixing: transpose to [B, D, L], mix along the sequence axis, transpose back
        y = self.ln_token(x)
        y_t = tf.transpose(y, perm=[0, 2, 1])
        y_t = self.token_fc1(y_t)
        a, b = tf.split(y_t, 2, axis=-1)
        y_t = self.token_fc2(a * tf.nn.gelu(b))
        y = tf.transpose(y_t, perm=[0, 2, 1])
        x = x + y

        # Channel mixing
        y = self.ln_channel(x)
        a, b = tf.split(self.ch_fc1(y), 2, axis=-1)
        y = self.ch_fc2(a * tf.nn.gelu(b))
        x = x + y

        return x

    def compute_output_shape(self, input_shape):
        return input_shape


class L2NormLayer(layers.Layer):
    """L2-normalize inputs along the given axis."""

    def __init__(self, axis=1, epsilon=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)


class SentenceEncoder(Model):
    """Token + positional embeddings, a stack of MixerBlocks, attention pooling over
    non-pad positions, and a projection to an L2-normalized sentence embedding."""

    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [
            MixerBlock(seq_len=MAX_LEN, dim=embed_dim, token_mlp_dim=256,
                       channel_mlp_dim=embed_dim, dropout=0.1)
            for _ in range(2)
        ]
        self.attn_pool = layers.Dense(1)
        # Final LayerNorm kept in float32 for numerical stability under mixed precision
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim, activation=None)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)

        # Padding mask, used only by the attention pooling below
        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)

        h = x_embed
        for block in self.blocks:
            h = block(h)

        h = self.ln_f(h)

        # Attention pooling: mask out pad positions before the softmax
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)

        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)

        return tf.cast(latent, tf.float32)
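
# Illustrative only (not executed during training): the encoder maps a batch of padded id
# sequences to unit-length sentence embeddings, e.g.
#   enc = SentenceEncoder(vocab_size)
#   z = enc(tf.zeros([4, MAX_LEN], dtype=tf.int32))   # z.shape == (4, LATENT_DIM)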


def build_contrastive_model(vocab_size):
    """Siamese setup: the same encoder embeds both views; the outputs are concatenated
    along the batch axis so the loss sees 2 * BATCH_SIZE embeddings."""
    encoder = SentenceEncoder(vocab_size=vocab_size)
    input1 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view1")
    input2 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view2")
    z1 = encoder(input1)
    z2 = encoder(input2)
    out = layers.Concatenate(axis=0)([z1, z2])
    return Model(inputs=[input1, input2], outputs=out), encoder


def nt_xent_loss(y_true, y_pred):
    """NT-Xent (normalized temperature-scaled cross-entropy) over the concatenated
    embeddings of both views; y_true is ignored."""
    z = tf.cast(y_pred, tf.float32)
    # Cosine similarities (embeddings are already L2-normalized), scaled by the temperature
    sim = tf.matmul(z, z, transpose_b=True)
    sim = sim / TEMPERATURE

    # Mask out self-similarity so a sample cannot be its own positive
    diag = tf.eye(tf.shape(sim)[0])
    sim = sim - diag * 1e9
    N2 = tf.shape(sim)[0]
    N = N2 // 2

    # Row i (view 1) is paired with row i + N (view 2) and vice versa
    labels_pos = tf.concat([tf.range(N, N2), tf.range(0, N)], axis=0)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pos, logits=sim)
    return tf.reduce_mean(loss)
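
# Worked example of the positive-pair labels (illustrative only): with a batch of 2 sentences
# the concatenated output holds [z1_a, z1_b, z2_a, z2_b], so N = 2, N2 = 4 and
# labels_pos = [2, 3, 0, 1], i.e. each view-1 row points at its view-2 counterpart and back.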


# Build and compile the model under the distribution strategy
with strategy.scope():
    model, encoder = build_contrastive_model(vocab_size)
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    model.compile(optimizer=optimizer, loss=nt_xent_loss)
    model.summary()

# Estimate steps_per_epoch from the corpus size; fall back to a fixed count if the file cannot be read
try:
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        num_lines = sum(1 for _ in f)
except Exception as e:
    print("Warning: failed to count lines in the data file:", e)
    num_lines = None

if num_lines:
    steps_per_epoch = max(1, num_lines // BATCH_SIZE)
else:
    steps_per_epoch = 1000

print("steps_per_epoch:", steps_per_epoch)

history = model.fit(ds, epochs=EPOCHS, steps_per_epoch=steps_per_epoch, verbose=1)

encoder.save_weights("encoder_fit.weights.h5")
print("Training finished and weights saved.")