import os

import numpy as np
import requests
import sentencepiece as spm
import tensorflow as tf
from tensorflow.keras import layers, Model
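
# What this script does: contrastive (SimCSE-style) pre-training of a sentence
# encoder on a Korean corpus (OpenLab-NLP/ko-corpus). Each line of text is
# tokenized with SentencePiece, turned into two token-dropout "views", encoded
# with an MLP-Mixer model, and trained with an NT-Xent contrastive loss.

# Hyperparameters: data paths, sequence length, model width, and training setup.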
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "shuffled_corpus.txt"
MAX_LEN = 384
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05
DROPOUT_AUG = 0.1
EMBED_DROPOUT = 0.1
SEED = 42

tf.get_logger().setLevel("ERROR")
tf.random.set_seed(SEED)
np.random.seed(SEED)
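
# Try to initialize a local TPU; if that fails, fall back to the default
# (single-GPU/CPU) distribution strategy.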
on_tpu = False
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    print("TPU initialized:", resolver.cluster_spec().as_dict())
    on_tpu = True
except Exception as e:
    print("TPU unavailable, falling back to GPU/CPU:", e)
    strategy = tf.distribute.get_strategy()
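
# Use bfloat16 mixed precision on TPU; keep full float32 otherwise.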
from tensorflow.keras import mixed_precision

policy = mixed_precision.Policy("mixed_bfloat16" if on_tpu else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision policy:", policy)
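

# Streamed HTTP download helper: skips the download when the target file
# already exists on disk.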
def download_file(url, save_path):
    if os.path.exists(save_path):
        print(f"exists: {save_path}")
        return
    print(f"Downloading {save_path} ...")
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(chunk_size=16384):
            if chunk:
                f.write(chunk)
    print(f"{save_path} saved")
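
# Fetch the SentencePiece model and the shuffled text corpus from the
# OpenLab-NLP/ko-corpus dataset on Hugging Face.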
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/bpe.model?download=true",
    TOKENIZER_PATH,
)
download_file(
    "https://huggingface.co/datasets/OpenLab-NLP/ko-corpus/resolve/main/shuffled_corpus%20(1).txt?download=true",
    DATA_PATH,
)
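
# Load the SentencePiece tokenizer and resolve the padding id (fall back to 0
# when no explicit <pad> piece is found).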
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)
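

# Tokenize one line of text to a fixed-length int32 id sequence (truncate to
# MAX_LEN, pad with pad_id). tf_encode wraps this in tf.py_function so it can
# run inside the tf.data pipeline.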
def encode_sentence_py(s: str):
    ids = sp.encode(s, out_type=int)[:MAX_LEN]
    if len(ids) < MAX_LEN:
        ids = ids + [pad_id] * (MAX_LEN - len(ids))
    return np.array(ids, dtype=np.int32)


def tf_encode(line):
    def _encode_py(s_tensor):
        s = s_tensor.numpy().decode("utf-8")
        return encode_sentence_py(s)
    ids = tf.py_function(func=_encode_py, inp=[line], Tout=tf.int32)
    ids.set_shape([MAX_LEN])
    return ids
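

# Data augmentation: token dropout replaces each token with pad_id with
# probability DROPOUT_AUG. Two independent draws of the same sentence form the
# positive pair for contrastive learning.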
def token_dropout(tokens, drop_prob=DROPOUT_AUG):
    rnd = tf.random.uniform(tf.shape(tokens), 0, 1)
    keep_mask = rnd > drop_prob
    return tf.where(keep_mask, tokens, tf.cast(pad_id, tf.int32))


def make_views(tokens):
    v1 = token_dropout(tokens)
    v2 = token_dropout(tokens)
    return v1, v2
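

# tf.data pipeline: read lines, strip and drop empties, tokenize, shuffle,
# repeat, build the two augmented views, batch, and attach a dummy label
# (the NT-Xent loss ignores y_true).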
ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))
ds = ds.map(tf_encode, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.repeat()
ds = ds.map(make_views, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
ds = ds.map(
    lambda v1, v2: ((v1, v2), tf.zeros([BATCH_SIZE], dtype=tf.float32)),
    num_parallel_calls=tf.data.AUTOTUNE,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
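

# MLP-Mixer block: a token-mixing MLP applied across the sequence axis (after
# transposing to (batch, dim, seq_len)) followed by a channel-mixing MLP across
# the feature axis, each with a pre-LayerNorm and a residual connection.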
class MixerBlock(layers.Layer):
    def __init__(self, seq_len, dim, token_mlp_dim, channel_mlp_dim, dropout=0.0):
        super().__init__()
        self.seq_len = seq_len
        self.dim = dim
        self.token_mlp_dim = token_mlp_dim
        self.channel_mlp_dim = channel_mlp_dim

        self.ln1 = layers.LayerNormalization(epsilon=1e-6, dtype=tf.float32)
        self.token_fc1 = layers.Dense(token_mlp_dim, activation="gelu", dtype=tf.float32)
        self.token_fc2 = layers.Dense(seq_len, dtype=tf.float32)

        self.ln2 = layers.LayerNormalization(epsilon=1e-6, dtype=tf.float32)
        self.channel_fc1 = layers.Dense(channel_mlp_dim, activation="gelu", dtype=tf.float32)
        self.channel_fc2 = layers.Dense(dim, dtype=tf.float32)

        self.dropout = layers.Dropout(dropout)

    def call(self, x, training=None):
        y = self.ln1(x)
        y_t = tf.transpose(y, perm=[0, 2, 1])
        y_t = self.token_fc1(y_t)
        y_t = self.token_fc2(y_t)
        y = tf.transpose(y_t, perm=[0, 2, 1])
        x = x + self.dropout(y, training=training)

        z = self.ln2(x)
        z = self.channel_fc1(z)
        z = self.channel_fc2(z)
        x = x + self.dropout(z, training=training)

        return x


class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)
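

# Sentence encoder: token + position embeddings, a stack of 3 Mixer blocks, a
# SwiGLU-style gated projection, masked attention pooling over non-pad
# positions, and a final L2-normalized LATENT_DIM projection.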
class SentenceEncoder(Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [
            MixerBlock(seq_len=max_len, dim=embed_dim, token_mlp_dim=256,
                       channel_mlp_dim=embed_dim, dropout=0.1)
            for _ in range(3)
        ]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim, activation=None)
        self.l2norm = L2NormLayer(axis=1)
        self.fc1 = layers.Dense(1152)
        self.fc2 = layers.Dense(embed_dim)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)

        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)

        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)

        h = self.fc1(h)
        g, v = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v
        h = self.fc2(h)
        h = self.ln_f(h)

        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)

        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)

        return tf.cast(latent, tf.float32)
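

# Siamese wrapper: the same encoder embeds both views, and the two embedding
# batches are concatenated along the batch axis so the loss sees 2N vectors.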
def build_contrastive_model(vocab_size):
    encoder = SentenceEncoder(vocab_size=vocab_size)
    input1 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view1")
    input2 = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="view2")
    z1 = encoder(input1)
    z2 = encoder(input2)
    out = layers.Concatenate(axis=0)([z1, z2])
    return Model(inputs=[input1, input2], outputs=out), encoder
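

# NT-Xent loss: y_pred holds 2N L2-normalized embeddings, so the dot-product
# matrix is cosine similarity. Row i's positive sits at i + N (or i - N for the
# second half); the diagonal is masked out and a per-row softmax cross-entropy
# pulls each positive pair together against all in-batch negatives.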
def nt_xent_loss(y_true, y_pred):
    z = tf.cast(y_pred, tf.float32)
    sim = tf.matmul(z, z, transpose_b=True)
    sim = sim / TEMPERATURE

    diag = tf.eye(tf.shape(sim)[0])
    sim = sim - diag * 1e9

    N2 = tf.shape(sim)[0]
    N = N2 // 2
    labels_pos = tf.concat([tf.range(N, N2), tf.range(0, N)], axis=0)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pos, logits=sim)
    return tf.reduce_mean(loss)
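

# Build and compile the model inside the distribution strategy scope so its
# variables are created on the TPU/GPU replicas.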
with strategy.scope():
    model, encoder = build_contrastive_model(vocab_size)
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    model.compile(optimizer=optimizer, loss=nt_xent_loss)
    model.summary()
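
# Estimate steps_per_epoch from the corpus line count; the dataset repeats
# indefinitely, so Keras needs an explicit number of steps.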
try:
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        num_lines = sum(1 for _ in f)
except Exception as e:
    print("Warning: failed to count lines in the data file:", e)
    num_lines = None

if num_lines:
    steps_per_epoch = max(1, num_lines // BATCH_SIZE)
else:
    steps_per_epoch = 1000

print("steps_per_epoch:", steps_per_epoch)
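
# Train, then save only the encoder weights (the Siamese wrapper is not needed
# for inference).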
history = model.fit(ds, epochs=EPOCHS, steps_per_epoch=steps_per_epoch, verbose=1)

encoder.save_weights("encoder_fit.weights.h5")
print("Training finished and weights saved.")