from transformers import AutoTokenizer, FlaxBertForSequenceClassification
import datasets
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard
from jax import pmap
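

# Run the classifier on one sharded batch and turn the logits into
# per-label probabilities with a sigmoid.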
def pred_fn(inputs):
    outputs = model(**inputs)
    return jax.nn.sigmoid(outputs.logits)
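

# Tokenize a batch of texts, run data-parallel inference on it, and add
# the predicted toxicity probabilities as new dataset columns.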
def get_toxicity(batch, batch_size):
    num_examples = len(batch["text"])
    inputs = tokenizer(
        batch["text"],
        return_tensors="np",
        truncation=True,
        padding="max_length",
        max_length=512,
    )
    # Pad the (possibly partial) final batch up to batch_size rows so that
    # shard() can split it evenly across devices; the padded rows are
    # discarded after prediction.
    inputs = shard(
        {
            k: jnp.pad(jnp.array(v), ((0, batch_size - num_examples), (0, 0)))
            for k, v in inputs.items()
        }
    )
    preds = p_pred(inputs)
    # Merge the per-device outputs and drop predictions for the padded rows.
    preds = preds.reshape(-1, preds.shape[-1])[:num_examples]
    # Attach one probability column per label in model.config.id2label.
    for k, v in model.config.id2label.items():
        batch[v] = preds[:, k].tolist()
    return batch
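

# Replicate pred_fn across all local devices for data-parallel inference.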
p_pred = pmap(pred_fn, "inputs")
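
# Load the Finnish toxicity classifier, converting the PyTorch weights
# to Flax and running in bfloat16.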
tokenizer = AutoTokenizer.from_pretrained("TurkuNLP/bert-large-finnish-cased-toxicity")
model = FlaxBertForSequenceClassification.from_pretrained(
    "TurkuNLP/bert-large-finnish-cased-toxicity", from_pt=True, dtype=jnp.bfloat16
)
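
# Cleaned Finnish mC4 corpus, previously saved to local disk.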
dataset = datasets.load_from_disk("/researchdisk/mc4_3.1.0_fi_cleaned")
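
# BATCH_SIZE must be divisible by jax.local_device_count() so that shard()
# can split each padded batch evenly across devices.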
BATCH_SIZE = 8192
dataset = dataset.map(
    get_toxicity,
    num_proc=1,
    batched=True,
    batch_size=BATCH_SIZE,
    fn_kwargs={"batch_size": BATCH_SIZE},
)
print(dataset)
# SAVE DATASET
dataset.save_to_disk(
    "/researchdisk/mc4_3.1.0_fi_cleaned_dataset_toxicity_labels", num_proc=32
)