wikipedia-ja-20231030 / datasets_to_embs.py
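"""Convert the repository's "data" or "chunked" dataset into sentence
embeddings and save them as compressed float16 .npz shards under
embs/<output_name>/<target>/.

Example invocation (model name and prefix are illustrative; any
SentenceTransformer-compatible model works):

    python datasets_to_embs.py -t chunked -m intfloat/multilingual-e5-small \
        -i "passage: " -o multilingual-e5-small
"""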
from __future__ import annotations
from dataclasses import dataclass
from typing import Generator
import numpy as np
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
import torch
import argparse
from pathlib import Path
from datasets import load_dataset
parser = argparse.ArgumentParser(description="Convert datasets to embeddings")
parser.add_argument(
"-t",
"--target",
type=str,
required=True,
choices=["data", "chunked"],
help="target dataset, data or chunked",
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="debug mode, use small dataset",
)
# model_name
parser.add_argument(
"-m",
"--model_name",
type=str,
required=True,
help="huggingface model name",
)
# input_prefix
parser.add_argument(
"-i",
"--input_prefix",
type=str,
required=False,
default="",
help="input prefix",
)
# max_seq_length
parser.add_argument(
"-l",
"--max_seq_length",
type=int,
required=False,
default=512,
help="max sequence length",
)
# output_name
parser.add_argument(
"-o",
"--output_name",
type=str,
required=True,
help="output dir",
)
args = parser.parse_args()
@dataclass
class EmbConfig:
model_name: str
input_prefix: str
max_seq_length: int
target_local_ds = args.target
EMB_CONFIG = EmbConfig(
model_name=args.model_name,
input_prefix=args.input_prefix,
max_seq_length=args.max_seq_length,
)
embs_dir = f"embs{'_debug' if args.debug else ''}"
output_embs_path = Path("/".join([embs_dir, args.output_name, target_local_ds]))
output_embs_path.mkdir(parents=True, exist_ok=True)
print("output path:", output_embs_path)
MODEL = SentenceTransformer(EMB_CONFIG.model_name)
MODEL.max_seq_length = EMB_CONFIG.max_seq_length
def to_embs(texts: list[str], group_size=1024) -> Generator[np.ndarray, None, None]:
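    """Encode texts in groups of group_size, yielding one (n_texts, dim)
    array per group so the whole corpus never has to sit in memory at once.
    normalize_embeddings=True produces unit-norm vectors, so dot product
    equals cosine similarity downstream."""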
group = []
for text in texts:
group.append(text)
if len(group) == group_size:
embeddings = MODEL.encode(
group,
normalize_embeddings=True,
show_progress_bar=False,
)
yield embeddings # type: ignore
group = []
if len(group) > 0:
embeddings = MODEL.encode(
group, normalize_embeddings=True, show_progress_bar=False
)
yield embeddings # type: ignore
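# Both *_text helpers below cap the raw character count at ~1.5x the token
# limit; the tokenizer still truncates to max_seq_length, so the cap only
# avoids feeding needlessly long strings to the encoder.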
def _to_data_text(
data, prefix=EMB_CONFIG.input_prefix, max_len=int(EMB_CONFIG.max_seq_length * 1.5)
):
return (prefix + data["title"] + "\n" + data["text"])[0:max_len]
def _to_chunk_text(
data, prefix=EMB_CONFIG.input_prefix, max_len=int(EMB_CONFIG.max_seq_length * 1.5)
):
return (prefix + data["title"] + "\n" + data["overlap_text"] + data["text"])[
:max_len
]
def ds_to_embs(
ds,
text_fn,
group_size: int,
):
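    """Iterate over ds in slices of group_size rows, encode each slice, and
    yield (embeddings, slice_start_index, progress_bar); the caller uses the
    start index to name the output shard."""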
    total = len(ds)
    pbar = tqdm(total=total)
    # Process the texts group_size rows at a time.
for i in range(0, total, group_size):
texts = []
        for data in ds.select(range(i, min(i + group_size, total))):
            texts.append(text_fn(data))
embs = []
for group_embs in to_embs(texts):
embs.append(group_embs)
pbar.update(len(group_embs))
embs = np.concatenate(embs)
yield embs, i, pbar
if torch.cuda.is_available():
    print("using cuda")
    MODEL.to("cuda")
else:
    print("!! Warning: no GPU available, encoding on CPU will be slow")
ds = load_dataset(target_local_ds)["train"]  # type: ignore
to_text = _to_data_text if target_local_ds == "data" else _to_chunk_text
if args.debug:
print("debug mode")
ds = ds.select(range(19998)) # type: ignore
print("small dataset len:", len(ds))
group_size = 10000
else:
print("dataset len:", len(ds))
group_size = 100_000
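# Each iteration below yields one shard of up to group_size vectors. Shards
# are stored as float16 to halve disk usage, which is generally a safe
# trade-off for unit-norm embeddings.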
for embs, idx, pbar in ds_to_embs(ds, to_text, group_size=group_size):
filename = f"{idx}.npz"
filepath = output_embs_path / filename
pbar.desc = f"saving...: {str(filepath)}"
np.savez_compressed(filepath, embs=embs.astype(np.float16))
pbar.desc = ""