import os
import numpy as np
import torch
import torch.multiprocessing as mp
from dataclasses import dataclass
from multiprocessing import Pool
from tqdm import tqdm
from transformers import T5ForConditionalGeneration, AutoTokenizer

from utils.manager import Manager
from utils.arguments import *


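# command-line argument dataclasses; they are consumed by manager.parse_args at the bottom of the script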
@dataclass
class CommonArgs(CommonArguments):
    mode: str = "dev"
    plm: str = "doct5"
    loader_query: str = "none"
    dataset: str = "NQ"

    preprocess_plm: str = "t5"


@dataclass
class ModelArgs(ModelArguments):
    text_length: int = 512
    batch_size_eval: int = 50

    max_length: int = 64


def main(rank, manager):
    manager.setup(rank)

    loaders = manager.prepare()
    loader_text = loaders["text"]

    model = T5ForConditionalGeneration.from_pretrained(manager.config.plm_dir).to(manager.config.device)
    tokenizer = AutoTokenizer.from_pretrained(manager.config.plm_dir)

    max_length = manager.config.max_length
    query_per_doc = manager.config.query_per_doc
    mmp_path = os.path.join(manager.config.cache_root, "dataset", "text", "doct5.mmp")
    doct5_path = os.path.join(manager.config.data_root, manager.config.dataset, "doct5.tsv")

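    # Stage 1: sample query_per_doc pseudo queries for every document with the doct5 model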
    if not manager.config.load_cache:
        text_token_ids = np.zeros((len(loader_text.sampler), query_per_doc, max_length), dtype=np.int32)

        with torch.no_grad():
            start_idx = end_idx = 0
            for i, x in enumerate(tqdm(loader_text, ncols=100, desc="Generating Queries")):
                input_ids = x["pos_seq_token_id"].to(manager.config.device)
                B = input_ids.shape[0]

                sequences = model.generate(
                    input_ids=input_ids,
                    max_length=max_length,
                    do_sample=True,
                    num_return_sequences=query_per_doc
                ).view(B, query_per_doc, -1).cpu().numpy()

                end_idx += B
                text_token_ids[start_idx: end_idx, :, :sequences.shape[-1]] = sequences
                start_idx = end_idx

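        # rank 0 creates the shared memmap file; after synchronizing, every rank writes its own shard of generated token ids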
        if manager._rank == 0:
            text_token_ids_mmp = np.memmap(
                mmp_path,
                shape=(len(loader_text.dataset), query_per_doc, max_length),
                dtype=np.int32,
                mode="w+"
            )
        manager.synchronize()
        text_token_ids_mmp = np.memmap(
            mmp_path,
            dtype=np.int32,
            mode="r+"
        ).reshape(len(loader_text.dataset), query_per_doc, max_length)
        text_token_ids_mmp[loader_text.sampler.start: loader_text.sampler.end] = text_token_ids

        del text_token_ids_mmp

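    # the remaining decoding and re-tokenization is handled by the master process only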
    if rank == 0:
        text_token_ids = np.memmap(
            mmp_path,
            dtype=np.int32,
            mode="r+"
        ).reshape(len(loader_text.dataset), query_per_doc, max_length)

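        # decode the generated token ids back to text and dump one tab-separated line of pseudo queries per document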
        if not manager.config.load_cache:
            with open(doct5_path, "w") as f:
                for sequences in tqdm(text_token_ids, ncols=100, desc="Decoding"):
                    texts = tokenizer.batch_decode(sequences, skip_special_tokens=True)
                    f.write("\t".join(texts) + "\n")

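        # Stage 2: re-tokenize the pseudo queries with the preprocessing PLM and cache them as memmaps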
        cache_dir = os.path.join(manager.config.cache_root, "dataset", "text", manager.config.preprocess_plm, "doct5")
        os.makedirs(cache_dir, exist_ok=True)
        preprocess_threads = 32
        all_line_count = len(loader_text.dataset)

        manager._set_plm(manager.config.preprocess_plm)
        tokenizer = AutoTokenizer.from_pretrained(manager.config.plm_dir)
        manager.logger.info("tokenizing {} in {} threads, output files will be saved at {}".format(doct5_path, preprocess_threads, cache_dir))

        arguments = []

        token_ids = np.memmap(
            os.path.join(cache_dir, "token_ids.mmp"),
            shape=(all_line_count, query_per_doc, max_length),
            mode="w+",
            dtype=np.int32
        )
        token_lengths = np.memmap(
            os.path.join(cache_dir, "token_lengths.mmp"),
            shape=(all_line_count, query_per_doc),
            mode="w+",
            dtype=np.int32
        )

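        # split the tsv file into contiguous line ranges, one per worker process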
        for i in range(preprocess_threads):
            start_idx = round(all_line_count * i / preprocess_threads)
            end_idx = round(all_line_count * (i + 1) / preprocess_threads)
            arguments.append((doct5_path, cache_dir, all_line_count, start_idx, end_idx, query_per_doc, tokenizer, max_length))

        with Pool(preprocess_threads) as p:
            p.starmap(_tokenize_text, arguments)


def _tokenize_text(input_path, output_dir, all_line_count, start_idx, end_idx, query_per_doc, tokenizer, max_length):
    """
    Tokenize the pseudo queries in the given line range, pad and truncate them, then write the token ids and
    token lengths into the shared memmap files.

    Args:
        input_path: input text file path
        output_dir: directory of the output numpy memmaps
        all_line_count: total number of lines (documents) in the input file
        start_idx: the beginning line index to read
        end_idx: the ending line index (exclusive)
        query_per_doc: number of pseudo queries per document
        tokenizer: transformers tokenizer
        max_length: max number of tokens per query
    """
    token_ids = np.memmap(
        os.path.join(output_dir, "token_ids.mmp"),
        shape=(all_line_count, query_per_doc, max_length),
        mode="r+",
        dtype=np.int32
    )
    token_lengths = np.memmap(
        os.path.join(output_dir, "token_lengths.mmp"),
        shape=(all_line_count, query_per_doc),
        mode="r+",
        dtype=np.int32
    )

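    # stream the tsv file and tokenize only the lines in [start_idx, end_idx)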
    with open(input_path, 'r') as f:
        pbar = tqdm(total=end_idx - start_idx, desc="Tokenizing", ncols=100, leave=False)
        for idx, line in enumerate(f):
            if idx < start_idx:
                continue
            if idx >= end_idx:
                break

            pseudo_queries = line.strip("\n").split('\t')
            output = tokenizer(pseudo_queries, max_length=max_length, padding="max_length", truncation=True, return_tensors="np")

            token_id = output.input_ids
            token_length = output.attention_mask.sum(axis=-1)

            token_lengths[idx] = token_length
            token_ids[idx] = token_id
            pbar.update(1)
        pbar.close()


if __name__ == "__main__":
    manager = Manager()
    manager.parse_args(CommonArgs=CommonArgs, ModelArgs=ModelArgs)

    if manager._distributed:
        mp.spawn(
            main,
            args=(manager,),
            nprocs=manager._world_size,
            join=True
        )
    else:
        main(0, manager)