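"""
Generate pseudo queries for every document with a doc2query-style T5 model
("doct5"), decode them to doct5.tsv, then re-tokenize them with
preprocess_plm and cache the token ids as memmap arrays. In distributed
mode each rank generates queries for its shard of the corpus; rank 0
handles decoding and the multi-process re-tokenization.
"""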
import os
import numpy as np
import torch
import torch.multiprocessing as mp
from dataclasses import dataclass
from tqdm import tqdm
from multiprocessing import Pool
from transformers import T5ForConditionalGeneration, AutoTokenizer
from utils.manager import Manager
from utils.arguments import *


@dataclass
class CommonArgs(CommonArguments):
    mode: str = "dev"
    plm: str = "doct5"
    loader_query: str = "none"
    dataset: str = "NQ"

    preprocess_plm: str = "t5"


@dataclass
class ModelArgs(ModelArguments):
    text_length: int = 512    # max document length fed into the T5 encoder
    batch_size_eval: int = 50

    max_length: int = 64      # max length of each generated pseudo query


def main(rank, manager):
    manager.setup(rank)

    loaders = manager.prepare()
    loader_text = loaders["text"]

    model = T5ForConditionalGeneration.from_pretrained(manager.config.plm_dir).to(manager.config.device)
    tokenizer = AutoTokenizer.from_pretrained(manager.config.plm_dir)

    max_length = manager.config.max_length
    query_per_doc = manager.config.query_per_doc
    mmp_path = os.path.join(manager.config.cache_root, "dataset", "text", "doct5.mmp")
    doct5_path = os.path.join(manager.config.data_root, manager.config.dataset, "doct5.tsv")

    # generate pseudo queries
    if not manager.config.load_cache:
        text_token_ids = np.zeros((len(loader_text.sampler), query_per_doc, max_length), dtype=np.int32)

        with torch.no_grad():
            start_idx = end_idx = 0
            for i, x in enumerate(tqdm(loader_text, ncols=100, desc="Generating Queries")):
                input_ids = x["pos_seq_token_id"].to(manager.config.device)
                B = input_ids.shape[0]
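                # sample query_per_doc pseudo queries per document; generate()
                # returns (B * query_per_doc, L) token ids, reshaped below to
                # (B, query_per_doc, L)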

                sequences = model.generate(
                    input_ids=input_ids,
                    max_length=max_length,
                    do_sample=True,
                    num_return_sequences=query_per_doc
                ).view(B, query_per_doc, -1).cpu().numpy()   # B, N, L

                end_idx += B
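                # generated sequences may be shorter than max_length, so they are
                # written left-aligned into the zero-initialized buffer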
                text_token_ids[start_idx: end_idx, :, :sequences.shape[-1]] = sequences
                start_idx = end_idx

        # use a memmap to temporarily store the generated token ids; rank 0 creates the file
        if manager._rank == 0:
            text_token_ids_mmp = np.memmap(
                mmp_path,
                shape=(len(loader_text.dataset), query_per_doc, max_length),
                dtype=np.int32,
                mode="w+"
            )
        manager.synchronize()
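        # every rank reopens the shared memmap and writes only its own shard,
        # delimited by its sampler's [start, end) range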
        text_token_ids_mmp = np.memmap(
            mmp_path,
            dtype=np.int32,
            mode="r+"
        ).reshape(len(loader_text.dataset), query_per_doc, max_length)
        text_token_ids_mmp[loader_text.sampler.start: loader_text.sampler.end] = text_token_ids

        del text_token_ids_mmp
        # wait until every rank has written its shard before rank 0 reads the full memmap
        manager.synchronize()

    # tokenize the pseudo queries with preprocess_plm and cache them under dataset/text/{preprocess_plm}/doct5
    if rank == 0:
        # load all saved token ids
        text_token_ids = np.memmap(
            mmp_path,
            dtype=np.int32,
            mode="r+"
        ).reshape(len(loader_text.dataset), query_per_doc, max_length)

        if not manager.config.load_cache:
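            # decode the sampled token ids back to text; each line of doct5.tsv
            # holds the query_per_doc tab-separated pseudo queries of one document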
            with open(doct5_path, "w") as f:
                for sequences in tqdm(text_token_ids, ncols=100, desc="Decoding"):
                    texts = tokenizer.batch_decode(sequences, skip_special_tokens=True)    # N
                    f.write("\t".join(texts) + "\n")

        cache_dir = os.path.join(manager.config.cache_root, "dataset", "text", manager.config.preprocess_plm, "doct5")
        os.makedirs(cache_dir, exist_ok=True)
        preprocess_threads = 32
        all_line_count = len(loader_text.dataset)

        manager._set_plm(manager.config.preprocess_plm)
        tokenizer = AutoTokenizer.from_pretrained(manager.config.plm_dir)
        manager.logger.info("tokenizing {} in {} threads, output file will be saved at {}".format(doct5_path, preprocess_threads, cache_dir))

        arguments = []
        # create the memmaps first so that worker processes can open them in r+ mode
        token_ids = np.memmap(
            os.path.join(cache_dir, "token_ids.mmp"),
            shape=(all_line_count, query_per_doc, max_length),
            mode="w+",
            dtype=np.int32
        )
        token_lengths = np.memmap(
            os.path.join(cache_dir, "token_lengths.mmp"),
            shape=(all_line_count, query_per_doc),
            mode="w+",
            dtype=np.int32
        )
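        # e.g. with all_line_count=1_000_000, query_per_doc=5 and max_length=64,
        # token_ids.mmp takes 1_000_000 * 5 * 64 * 4 bytes ≈ 1.28 GB on disk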

        for i in range(preprocess_threads):
            start_idx = round(all_line_count * i / preprocess_threads)
            end_idx = round(all_line_count * (i+1) / preprocess_threads)
            arguments.append((doct5_path, cache_dir, all_line_count, start_idx, end_idx, query_per_doc, tokenizer, max_length))

        # each worker tokenizes its slice of doct5.tsv and writes directly into
        # the shared memmaps, so no return value is needed
        with Pool(preprocess_threads) as p:
            p.starmap(_tokenize_text, arguments)


def _tokenize_text(input_path, output_dir, all_line_count, start_idx, end_idx, query_per_doc, tokenizer, max_length):
    """
    tokenize the input text, do padding and truncation, then save the token ids, token_lengths, text ids

    Args:
        input_path: input text file path
        output_dir: directory of output numpy arrays
        start_idx: the begining index to read
        end_idx: the ending index
        tokenizer: transformer tokenizer
        max_length: max length of tokens
        text_type: corpus class
    """
    token_ids = np.memmap(
        os.path.join(output_dir, "token_ids.mmp"),
        shape=(all_line_count, query_per_doc, max_length),
        mode="r+",
        dtype=np.int32
    )
    token_lengths = np.memmap(
        os.path.join(output_dir, "token_lengths.mmp"),
        shape=(all_line_count, query_per_doc),
        mode="r+",
        dtype=np.int32
    )
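    # opened in r+ mode: each worker writes its own line range in place, so
    # concurrent workers never touch the same rows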

    with open(input_path, 'r') as f:
        pbar = tqdm(total=end_idx-start_idx, desc="Tokenizing", ncols=100, leave=False)
        for idx, line in enumerate(f):
            if idx < start_idx:
                continue
            if idx >= end_idx:
                break

            # strip the trailing newline so it does not end up in the last query
            pseudo_queries = line.rstrip("\n").split('\t')
            output = tokenizer(pseudo_queries, max_length=max_length, padding="max_length", truncation=True, return_tensors="np")

            token_id = output.input_ids
            token_length = output.attention_mask.sum(axis=-1)

            # token_length includes the special tokens added by the tokenizer (e.g. [CLS]/[SEP])
            token_lengths[idx] = token_length
            token_ids[idx] = token_id
            pbar.update(1)
        pbar.close()


if __name__ == "__main__":
    manager = Manager()
    manager.parse_args(CommonArgs=CommonArgs, ModelArgs=ModelArgs)

    if manager._distributed:
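        # torch.multiprocessing.spawn prepends the process rank to args,
        # so every worker executes main(rank, manager)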
        mp.spawn(
            main,
            args=(manager,),
            nprocs=manager._world_size,
            join=True
        )
    else:
        main(0, manager)