hotchpotch committed on
Commit
07119c3
1 Parent(s): e45db75

Add script to generate embeddings from datasets

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. Generate.md +32 -0
  3. datasets_to_embs.py +171 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ *_debug/*
Generate.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # create embeddings
3
+
4
+ ### multilingual-e5-small + passage
5
+
6
+ ```
7
+ python datasets_to_embs.py \
8
+ --target="data" \
9
+ --model_name="intfloat/multilingual-e5-small" \
10
+ --output_name="multilingual-e5-small-passage" \
11
+ --input_prefix="passage: "
12
+ python datasets_to_embs.py \
13
+ --target="chunked" \
14
+ --model_name="intfloat/multilingual-e5-small" \
15
+ --output_name="multilingual-e5-small-passage" \
16
+ --input_prefix="passage: "
17
+ ```
18
+
19
+ ### multilingual-e5-small + query
20
+
21
+ ```
22
+ python datasets_to_embs.py \
23
+ --target="data" \
24
+ --model_name="intfloat/multilingual-e5-small" \
25
+ --output_name="multilingual-e5-small-query" \
26
+ --input_prefix="query: "
27
+ python datasets_to_embs.py \
28
+ --target="chunked" \
29
+ --model_name="intfloat/multilingual-e5-small" \
30
+ --output_name="multilingual-e5-small-query" \
31
+ --input_prefix="query: "
32
+ ```
datasets_to_embs.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+ from typing import Generator
4
+ import numpy as np
5
+ from sentence_transformers import SentenceTransformer
6
+ from tqdm import tqdm
7
+ import torch
8
+ import argparse
9
+ from pathlib import Path
10
+ from datasets import load_dataset
11
+
12
# Command-line interface for the embedding-generation script.
parser = argparse.ArgumentParser(description="Convert datasets to embeddings")
parser.add_argument(
    "-t",
    "--target",
    help="target dataset, data or chunked",
    type=str,
    choices=["data", "chunked"],
    required=True,
)
parser.add_argument(
    "-d",
    "--debug",
    help="debug mode, use small dataset",
    action="store_true",
)
parser.add_argument(
    "-m",
    "--model_name",
    help="huggingface model name",
    type=str,
    required=True,
)
parser.add_argument(
    "-i",
    "--input_prefix",
    help="input prefix",
    type=str,
    default="",
)
parser.add_argument(
    "-l",
    "--max_seq_length",
    help="max sequence length",
    type=int,
    default=512,
)
parser.add_argument(
    "-o",
    "--output_name",
    help="output dir",
    type=str,
    required=True,
)
args = parser.parse_args()
63
+
64
+
65
@dataclass
class EmbConfig:
    """Embedding-model settings collected from the CLI arguments."""

    model_name: str  # HuggingFace model name, loaded via SentenceTransformer
    input_prefix: str  # prefix prepended to every input text, e.g. "passage: "
    max_seq_length: int  # max sequence length applied to the loaded model
70
+
71
+
72
# NOTE(review): the original parsed the CLI a second time here; the result is
# identical to the module-level `args` above, so the duplicate call was removed.
target_local_ds = args.target

# Bundle the model settings so downstream helpers read one config object.
EMB_CONFIG = EmbConfig(
    model_name=args.model_name,
    input_prefix=args.input_prefix,
    max_seq_length=args.max_seq_length,
)

# Debug runs write to a separate "*_debug" tree (matches the .gitignore rule).
embs_dir = f"embs{'_debug' if args.debug else ''}"

# embs[_debug]/<output_name>/<target>/ — created up front so later saves
# cannot fail on a missing directory. pathlib "/" replaces the manual join.
output_embs_path = Path(embs_dir) / args.output_name / target_local_ds
output_embs_path.mkdir(parents=True, exist_ok=True)

print("output path:", output_embs_path)

# Load the sentence-transformers model once at module level; reused by to_embs.
MODEL = SentenceTransformer(EMB_CONFIG.model_name)
MODEL.max_seq_length = EMB_CONFIG.max_seq_length
90
+
91
+
92
def to_embs(
    texts: list[str], group_size=1024, model=None
) -> Generator[np.ndarray, None, None]:
    """Encode texts into normalized embeddings, yielding one array per group.

    Texts are buffered into groups of ``group_size`` and encoded together so
    the model sees large batches; a final partial group is flushed at the end.

    Args:
        texts: input strings to embed (any iterable of str works).
        group_size: number of texts encoded per model call.
        model: encoder to use; defaults to the module-level ``MODEL``.
            Injectable mainly for testing.

    Yields:
        One 2-D embedding array per group, with one row per input text.
    """
    encoder = MODEL if model is None else model
    group: list[str] = []
    for text in texts:
        group.append(text)
        if len(group) == group_size:
            yield encoder.encode(
                group,
                normalize_embeddings=True,
                show_progress_bar=False,
            )  # type: ignore
            group = []
    # Flush the final, possibly smaller, group.
    if group:
        yield encoder.encode(
            group, normalize_embeddings=True, show_progress_bar=False
        )  # type: ignore
109
+
110
+
111
def _to_data_text(
    data, prefix=EMB_CONFIG.input_prefix, max_len=int(EMB_CONFIG.max_seq_length * 1.5)
):
    """Build the embedding input for a plain "data" record.

    Joins title and text with a newline, prepends the configured prefix, and
    truncates to ``max_len`` characters (default 1.5x max_seq_length).
    """
    joined = "".join([prefix, data["title"], "\n", data["text"]])
    return joined[:max_len]
115
+
116
+
117
def _to_chunk_text(
    data, prefix=EMB_CONFIG.input_prefix, max_len=int(EMB_CONFIG.max_seq_length * 1.5)
):
    """Build the embedding input for a "chunked" record.

    Like _to_data_text, but the body is overlap_text followed by text
    (presumably context overlapping the previous chunk — TODO confirm).
    Truncated to ``max_len`` characters (default 1.5x max_seq_length).
    """
    body = data["overlap_text"] + data["text"]
    joined = "".join([prefix, data["title"], "\n", body])
    return joined[:max_len]
123
+
124
+
125
def ds_to_embs(
    ds,
    text_fn,
    group_size: int,
):
    """Embed a dataset in chunks of ``group_size`` rows.

    For each chunk, builds each row's input text with ``text_fn``, encodes
    the texts via ``to_embs``, and yields the concatenated embeddings with
    the chunk's starting row index (used by the caller as the output file
    name) and the shared progress bar (so the caller can set its description).

    Args:
        ds: a ``datasets`` Dataset supporting ``len`` and ``.select``.
        text_fn: maps one record (dict) to the text to embed.
        group_size: number of rows per yielded chunk.

    Yields:
        (embs, start_index, pbar) with one embedding row per record.
    """
    total = len(ds)
    pbar = tqdm(total=total)
    # Process texts group_size records at a time to bound memory use.
    for start in range(0, total, group_size):
        rows = ds.select(range(start, min(start + group_size, total)))
        texts = [text_fn(row) for row in rows]
        parts = []
        for group_embs in to_embs(texts):
            parts.append(group_embs)
            pbar.update(len(group_embs))
        yield np.concatenate(parts), start, pbar
146
+
147
+
148
# Move the model to GPU when available; CPU works but is much slower.
if torch.cuda.is_available():
    print("use cuda")
    MODEL.to("cuda")
else:
    print("!! Warning: use cpu")

# NOTE(review): args.target is "data" or "chunked"; presumably these resolve
# to local dataset directories for load_dataset — confirm against repo layout.
ds = load_dataset(args.target)["train"]  # type: ignore
# "data" records embed title+text; "chunked" records also include overlap_text.
to_text = _to_data_text if args.target == "data" else _to_chunk_text

if args.debug:
    print("debug mode")
    # Small fixed-size subset so a debug run finishes quickly.
    ds = ds.select(range(19998))  # type: ignore
    print("small dataset len:", len(ds))
    group_size = 10000
else:
    print("dataset len:", len(ds))
    group_size = 100_000

# Save one compressed .npz per chunk, named by the chunk's starting row index;
# embeddings are stored as float16 to halve storage.
for embs, idx, pbar in ds_to_embs(ds, to_text, group_size=group_size):
    filename = f"{idx}.npz"
    filepath = output_embs_path / filename
    pbar.desc = f"saving...: {str(filepath)}"
    np.savez_compressed(filepath, embs=embs.astype(np.float16))
    pbar.desc = ""