donfu committed on
Commit
43219d4
·
1 Parent(s): 007df59

Deal with huge files, cleanup, merge

Files changed (2)
  1. merge_parquets.py +76 -0
  2. process.py +68 -127
merge_parquets.py ADDED
@@ -0,0 +1,76 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Callable, Iterable, TypeVar
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+
+
+ def stream_to_parquet(path: Path, tables: Iterable[pa.Table]) -> None:
+     try:
+         first = next(tables)
+     except StopIteration:
+         return
+     schema = first.schema
+     with pq.ParquetWriter(path, schema) as writer:
+         writer.write_table(first)
+         for table in tables:
+             table = table.cast(schema)  # enforce schema
+             writer.write_table(table)
+
+
+ def stream_from_parquet(path: Path) -> Iterable[pa.Table]:
+     reader = pq.ParquetFile(path)
+     for batch in reader.iter_batches():
+         yield pa.Table.from_batches([batch])
+
+
+ def stream_from_parquets(paths: Iterable[Path]) -> Iterable[pa.Table]:
+     for path in paths:
+         yield from stream_from_parquet(path)
+
+
+ T = TypeVar("T")
+
+
+ def coalesce(
+     items: Iterable[T], max_size: int, sizer: Callable[[T], int] = len
+ ) -> Iterable[list[T]]:
+     """Coalesce items into chunks. Tries to maximize chunk size and not exceed max_size.
+     If an item is larger than max_size, we will always exceed max_size, so make a
+     best effort and place it in its own chunk.
+     You can supply a custom sizer function to determine the size of an item.
+     Default is len.
+     >>> list(coalesce([1, 2, 11, 4, 4, 1, 2], 10, lambda x: x))
+     [[1, 2], [11], [4, 4, 1], [2]]
+     """
+     batch = []
+     current_size = 0
+     for item in items:
+         this_size = sizer(item)
+         if current_size + this_size > max_size:
+             yield batch
+             batch = []
+             current_size = 0
+         batch.append(item)
+         current_size += this_size
+     if batch:
+         yield batch
+
+
+ def coalesce_parquets(
+     paths: Iterable[Path], outpath: Path, max_size: int = 2**20
+ ) -> None:
+     tables = stream_from_parquets(paths)
+     # Instead of coalescing using number of rows as your metric, you could
+     # use pa.Table.nbytes or something.
+     # table_groups = coalesce(tables, max_size, sizer=lambda t: t.nbytes)
+     table_groups = coalesce(tables, max_size)
+     coalesced_tables = (pa.concat_tables(group) for group in table_groups)
+     stream_to_parquet(outpath, coalesced_tables)
+
+
+ def merge_parquet_dir(path: str, outpath: Path) -> None:
+     paths = Path(path).glob("*.parquet")
+     coalesce_parquets(paths, outpath)
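Note (not part of the commit): the comment in coalesce_parquets hints at grouping tables by byte size instead of row count. A minimal sketch of that variant, assuming a temp/ input directory, a merged.parquet output name, and a ~512 MB target chunk size (all illustrative):

# Sketch only: byte-size-based coalescing, as suggested by the comment in coalesce_parquets.
# "temp/", "merged.parquet" and the 512 MB target are assumptions, not part of the commit.
from pathlib import Path

import pyarrow as pa

from merge_parquets import coalesce, stream_from_parquets, stream_to_parquet

tables = stream_from_parquets(sorted(Path("temp").glob("*.parquet")))
groups = coalesce(tables, max_size=512 * 2**20, sizer=lambda t: t.nbytes)
stream_to_parquet(Path("merged.parquet"), (pa.concat_tables(g) for g in groups))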
process.py CHANGED
@@ -2,12 +2,9 @@
  # Simple script to convert StackExchange XML to Open Assistant format
  # Original code by https://github.com/b-mc2

+ import os, gc, glob, sys, re
  from bs4 import BeautifulSoup as bs
  import pandas as pd
- import os
- import glob
- import sys
- import re
  from html2text import html2text
  from datasets import load_dataset
  from lxml import etree
@@ -23,12 +20,11 @@ QUESTION_SCORE_TRESHOLD = 0
  ANSWER_SCORE_TRESHOLD = 0
  HF_DATASET = "donfu/oa-stackexchange"
  PARQUET_FILE = "{0}.parquet"
+ MAX_LENGTH = 1000  # max length of question or answer


  def main():
      datasets = sys.argv[1:] if len(sys.argv) > 1 else list_cached_datasets()
-     if "temp" in datasets:
-         process_temp_datasets()
      for dataset in datasets:
          process_dataset(dataset)

@@ -44,28 +40,63 @@ def process_dataset(dataset):
      xml_file = f"{XML_DIR}/{dataset}.xml"
      parquet_file = PARQUET_FILE.format(dataset)
      source = SOURCE.format(dataset)
-     if os.path.exists(xml_file) and not os.path.exists(parquet_file):
-         df = parse_xml(xml_file)
-         oa = convert_to_oa(df, source)
-         save_parquet(oa, dataset)
+     if not os.path.exists(xml_file):
+         print(f"XML file {xml_file} not found, please download first. Skipping...")
+     elif not os.path.exists(parquet_file):
+         df = parse_and_convert(xml_file, source)
+         save_parquet(df, dataset)
          # upload_hf(dataset)
      else:
-         print(f"XML file {xml_file} not found, please download first. Skipping...")
+         print(f"File already converted {xml_file}. Skipping...")
+
+
+ def parse_and_convert(path: str, source: str):
+     """
+     Parse (very large) XML files with sax parser and load it into a pandas Dataframe
+     """
+     total_rows = int(subprocess.getoutput(f"grep -c '<row' {path}"))
+     print(f"Parsing {total_rows} rows from {path}...")
+     columns = "Id PostTypeId Body Title Tags Score AcceptedAnswerId ParentId"
+     rows = []
+     max_process = 10**6
+     processed = 0
+     oa_df = pd.DataFrame(columns=["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"])
+
+     context = etree.iterparse(path, events=("end",))
+
+     for _, element in tqdm(context, total=total_rows):
+         if element.tag == "row":
+             if len(element.get("Body")) > MAX_LENGTH:
+                 continue
+             rows.append(parse_row(element))
+             processed += 1
+             element.clear()
+             while element.getprevious() is not None:
+                 del element.getparent()[0]
+
+             if processed % max_process == 0 or processed == total_rows:
+                 df = pd.DataFrame(rows, columns=columns.split())
+                 rows = []
+                 oa = convert_to_oa(df, source)
+                 oa_df = pd.concat([oa_df, oa])
+                 del df
+                 del oa
+                 gc.collect()

+     return oa_df

- def process_temp_datasets():
-     parquet_files = glob.glob(f"temp/?.parquet")
-     for file in parquet_files:
-         print("Reading parquet file: ", file)
-         df = pd.read_parquet(file)
-         print("Converting to Open Assistant format...")
-         oa = convert_to_oa(df, SOURCE.format("stackoverflow"))
-         num = re.search(r"\d", file)[0]
-         parquet_file = f"so/stackoverflow-{num}.parquet"
-         df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
-         print("Wrote parquet file: ", parquet_file)

-     merge_parquet_dir("so", "stackoverflow.parquet")
+ def parse_row(element):
+     return [
+         int(element.get("Id")),
+         int(element.get("PostTypeId")),
+         element.get("Body"),
+         element.get("Title", ""),
+         element.get("Tags", ""),
+         int(element.get("Score", 0)),
+         int(element.get("AcceptedAnswerId", 0)),
+         int(element.get("ParentId", 0)),
+     ]


  def convert_to_oa(all, source):
@@ -74,26 +105,16 @@ def convert_to_oa(all, source):

      Only include questions with an AcceptedAnswerId
      """
-     convert_tags = (
-         lambda raw: raw.replace("-", " ")
-         .replace("><", ", ")
-         .replace("<", "")
-         .replace(">", "")
-     )
-     create_metadata = lambda row: {
-         "tags": convert_tags(row["Tags_q"]),
-         "question_score": row["Score_q"],
-         "answer_score": row["Score_a"],
-     }
      questions = all[all["AcceptedAnswerId"] != 0]
      merged = pd.merge(
          questions,
          all,
-         how="left",
+         how="inner",
          left_on="AcceptedAnswerId",
          right_on="Id",
          suffixes=("_q", "_a"),
      )
+
      del all

      merged["INSTRUCTION"] = (
@@ -106,6 +127,18 @@ def convert_to_oa(all, source):
      return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]


+ def convert_tags(raw):
+     return raw.replace("-", " ").replace("><", ", ").replace("<", "").replace(">", "")
+
+
+ def create_metadata(row):
+     return {
+         "tags": convert_tags(row["Tags_q"]),
+         "question_score": row["Score_q"],
+         "answer_score": row["Score_a"],
+     }
+
+
  def save_parquet(df, dataset):
      """
      Save Dataframe to Parquet. See here for specs:
@@ -126,98 +159,6 @@ def upload_hf(dataset):
      print("Uploaded to Hugging Face: " + HF_DATASET)


- # Define a custom SAX ContentHandler to extract data from the XML file
- class StackExchangeHandler:
-     def __init__(self, total_rows):
-         self.total_rows = total_rows
-         self.progress_bar = tqdm(total=self.total_rows)
-         self.df = pd.DataFrame(
-             columns=[
-                 "Id",
-                 "PostTypeId",
-                 "Body",
-                 "Title",
-                 "Tags",
-                 "Score",
-                 "AcceptedAnswerId",
-                 "ParentId",
-             ]
-         )
-
-     def startElement(self, name, attrs):
-         if name == "row":
-             row = {}
-             row["Id"] = int(attrs.getValue("Id"))
-             row["PostTypeId"] = int(attrs.getValue("PostTypeId"))
-             row["Body"] = str(attrs.getValue("Body"))
-             row["Title"] = str(attrs.get("Title", ""))
-             row["Tags"] = str(attrs.get("Tags", ""))
-             row["Score"] = int(attrs.get("Score", 0))
-             row["ParentId"] = int(attrs.get("ParentId", 0))
-             row["AcceptedAnswerId"] = int(attrs.get("AcceptedAnswerId", 0))
-
-             self.df = pd.concat(
-                 [self.df, pd.DataFrame([row], columns=self.df.columns)],
-                 ignore_index=True,
-             )
-             self.progress_bar.update(1)
-
-
- def parse_xml(path: str):
-     """
-     Parse (very large) XML files with sax parser and load it into a pandas Dataframe
-     """
-     total_rows = int(subprocess.getoutput(f"grep -c '<row' {path}"))
-     print(f"Parsing {total_rows} rows from {path}...")
-     columns = "Id PostTypeId Body Title Tags Score AcceptedAnswerId ParentId"
-     rows = []
-     if total_rows > 50000000:
-         huge_file = True
-         temp_file = 1
-         os.makedirs("temp", exist_ok=True)
-
-     context = etree.iterparse(path, events=("start", "end"))
-
-     for event, element in tqdm(
-         context, total=total_rows * 2
-     ):  # somehow it does not work just with start event, hence *2
-         if event == "start" and element.tag == "row":
-             row = [
-                 int(element.get("Id")),
-                 int(element.get("PostTypeId")),
-                 element.get("Body"),
-                 element.get("Title", ""),
-                 element.get("Tags", ""),
-                 int(element.get("Score", 0)),
-                 int(element.get("AcceptedAnswerId", 0)),
-                 int(element.get("ParentId", 0)),
-             ]
-             rows.append(row)
-             if huge_file and len(rows) >= 10000000:
-                 df = pd.DataFrame(rows, columns=columns.split())
-                 df.to_parquet(
-                     f"temp/{temp_file}.parquet", engine="pyarrow", index=False
-                 )
-                 print(f"Wrote temp/{temp_file}.parquet file")
-                 rows = []
-                 temp_file += 1
-                 del df
-         element.clear()
-         element.getparent().remove(element)
-
-     df = pd.DataFrame(rows, columns=columns.split())
-     if huge_file:
-         df.to_parquet(f"temp/{temp_file}.parquet", engine="pyarrow", index=False)
-         del rows
-         del df
-         print("Merging all temp files...")
-         merge_parquet_dir("temp", "temp/merged.parquet")
-         df = pd.read_parquet("temp/merged.parquet")
-         print(f"Loaded full dataset with {len(df)} rows")
-
-     return df
-
-
  remove_markdown_links_pattern = r"\[([^\]]+)\]\(([^\)]+)\)"
  remove_remaining_links = r"https?:\/\/[^\s]+"

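Note (not part of the commit): the new parse_and_convert relies on the usual lxml iterparse memory idiom of clearing each finished row element and deleting already-processed siblings from the root, so huge Posts dumps never sit fully in memory. A standalone sketch of that pattern, with "Posts.xml" as a placeholder path and the row handling stubbed out:

# Sketch only: the iterparse memory-freeing pattern used in parse_and_convert.
# "Posts.xml" is a placeholder; the counter stands in for real row handling.
from lxml import etree

rows_seen = 0
for _, element in etree.iterparse("Posts.xml", events=("end",)):
    if element.tag == "row":
        rows_seen += 1  # process.py calls parse_row(element) here
        element.clear()  # free this row's attributes/text
        while element.getprevious() is not None:
            del element.getparent()[0]  # drop already-processed siblings from the root
print(f"Processed {rows_seen} rows")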