OpenTransformer committed
Commit 0bc7325 · verified · 1 Parent(s): bd63fe9

Upload source/crawl_v5.py with huggingface_hub

Files changed (1)
  1. source/crawl_v5.py +405 -0
source/crawl_v5.py ADDED
@@ -0,0 +1,405 @@
+ #!/usr/bin/env python3
+ """Fresh Web Crawler V5 - concurrent version for much higher throughput.
+ Uses ThreadPoolExecutor for parallel fetches; targets ~1 GB chunks.
+ Uploads to OpenTransformer/web-crawl-2026."""
+ import os, json, gzip, time, hashlib, random, re, threading
+ from collections import deque
+ from urllib.parse import urlparse, urljoin
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+
+ import requests
+ from bs4 import BeautifulSoup
+ import trafilatura
+ from huggingface_hub import HfApi, login
+
+ HF_TOKEN = "HF_TOKEN_REDACTED"
+ TARGET_REPO = "OpenTransformer/web-crawl-2026"
+ OUTPUT_DIR = "/workspace/scraped_data"
+ STATE_FILE = "/workspace/crawl_state_v5.json"
+ CHUNK_TARGET_BYTES = 1_200_000_000   # ~1.2 GB of uncompressed text per chunk
+ TIMEOUT = 12                         # per-request timeout, seconds
+ MIN_TEXT_LEN = 200                   # skip pages with less extracted text than this
+ MAX_TEXT_LEN = 200_000               # truncate pathologically long pages
+ MAX_URLS_PER_DOMAIN = 500            # cap any one domain's contribution
+ NUM_WORKERS = 20                     # concurrent fetch threads
+ BATCH_SIZE = 50                      # URLs submitted to the pool at a time
+
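+ # Rough sizing note (an assumption, not a measurement): at a few KB of
+ # extracted text per page, a 1.2 GB chunk is on the order of a few hundred
+ # thousand documents, so each chunk represents hours of crawling.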
+ os.makedirs(OUTPUT_DIR, exist_ok=True)
+ login(token=HF_TOKEN, add_to_git_credential=False)
+ api = HfApi(token=HF_TOKEN)
+
+ USER_AGENTS = [
+     "Mozilla/5.0 (compatible; OpenTransformerBot/1.0; +https://huggingface.co/OpenTransformer)",
+     "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+     "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/121.0.0.0 Safari/537.36",
+     "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 Safari/605.1.15",
+ ]
+
+ SEED_DOMAINS = [
+     "https://www.reuters.com", "https://apnews.com", "https://www.bbc.com/news",
+     "https://www.theguardian.com", "https://www.npr.org", "https://arstechnica.com",
+     "https://news.ycombinator.com", "https://lobste.rs", "https://dev.to",
+     "https://stackoverflow.blog", "https://www.infoq.com",
+     "https://www.scientificamerican.com", "https://www.nature.com/news",
+     "https://phys.org", "https://www.quantamagazine.org",
+     "https://ocw.mit.edu", "https://plato.stanford.edu",
+     "https://en.wikipedia.org/wiki/Special:Random",
+     "https://en.wikisource.org", "https://www.gutenberg.org",
+     "https://www.usa.gov", "https://www.gov.uk", "https://data.gov", "https://www.loc.gov",
+     "https://www.smithsonianmag.com", "https://nautil.us",
+     "https://longreads.com", "https://theconversation.com",
+     "https://fs.blog", "https://waitbutwhy.com",
+     "https://www.lesswrong.com", "https://marginalrevolution.com",
+     "https://www.wired.com", "https://www.theatlantic.com",
+     "https://www.newyorker.com", "https://www.economist.com",
+     "https://www.pbs.org", "https://www.vox.com",
+     "https://www.propublica.org", "https://fivethirtyeight.com",
+     "https://www.technologyreview.com", "https://spectrum.ieee.org",
+     "https://www.livescience.com", "https://www.space.com",
+     "https://www.sciencedaily.com", "https://www.psychologytoday.com",
+     "https://news.mit.edu", "https://www.cam.ac.uk/news",
+     "https://hai.stanford.edu/news", "https://www.ox.ac.uk/news",
+     "https://www.brookings.edu", "https://www.rand.org",
+     "https://www.cfr.org", "https://carnegieendowment.org",
+     "https://www.history.com", "https://www.nationalgeographic.com",
+     "https://www.britannica.com", "https://www.investopedia.com",
+     "https://www.healthline.com", "https://www.webmd.com",
+     "https://www.mayoclinic.org", "https://medlineplus.gov",
+     "https://www.khanacademy.org", "https://www.edx.org",
+     "https://arxiv.org/list/cs.AI/recent", "https://arxiv.org/list/cs.CL/recent",
+     "https://www.freecodecamp.org/news",
+     "https://www.geeksforgeeks.org", "https://realpython.com",
+     "https://hackernoon.com",
+     "https://www.kdnuggets.com",
+ ]
+
+ BLOCKED_EXTENSIONS = {".pdf", ".jpg", ".jpeg", ".png", ".gif", ".svg", ".mp3", ".mp4",
+                       ".avi", ".mov", ".zip", ".tar", ".gz", ".exe", ".dmg", ".iso",
+                       ".css", ".js", ".woff", ".woff2", ".ttf", ".eot", ".ico", ".webp",
+                       ".bmp", ".tiff", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx"}
+ BLOCKED_PATTERNS = re.compile(
+     r"(login|signup|signin|register|cart|checkout|payment|admin|wp-admin|"
+     r"facebook\.com|twitter\.com|instagram\.com|tiktok\.com|linkedin\.com|"
+     r"youtube\.com/watch|amazon\.|ebay\.|\.pdf$|/tag/|/category/|"
+     r"/page/\d+|/feed/|/rss|/atom|#comment|/reply|/share|mailto:|tel:)", re.I)
+
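+ # Per-domain politeness state: fetch workers consult domain_last_fetch under
+ # domain_lock so that no single host is hit more than once per DOMAIN_DELAY
+ # seconds, no matter how many worker threads are running.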
+ domain_last_fetch = {}
+ domain_lock = threading.Lock()
+ DOMAIN_DELAY = 1.0
+
+
+ def load_state():
+     if os.path.exists(STATE_FILE):
+         try:
+             with open(STATE_FILE) as f:
+                 return json.load(f)
+         except Exception:
+             pass
+     return {"chunk_num": 0, "total_docs": 0, "total_bytes_uploaded": 0,
+             "seen_hashes": [], "domain_counts": {}}
+
+
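+ # seen_hashes is persisted as a JSON list; once it grows past 500k entries it
+ # is truncated to the most recent 200k, keeping the state file bounded at the
+ # cost of occasionally re-admitting an old duplicate after a restart.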
+ def save_state(state):
+     if len(state.get("seen_hashes", [])) > 500000:
+         state["seen_hashes"] = state["seen_hashes"][-200000:]
+     with open(STATE_FILE, "w") as f:
+         json.dump(state, f)
+
+
+ def is_valid_url(url):
+     try:
+         p = urlparse(url)
+         if p.scheme not in ("http", "https"):
+             return False
+         if not p.netloc or "." not in p.netloc:
+             return False
+         ext = os.path.splitext(p.path)[1].lower()
+         if ext in BLOCKED_EXTENSIONS:
+             return False
+         if BLOCKED_PATTERNS.search(url):
+             return False
+         return True
+     except Exception:
+         return False
+
+
+ def extract_links(html, base_url):
+     links = []
+     try:
+         soup = BeautifulSoup(html, "html.parser")
+         for a in soup.find_all("a", href=True):
+             href = a["href"].strip()
+             if href.startswith("#") or href.startswith("javascript:"):
+                 continue
+             full = urljoin(base_url, href)
+             full = full.split("#")[0]
+             if is_valid_url(full):
+                 links.append(full)
+     except Exception:
+         pass
+     return links
+
+
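+ # Cheap near-duplicate detection: hashing only the first 500 characters
+ # catches mirrors and syndicated copies that share an opening, without
+ # hashing whole documents.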
+ def content_hash(text):
+     return hashlib.md5(text[:500].encode("utf-8", errors="ignore")).hexdigest()
+
+
+ def fetch_and_extract(url):
+     domain = urlparse(url).netloc
+     # Reserve this domain's next fetch slot under the lock, then sleep
+     # outside it, so one slow domain never stalls the other worker threads.
+     with domain_lock:
+         now = time.time()
+         slot = max(now, domain_last_fetch.get(domain, 0) + DOMAIN_DELAY)
+         domain_last_fetch[domain] = slot
+     if slot > now:
+         time.sleep(slot - now)
+     try:
+         resp = requests.get(url, timeout=TIMEOUT, allow_redirects=True,
+                             headers={"User-Agent": random.choice(USER_AGENTS),
+                                      "Accept": "text/html,application/xhtml+xml",
+                                      "Accept-Language": "en-US,en;q=0.9"})
+         if resp.status_code != 200:
+             return None
+         ct = resp.headers.get("content-type", "")
+         if "text/html" not in ct and "xhtml" not in ct:
+             return None
+         html = resp.text
+         final_url = resp.url
+         text = trafilatura.extract(html, include_comments=False, include_tables=True,
+                                    no_fallback=False, favor_precision=False, url=final_url)
+         if not text or len(text) < MIN_TEXT_LEN:
+             return None
+         text = text[:MAX_TEXT_LEN]
+         links = extract_links(html, final_url)
+         return (final_url, text, domain, links)
+     except Exception:
+         return None
+
+
+ def get_random_wikipedia_urls(n=300):
+     urls = []
+     try:
+         resp = requests.get("https://en.wikipedia.org/w/api.php",
+                             params={"action": "query", "list": "random",
+                                     "rnnamespace": 0, "rnlimit": min(n, 500), "format": "json"},
+                             timeout=10)
+         data = resp.json()
+         for page in data.get("query", {}).get("random", []):
+             title = page["title"].replace(" ", "_")
+             urls.append("https://en.wikipedia.org/wiki/" + title)
+     except Exception:
+         pass
+     return urls
+
+
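+ # Each Hacker News list endpoint returns an array of story ids; taking the
+ # first n from all three lists means one refill can yield up to 3 * n URLs.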
+ def get_hn_urls(n=60):
+     urls = []
+     try:
+         for endpoint in ["topstories", "newstories", "beststories"]:
+             resp = requests.get("https://hacker-news.firebaseio.com/v0/%s.json" % endpoint, timeout=10)
+             ids = resp.json()[:n]
+             for sid in ids:
+                 try:
+                     item = requests.get(
+                         "https://hacker-news.firebaseio.com/v0/item/%d.json" % sid, timeout=5).json()
+                     if item and item.get("url"):
+                         urls.append(item["url"])
+                 except Exception:
+                     continue
+     except Exception:
+         pass
+     return urls
+
+
+ def get_lobsters_urls(n=25):
+     urls = []
+     try:
+         resp = requests.get("https://lobste.rs/hottest.json", timeout=10)
+         for item in resp.json()[:n]:
+             if item.get("url"):
+                 urls.append(item["url"])
+     except Exception:
+         pass
+     return urls
+
+
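+ # Uploads are retried up to five times with a linearly growing backoff
+ # (30 s, 60 s, ..., 150 s); after the fifth failure the caller discards
+ # the chunk.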
+ def upload_chunk(filepath, remote_name):
+     fsize = os.path.getsize(filepath) / (1024 * 1024)
+     print(" Uploading %s (%.1f MB)..." % (remote_name, fsize), flush=True)
+     for attempt in range(5):
+         try:
+             api.upload_file(
+                 path_or_fileobj=filepath,
+                 path_in_repo="data/" + remote_name,
+                 repo_id=TARGET_REPO,
+                 repo_type="dataset",
+             )
+             print(" Uploaded %s (%.1f MB)" % (remote_name, fsize), flush=True)
+             return True
+         except Exception as e:
+             print(" Upload attempt %d failed: %s" % (attempt + 1, e), flush=True)
+             time.sleep(30 * (attempt + 1))
+     return False
+
+
+ def main():
+     print("=" * 60, flush=True)
+     print("Fresh Web Crawler V5 (Concurrent)", flush=True)
+     print("Target: %s" % TARGET_REPO, flush=True)
+     print("Workers: %d, Chunk target: ~%.1fGB" % (NUM_WORKERS, CHUNK_TARGET_BYTES / 1e9), flush=True)
+     print("=" * 60, flush=True)
+
+     state = load_state()
+     seen_hashes = set(state.get("seen_hashes", []))
+     domain_counts = state.get("domain_counts", {})
+     chunk_num = state.get("chunk_num", 0)
+     total_docs = state.get("total_docs", 0)
+
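+     # Each pass of this loop produces one chunk: reseed the frontier, crawl
+     # until the uncompressed target size is reached, upload the gzipped file
+     # to the dataset repo, then delete the local copy.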
+     while True:
+         url_queue = deque()
+         discovered = set()
+
+         seeds = list(SEED_DOMAINS)
+         random.shuffle(seeds)
+         for seed in seeds:
+             url_queue.append(seed)
+             discovered.add(seed)
+
+         for u in get_random_wikipedia_urls(300):
+             if u not in discovered:
+                 url_queue.append(u)
+                 discovered.add(u)
+
+         for u in get_hn_urls(60) + get_lobsters_urls(25):
+             if u not in discovered:
+                 url_queue.append(u)
+                 discovered.add(u)
+
+         chunk_name = "crawl_v5_chunk%04d.jsonl.gz" % chunk_num
+         chunk_path = os.path.join(OUTPUT_DIR, chunk_name)
+         f = gzip.open(chunk_path, "wt", encoding="utf-8")
+         chunk_bytes = 0
+         chunk_docs = 0
+         chunk_start = time.time()
+         write_lock = threading.Lock()
+
+         print("\nStarting chunk %d (%d seed URLs)..." % (chunk_num, len(url_queue)), flush=True)
+
+         pages_tried = 0
+         last_status = time.time()
+
+         while chunk_bytes < CHUNK_TARGET_BYTES and url_queue:
+             batch = []
+             while url_queue and len(batch) < BATCH_SIZE:
+                 url = url_queue.popleft()
+                 domain = urlparse(url).netloc
+                 dc = domain_counts.get(domain, 0)
+                 if dc < MAX_URLS_PER_DOMAIN:
+                     batch.append(url)
+
+             if not batch:
+                 print(" Queue empty, refilling seeds...", flush=True)
+                 for u in get_random_wikipedia_urls(300):
+                     if u not in discovered:
+                         url_queue.append(u)
+                         discovered.add(u)
+                 for u in get_hn_urls(30):
+                     if u not in discovered:
+                         url_queue.append(u)
+                         discovered.add(u)
+                 if not url_queue:
+                     time.sleep(10)
+                 continue
+
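+             # A fresh pool per batch keeps the loop simple; as_completed
+             # yields results in completion order, so fast pages are written
+             # out while slower fetches are still in flight.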
+             with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
+                 futures = {executor.submit(fetch_and_extract, url): url for url in batch}
+                 for future in as_completed(futures):
+                     pages_tried += 1
+                     result = future.result()
+                     if result is None:
+                         continue
+
+                     final_url, text, domain, links = result
+                     h = content_hash(text)
+                     if h in seen_hashes:
+                         continue
+                     seen_hashes.add(h)
+
+                     doc = json.dumps({
+                         "text": text,
+                         "url": final_url,
+                         "domain": domain,
+                         "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
+                         "source": "crawl_v5",
+                     }, ensure_ascii=False)
+
+                     with write_lock:
+                         f.write(doc + "\n")
+                         doc_bytes = len(doc.encode("utf-8"))
+                         chunk_bytes += doc_bytes
+                         chunk_docs += 1
+                         total_docs += 1
+                         domain_counts[domain] = domain_counts.get(domain, 0) + 1
+
+                     random.shuffle(links)
+                     for link in links[:20]:
+                         if link not in discovered and len(url_queue) < 500000:
+                             url_queue.append(link)
+                             discovered.add(link)
+
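+                     # Status line plus a state checkpoint at most once per
+                     # minute, so restarts lose little deduplication state.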
+                     now = time.time()
+                     if now - last_status > 60:
+                         rate = chunk_docs / max(1, now - chunk_start)
+                         elapsed_min = (now - chunk_start) / 60
+                         print(" Chunk %d: %d docs, %.0fMB, %d queued, %.1f docs/s, "
+                               "tried %d pages, %.0f min elapsed" %
+                               (chunk_num, chunk_docs, chunk_bytes / 1e6,
+                                len(url_queue), rate, pages_tried, elapsed_min), flush=True)
+                         last_status = now
+                         state["seen_hashes"] = list(seen_hashes)
+                         state["domain_counts"] = domain_counts
+                         state["total_docs"] = total_docs
+                         save_state(state)
+
+         f.close()
+
+         if chunk_docs == 0:
+             print(" No docs in chunk, cleaning up", flush=True)
+             try:
+                 os.remove(chunk_path)
+             except OSError:
+                 pass
+             print(" Waiting 5 min before retrying...", flush=True)
+             time.sleep(300)
+             continue
+
+         elapsed = time.time() - chunk_start
+         compressed_size = os.path.getsize(chunk_path)
+         print(" Chunk %d complete: %d docs, %.0fMB uncompressed, %.0fMB compressed, %.0f min" %
+               (chunk_num, chunk_docs, chunk_bytes / 1e6, compressed_size / 1e6, elapsed / 60), flush=True)
+
+         if upload_chunk(chunk_path, chunk_name):
+             try:
+                 os.remove(chunk_path)
+             except OSError:
+                 pass
+             chunk_num += 1
+             state["chunk_num"] = chunk_num
+             state["total_docs"] = total_docs
+             state["total_bytes_uploaded"] = state.get("total_bytes_uploaded", 0) + compressed_size
+             state["seen_hashes"] = list(seen_hashes)
+             state["domain_counts"] = domain_counts
+             save_state(state)
+             print(" Total: %d docs, %d chunks uploaded" % (total_docs, chunk_num), flush=True)
+         else:
+             # The chunk file is deleted either way; on failure the same chunk
+             # number is reused and its contents are recrawled on the next pass.
+             print(" Upload failed, discarding chunk %d and recrawling" % chunk_num, flush=True)
+             try:
+                 os.remove(chunk_path)
+             except OSError:
+                 pass
+
+         time.sleep(10)
+
+
+ if __name__ == "__main__":
+     main()
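
Each uploaded chunk lands under data/ in the dataset repo as gzipped JSONL, one record per page with "text", "url", "domain", "timestamp", and "source" fields, as written by the json.dumps call above. A minimal sketch of reading one chunk locally (the filename is illustrative):

import gzip, json

# Stream records out of a downloaded chunk without decompressing to disk.
with gzip.open("crawl_v5_chunk0000.jsonl.gz", "rt", encoding="utf-8") as f:
    for line in f:
        doc = json.loads(line)
        print(doc["domain"], doc["url"], len(doc["text"]))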