slvnwhrl committed
Commit 432066f
1 Parent(s): be05d3c

add extraction script

Files changed (1)
  extract_data.py (+91, -0)
extract_data.py ADDED
@@ -0,0 +1,91 @@
"""Script to generate splits for benchmarking text embedding clustering.
Data and preprocessing are based on the 10kGNAD dataset (https://github.com/tblock/10kGNAD)."""

import random
import re
import sqlite3
import sys

import jsonlines
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm

# seed both RNGs: `random` draws the split sizes, while numpy's global state
# backs pandas' DataFrame.sample, so both are needed for reproducible splits
random.seed(42)
np.random.seed(42)

# path to corpus file, can be retrieved from here: https://github.com/tblock/10kGNAD/releases/download/v1.0/corpus.sqlite3
DATA_PATH = sys.argv[1]

INCLUDE_BODY = (
    False  # True: combine title and article body (p2p), False: only title (s2s)
)
# select all Newsroom articles except user forum posts ("Newsroom/User...")
ARTICLE_QUERY = f"SELECT Path, Title{', Body' if INCLUDE_BODY else ''} FROM Articles WHERE PATH LIKE 'Newsroom/%' AND PATH NOT LIKE 'Newsroom/User%' ORDER BY Path"

NUM_SPLITS = 10
SPLIT_RANGE = np.array([0.1, 1.0])


def get_split(frame, split_range=SPLIT_RANGE):
    """Sample a random subset of between 10% and 100% of the frame's rows."""
    samples = random.randint(*(split_range * len(frame)).astype(int))
    return frame.sample(samples).to_dict("list")


def write_sets(name, sets):
    """Write one JSON object per split to a jsonlines file."""
    with jsonlines.open(name, "w") as f_out:
        f_out.write_all(sets)


conn = sqlite3.connect(DATA_PATH)
cursor = conn.cursor()

samples = []
for row in tqdm(cursor.execute(ARTICLE_QUERY).fetchall(), unit_scale=True):
    path, title = row[0], row[1]

    text = title

    if INCLUDE_BODY:
        body = row[-1]
        soup = BeautifulSoup(body, "html.parser")

        # get description from subheadline
        description_obj = soup.find("h2", {"itemprop": "description"})
        if description_obj is not None:
            text += (
                " " + description_obj.text.replace("\n", " ").replace("\t", " ").strip()
            )

        # get text from paragraphs
        text_container = soup.find("div", {"class": "copytext"})
        if text_container is not None:
            for p in text_container.find_all("p"):
                text += " " + (
                    p.text.replace("\n", " ")
                    .replace("\t", " ")
                    .replace('"', "")
                    .replace("'", "")
                    + " "
                )
        text = text.strip()

    # remove trailing author bylines of the form ". (<authors>, <date>)";
    # the year pattern is deliberately loose since some articles contain
    # typos such as a year of 21015
    for author in re.findall(r"\. \(.+,.+2[0-9]+\)", text[-50:]):
        text = text.replace(author, ".")

    # the label is the second path segment, i.e. the article's category
    label = path.split("/")[1]
    samples.append([text, label])

conn.close()

samples = pd.DataFrame(samples, columns=["sentences", "labels"])

sets = [get_split(samples) for _ in range(NUM_SPLITS)]

write_sets("test.jsonl", sets)
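
For reference, a minimal sketch of how the output can be inspected, assuming the script was invoked as `python extract_data.py corpus.sqlite3` (the corpus filename is just the release asset's default name; only the output name test.jsonl is fixed by the script). Each line of test.jsonl holds one split as a dict with parallel "sentences" and "labels" lists:

# sketch: inspect the generated splits; assumes the script above was run
# with the downloaded corpus and wrote test.jsonl to the working directory
import jsonlines

with jsonlines.open("test.jsonl") as reader:
    for i, split in enumerate(reader):
        print(
            f"split {i}: {len(split['sentences'])} samples, "
            f"{len(set(split['labels']))} categories"
        )

Split sizes vary by design: get_split draws anywhere between 10% and 100% of the articles each time, so the ten splits exercise clustering at different corpus sizes.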