slvnwhrl committed
Commit f51f627
1 Parent(s): d0178fa

add extraction file

Files changed (1):
extract_data.py +94 -0
extract_data.py ADDED
@@ -0,0 +1,94 @@
+ """Script to generate splits for benchmarking text embedding clustering.
+ Based on data from the GermEval 2019 Shared Task on Hierarchical Text Classification (https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html)."""
+
+ import os
+ import random
+ import sys
+ from collections import Counter
+
+ import jsonlines
+ import numpy as np
+ import pandas as pd
+ from bs4 import BeautifulSoup
+
+ random.seed(42)
+
+ # path to "data" folder, can be retrieved from here: https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc/germeval2019t1-public-data-final.zip
+ DATA_PATH = sys.argv[1]
+
+ INCLUDE_BODY = (
+     True  # True: combine title and article body (p2p), False: only title (s2s)
+ )
+
+ NUM_SPLITS = 10
+ SPLIT_RANGE = np.array([0.1, 1.0])
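+ # For each label granularity, NUM_SPLITS random subsets are drawn; each one
+ # covers between 10% and 100% of the available samples (see get_split below).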
+
+
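+ # Collect one sample per usable book: keep only books whose title and body are
+ # present, with exactly one level-0 topic and one or two level-1 topics; the
+ # rarest level-1 topic (by corpus frequency) becomes the fine-grained label.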
+ def get_samples(soup, include_body=INCLUDE_BODY):
+     d1_counter = Counter([d1.string for d1 in soup.find_all("topic", {"d": 1})])
+
+     samples = []
+     for book in soup.find_all("book"):
+         if book.title.string is None or book.body.string is None:
+             continue
+
+         d0_topics = list(set([d.string for d in book.find_all("topic", {"d": 0})]))
+         d1_topics = list(set([d.string for d in book.find_all("topic", {"d": 1})]))
+
+         if len(d0_topics) != 1:
+             continue
+         if len(d1_topics) < 1 or len(d1_topics) > 2:
+             continue
+
+         d0_label = d0_topics[0]
+         d1_label = sorted(d1_topics, key=lambda x: d1_counter[x])[0]
+
+         text = book.title.string
+         if include_body:
+             text += "\n" + book.body.string
+
+         samples.append([text, d0_label, d1_label])
+
+     return pd.DataFrame(samples, columns=["sentences", "d0_label", "d1_label"])
+
+
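+ # Sample a random number of rows (between the SPLIT_RANGE fractions of the
+ # frame's size) and return them as parallel "sentences"/"labels" lists.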
+ def get_split(frame, label="d0_label", split_range=SPLIT_RANGE):
+     samples = random.randint(*(split_range * len(frame)).astype(int))
+     return (
+         frame.sample(samples)[["sentences", label]]
+         .rename(columns={label: "labels"})[["sentences", "labels"]]
+         .to_dict("list")
+     )
+
+
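+ # Write one JSON object per clustering set, one per line (JSON Lines format).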
+ def write_sets(name, sets):
+     with jsonlines.open(name, "w") as f_out:
+         f_out.write_all(sets)
+
+
+ train = open(os.path.join(DATA_PATH, "blurbs_train.txt"), encoding="utf-8").read()
+ dev = open(os.path.join(DATA_PATH, "blurbs_dev.txt"), encoding="utf-8").read()
+ test = open(os.path.join(DATA_PATH, "blurbs_test.txt"), encoding="utf-8").read()
+
+ soup = BeautifulSoup(train + "\n\n" + dev + "\n\n" + test, "html.parser")
+
+ samples = get_samples(soup)
+
+ sets = []
+ # coarse clustering
+ for _ in range(NUM_SPLITS):
+     sets.append(get_split(samples))
+
+ # fine-grained clustering inside top-level category (d0)
+ for d0 in samples["d0_label"].unique():
+     sets.append(
+         samples[samples.d0_label == d0]
+         .rename(columns={"d1_label": "labels"})[["sentences", "labels"]]
+         .to_dict("list")
+     )
+
+ # fine-grained clustering
+ for _ in range(NUM_SPLITS):
+     sets.append(get_split(samples, label="d1_label"))
+
+ write_sets("test.jsonl", sets)
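
Usage note: a minimal sketch of how the script is run and how its output can be read back. It assumes the GermEval zip linked above has been unzipped so that its "data" folder (holding the three blurbs_*.txt files) sits at ./data; that path is an assumption, since the commit only fixes DATA_PATH = sys.argv[1].

    python extract_data.py ./data

The script writes test.jsonl to the working directory. Each line is one clustering set, a dict with parallel "sentences" and "labels" lists, so it can be inspected with the same jsonlines library the script uses:

    import jsonlines

    # read the generated splits back; each entry is one clustering set
    with jsonlines.open("test.jsonl") as reader:
        sets = list(reader)

    print(len(sets))                     # 10 coarse splits + one per d0 category + 10 fine splits
    print(sets[0]["sentences"][0][:80])  # first blurb of the first coarse split
    print(sets[0]["labels"][0])          # its d0 label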