"""Script to generate splits for benchmarking text embedding clustering.
Based on data from GermEval 2019 Shared Task on Hierarchical Tesk Classification (https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html)."""

import os
import random
import sys
from collections import Counter

import jsonlines
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup

random.seed(42)

# path to "data" folder, can be retrieved from here: https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc/germeval2019t1-public-data-final.zip
DATA_PATH = sys.argv[1]

INCLUDE_BODY = (
    True  # True: combine title and blurb body (p2p); False: title only (s2s)
)

NUM_SPLITS = 10
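# fraction range for random split sizes: each split draws between
# 10% and 100% of all samples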
SPLIT_RANGE = np.array([0.1, 1.0])


def get_samples(soup, include_body=INCLUDE_BODY):
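    """Extract one (text, d0_label, d1_label) row per usable book.

    Books are kept only if they have a title and a body, exactly one
    level-0 topic, and one or two level-1 topics; when two level-1
    topics are present, the rarer one (by corpus frequency) is used
    as the d1 label.
    """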
    d1_counter = Counter([d1.string for d1 in soup.find_all("topic", {"d": 1})])

    samples = []
    for book in soup.find_all("book"):
        if book.title.string is None or book.body.string is None:
            continue

        d0_topics = list(set([d.string for d in book.find_all("topic", {"d": 0})]))
        d1_topics = list(set([d.string for d in book.find_all("topic", {"d": 1})]))

        if len(d0_topics) != 1:
            continue
        if len(d1_topics) < 1 or len(d1_topics) > 2:
            continue

        d0_label = d0_topics[0]
        d1_label = sorted(d1_topics, key=lambda x: d1_counter[x])[0]

        text = book.title.string
        if include_body:
            text += "\n" + book.body.string

        samples.append([text, d0_label, d1_label])

    return pd.DataFrame(samples, columns=["sentences", "d0_label", "d1_label"])


def get_split(frame, label="d0_label", split_range=SPLIT_RANGE):
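    """Sample a random subset of `frame` and return it as a
    {"sentences": [...], "labels": [...]} dict.

    The subset size is drawn uniformly from SPLIT_RANGE, i.e. between
    10% and 100% of the frame.
    """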
    samples = random.randint(*(split_range * len(frame)).astype(int))
    return (
        frame.sample(samples)[["sentences", label]]
        .rename(columns={label: "labels"})[["sentences", "labels"]]
        .to_dict("list")
    )


def write_sets(name, sets):
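    """Write the clustering sets as JSON Lines, one set per line, e.g.
    {"sentences": ["...", ...], "labels": ["...", ...]}."""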
    with jsonlines.open(name, "w") as f_out:
        f_out.write_all(sets)


# read and concatenate all three official splits; the clustering
# benchmark does not use the original train/dev/test division
parts = []
for filename in ("blurbs_train.txt", "blurbs_dev.txt", "blurbs_test.txt"):
    with open(os.path.join(DATA_PATH, filename), encoding="utf-8") as f:
        parts.append(f.read())

soup = BeautifulSoup("\n\n".join(parts), "html.parser")

samples = get_samples(soup)

sets = []
# coarse clustering: random splits labeled with the top-level (d0) category
for _ in range(NUM_SPLITS):
    sets.append(get_split(samples))

# fine-grained clustering within each top-level (d0) category, labeled with d1
for d0 in samples["d0_label"].unique():
    sets.append(
        (samples[samples.d0_label == d0])
        .rename(columns={"d1_label": "labels"})[["sentences", "labels"]]
        .to_dict("list")
    )

# fine-grained clustering: random splits labeled with the second-level (d1) category
for _ in range(NUM_SPLITS):
    sets.append(get_split(samples, label="d1_label"))

write_sets("test.jsonl", sets)