"""Script to generate splits for benchmarking text embedding clustering.
Data and preprocessing based on 10kGNAD dataset (https://github.com/tblock/10kGNAD)."""

import random
import re
import sqlite3
import sys

import jsonlines
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# seed both RNGs: `random` draws the split sizes and NumPy's global RNG
# drives `DataFrame.sample`, so both must be fixed for reproducible splits
random.seed(42)
np.random.seed(42)

# path to the corpus file; it can be downloaded from
# https://github.com/tblock/10kGNAD/releases/download/v1.0/corpus.sqlite3
DATA_PATH = sys.argv[1]
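# example invocation (the script name here is illustrative):
#   python create_splits.py corpus.sqlite3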

# True: combine title and article body (p2p), False: only title (s2s)
INCLUDE_BODY = False
ARTICLE_QUERY = (
    f"SELECT Path, Title{', Body' if INCLUDE_BODY else ''} FROM Articles "
    "WHERE Path LIKE 'Newsroom/%' AND Path NOT LIKE 'Newsroom/User%' "
    "ORDER BY Path"
)
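# only editorial articles (paths under Newsroom/) are kept and user forum
# posts (Newsroom/User*) are dropped; the category label is the second path
# component, extracted further below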

NUM_SPLITS = 10
SPLIT_RANGE = np.array([0.1, 1.0])
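# NUM_SPLITS independent subsamples are drawn; each keeps a uniformly chosen
# fraction of the corpus between SPLIT_RANGE[0] and SPLIT_RANGE[1]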


def get_split(frame, split_range=SPLIT_RANGE):
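    """Draw a random subsample of `frame`: the row count is chosen uniformly
    between the two `split_range` fractions of the data (by default 10% to
    100%), and the subsample is returned as a dict of column lists."""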
    samples = random.randint(*(split_range * len(frame)).astype(int))
    return frame.sample(samples).to_dict("list")


def write_sets(name, sets):
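    """Write the splits to `name` in JSON Lines format, one split dict per line."""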
    with jsonlines.open(name, "w") as f_out:
        f_out.write_all(sets)


conn = sqlite3.connect(DATA_PATH)
cursor = conn.cursor()

samples = []
for row in tqdm(cursor.execute(ARTICLE_QUERY).fetchall(), unit_scale=True):
    path, title = row[0], row[1]

    text = title

    if INCLUDE_BODY:
        body = row[-1]
        soup = BeautifulSoup(body, "html.parser")

        # get description from subheadline
        description_obj = soup.find("h2", {"itemprop": "description"})
        if description_obj is not None:
            text += (
                " " + description_obj.text.replace("\n", " ").replace("\t", " ").strip()
            )

        # get text from paragraphs
        text_container = soup.find("div", {"class": "copytext"})
        if text_container is not None:
            for p in text_container.find_all("p"):
                text += " " + (
                    p.text.replace("\n", " ")
                    .replace("\t", " ")
                    .replace('"', "")
                    .replace("'", "")
                    + " "
                )
        text = text.strip()

    # remove author signatures from the end of the article; the year pattern
    # is kept loose because some articles contain malformed years such as 21015
    for author in re.findall(r"\.\ \(.+,.+2[0-9]+\)", text[-50:]):
        text = text.replace(author, ".")

    # get label from path
    label = path.split("/")[1]
    samples.append([text, label])

conn.close()

samples = pd.DataFrame(samples, columns=["sentences", "labels"])

sets = [get_split(samples) for _ in range(NUM_SPLITS)]

write_sets("test.jsonl", sets)
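
# each line of test.jsonl holds one split as a dict of parallel lists,
# e.g. (illustrative values):
#   {"sentences": ["<article title> ...", ...], "labels": ["Web", ...]}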