tenkgnad-clustering-s2s / extract_data.py
"""Script to generate splits for benchmarking text embedding clustering.
Data and preprocessing based on 10kGNAD dataset (https://github.com/tblock/10kGNAD)."""
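# usage: python extract_data.py <path/to/corpus.sqlite3>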
import random
import re
import sqlite3
import sys

import jsonlines
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm

random.seed(42)
# path to the corpus file, which can be retrieved from
# https://github.com/tblock/10kGNAD/releases/download/v1.0/corpus.sqlite3
DATA_PATH = sys.argv[1]

# True: combine title and article body (p2p), False: only title (s2s)
INCLUDE_BODY = False
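# select every article in the Newsroom section, excluding user posts ("Newsroom/User...")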
ARTICLE_QUERY = (
    f"SELECT Path, Title{', Body' if INCLUDE_BODY else ''} FROM Articles "
    "WHERE Path LIKE 'Newsroom/%' AND Path NOT LIKE 'Newsroom/User%' "
    "ORDER BY Path"
)
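# number of test sets to generate and the fraction range each set samples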
NUM_SPLITS = 10
SPLIT_RANGE = np.array([0.1, 1.0])


def get_split(frame, split_range=SPLIT_RANGE):
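    """Return a random subset of `frame` as column-wise lists; the subset's
    size is drawn uniformly from `split_range` (by default 10%-100% of all rows)."""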
samples = random.randint(*(split_range * len(frame)).astype(int))
return frame.sample(samples).to_dict("list")


def write_sets(name, sets):
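    """Write each set as one line of a JSON Lines file."""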
with jsonlines.open(name, "w") as f_out:
f_out.write_all(sets)


conn = sqlite3.connect(DATA_PATH)
cursor = conn.cursor()
samples = []
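# extract one (text, label) pair per article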
for row in tqdm(cursor.execute(ARTICLE_QUERY).fetchall(), unit_scale=True):
path, title = row[0], row[1]
text = title
if INCLUDE_BODY:
body = row[-1]
soup = BeautifulSoup(body, "html.parser")
# get description from subheadline
description_obj = soup.find("h2", {"itemprop": "description"})
if description_obj is not None:
text += (
" " + description_obj.text.replace("\n", " ").replace("\t", " ").strip()
)
# get text from paragraphs
text_container = soup.find("div", {"class": "copytext"})
if text_container is not None:
            for p in text_container.find_all("p"):
text += " " + (
p.text.replace("\n", " ")
.replace("\t", " ")
.replace('"', "")
.replace("'", "")
+ " "
)
text = text.strip()
    # remove trailing article author credits
    for author in re.findall(
        r"\.\ \(.+,.+2[0-9]+\)", text[-50:]
    ):  # some articles contain typo'd years such as 21015, hence the loose year pattern
        text = text.replace(author, ".")
    # the label is the second path component, e.g. "Newsroom/Web/..." -> "Web"
    label = path.split("/")[1]
samples.append([text, label])
conn.close()

samples = pd.DataFrame(samples, columns=["sentences", "labels"])

sets = []
for _ in range(NUM_SPLITS):
sets.append(get_split(samples))
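# each line of test.jsonl holds one split: {"sentences": [...], "labels": [...]}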
write_sets("test.jsonl", sets)