Datasets:
Sub-tasks:
multi-class-classification
Languages:
English
Size:
1K<n<10K
Tags:
natural-language-understanding
ideology classification
text classification
natural language processing
License:
cc-by-4.0
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script's purpose is to re-define the dataset loading functions to better suit this specific | |
reddit posts dataset.""" | |
import csv | |
import json | |
import os | |
import pandas as pd | |
from sklearn.model_selection import train_test_split | |
import datasets | |
_CITATION = """\
@InProceedings{huggingface:dataset,
    title  = {Pill Ideologies Subreddits Dataset},
    author = {Eric Rios},
    year   = {2024},
    source = {reddit.com}
}
"""
_DESCRIPTION = """\
This dataset is designed to aid research in the ongoing study of the pill ideologies subreddits,
which have risen in response to the clashes between traditional gender roles and the rise of
fourth-wave feminism.
"""
_HOMEPAGE = "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles"

_LICENSE = "cc-by-4.0"
# The HuggingFace Datasets library doesn't host the datasets; it only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
# Note: both configurations currently resolve to the same CSV file.
_URLS = {
    "first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/resolve/main/reddit_posts_fm.csv",
    "second_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/resolve/main/reddit_posts_fm.csv",
}
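
# For reference: when `_split_generators` passes one URL string from the dict above,
# `dl_manager.download_and_extract` returns the path to a locally cached copy of
# that file, along the lines of (illustrative path only, not a real cache location):
#   ~/.cache/huggingface/datasets/downloads/<hash>/reddit_posts_fm.csv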

# The dataset name usually matches the script name, with CamelCase instead of snake_case.
class SubRedditPosts(datasets.GeneratorBasedBuilder):
    """This dataset contains posts from the pill ideologies subreddits and the feminism subreddit.
    Each record has the subreddit, id, title, text, url, score, date, and engagement metadata
    (the author field was removed for privacy reasons). All subreddits were fully scraped
    on March 19th, 2024."""

    VERSION = datasets.Version("1.1.0")

    # Two configurations are defined below; they currently point to the same data
    # (see `_URLS` above). See the usage example after DEFAULT_CONFIG_NAME for how
    # to select one when loading.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="first_domain",
            version=VERSION,
            description="This part of the dataset covers a first domain",
        ),
        datasets.BuilderConfig(
            name="second_domain",
            version=VERSION,
            description="This part of the dataset covers a second domain",
        ),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"  # Not mandatory; set one only if it makes sense.
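
    # Example usage (a sketch; assumes the script is hosted on the Hub under the
    # repo id below, and a `datasets` version recent enough to accept trust_remote_code):
    #   from datasets import load_dataset
    #   ds = load_dataset("steamcyclone/Pill_Ideologies-Post_Titles", "first_domain",
    #                     trust_remote_code=True)
    #   print(ds["train"][0]["title"])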

    def _info(self):
        # Both configurations currently share the same schema, so the same features
        # are returned regardless of `self.config.name`. If the configurations ever
        # diverge, branch on `self.config.name` here.
        features = datasets.Features(
            {
                "subreddit": datasets.Value("string"),
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
                "url": datasets.Value("string"),
                "score": datasets.Value("int64"),
                # "author": datasets.Value("string"),  # removed for privacy reasons
                "date": datasets.Value("float64"),
                "subreddit_subscribers": datasets.Value("int64"),
                "num_comments": datasets.Value("int64"),
                "ups": datasets.Value("int64"),
                "downs": datasets.Value("int64"),
                "upvote_ratio": datasets.Value("float64"),
                "is_video": datasets.Value("bool"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset, if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # `dl_manager` is a datasets.download.DownloadManager that downloads and
        # extracts URLs. Given the single URL string selected below, it returns
        # the path to a locally cached copy of the CSV.
        urls = _URLS[self.config.name]
        data_path = dl_manager.download_and_extract(urls)
        data = pd.read_csv(data_path)
        data = self.process_data(data)
        # The splits are created in-memory here rather than shipped as separate
        # files, because pre-split files raised errors on Google Colab that did
        # not reproduce locally. Both splits are stratified by subreddit so each
        # split keeps the same class balance.
        train, test = train_test_split(
            data, test_size=0.10, stratify=data["subreddit"], random_state=42
        )
        train, val = train_test_split(
            train, test_size=0.20, stratify=train["subreddit"], random_state=42
        )
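        # Net effect: 10% of the data goes to test, then 20% of the remaining 90%
        # goes to validation, i.e. roughly 72% train / 18% validation / 10% test.
        # random_state=42 keeps the splits reproducible across runs.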
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "dataframe": train,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dataframe": val,
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dataframe": test,
                    "split": "test",
                },
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, dataframe, split):
        # Yields (key, example) tuples. The `key` exists for legacy reasons (tfds)
        # and is not important in itself, but must be unique for each example; the
        # original DataFrame index serves that purpose here. Both configurations
        # currently yield the same fields.
        for key, row in dataframe.iterrows():
            yield key, {
                "subreddit": row.get("subreddit"),
                "id": row.get("id"),
                "title": row.get("title"),
                "text": row.get("text"),
                "url": row.get("url"),
                "score": row.get("score"),
                # "author": row.get("author"),  # removed for privacy reasons
                "date": row.get("date"),
                "subreddit_subscribers": row.get("subreddit_subscribers"),
                "num_comments": row.get("num_comments"),
                "ups": row.get("ups"),
                "downs": row.get("downs"),
                "upvote_ratio": row.get("upvote_ratio"),
                "is_video": row.get("is_video"),
            }
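
    # For illustration, a yielded (key, example) tuple looks roughly like this
    # (all values invented):
    #   (0, {"subreddit": "PurplePillDebate", "id": "abc123", "title": "...",
    #        "score": 42, "upvote_ratio": 0.87, "is_video": False, ...})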

    def process_data(self, df):
        """Processes a DataFrame to remove unwanted rows and fill missing values."""
        # These columns cannot be empty, so drop any rows missing them.
        df = df.dropna(subset=["subreddit", "title"])
        # Fill the remaining columns with typed defaults; this keeps the schema
        # consistent when importing the dataset through HuggingFace on Colab.
        values = {
            "id": "",
            "text": "",
            "url": "",
            "score": 0,
            "date": 0.0,
            "subreddit_subscribers": 0,
            "num_comments": 0,
            "ups": 0,
            "downs": 0,
            "upvote_ratio": 0.0,
            "is_video": False,
        }
        df = df.fillna(value=values)
        return df
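
# A minimal local smoke test (a sketch, not part of the loader itself). It assumes
# this script is saved locally, e.g. as pill_ideologies.py, and that your `datasets`
# version supports loading local scripts (newer releases also require
# trust_remote_code=True for script-based datasets).
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "first_domain", trust_remote_code=True)
    print(ds)  # expect a DatasetDict with train/validation/test splits
    print(ds["train"][0])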