# self-annotated_reddit_climate_comment.py
import pandas as pd
from datasets import (
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Image,
    Sequence,
    Split,
    SplitGenerator,
    Value,
)

_CITATION = ""
_DESCRIPTION = ""
_HOMEPAGE = "https://huggingface.co/datasets/SarcasmNet/self-annotated_reddit_climate_comment"
_LICENSE = "MIT"
_URL = "https://github.com/catherine-ywang/Reddit-Climate-Environment-Sarcasm-Self-Annotated-Data/raw/main/self_annotated_comments.csv"


class NewDataset(GeneratorBasedBuilder):
    """Loader for the SarcasmNet self-annotated Reddit climate comment dataset."""

    def _info(self):
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "id": Value("string"),
                "post_title": Value("string"),
                "post_author": Value("string"),
                "post_body": Value("string"),
                "post_url": Value("string"),
                "post_pic": Image(),
                "subreddit": Value("string"),
                "post_timestamp": Value("string"),
                "post_upvotes": Value("int32"),
                "post_permalink": Value("string"),
                # Each post carries the list of its annotated comments; "Label"
                # holds the self-annotated sarcasm label for the comment.
                "comments": Sequence({
                    "CommentID": Value("string"),
                    "CommentAuthor": Value("string"),
                    "CommentBody": Value("string"),
                    "CommentTimestamp": Value("string"),
                    "CommentUpvotes": Value("int32"),
                    "CommentPermalink": Value("string"),
                    "Label": Value("int32"),
                }),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # Download the annotated CSV and expose it as a single train split.
        csv_path = dl_manager.download(_URL)
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": csv_path})]
    def _generate_examples(self, filepath):
        df = pd.read_csv(filepath)
        # pandas reads missing CSV cells as NaN; convert them to None so they
        # serialize cleanly against the feature types declared above.
        df = df.astype(object).where(pd.notna(df), None)
        # The CSV is flat (one row per comment), so group rows by post ID and
        # emit one example per post with all of its annotated comments attached.
        grouped_df = df.groupby("PostID")
        for post_id, group in grouped_df:
            post_data = group.iloc[0]  # Post-level fields repeat on every row
            comments = []
            # Collect each unique comment once, keeping its annotation label
            for comment_id in group["CommentID"].unique():
                comment_data = group[group["CommentID"] == comment_id].iloc[0]
                comments.append({
                    "CommentID": comment_id,
                    "CommentAuthor": comment_data["CommentAuthor"],
                    "CommentBody": comment_data["CommentBody"],
                    "CommentTimestamp": comment_data["CommentTimestamp"],
                    "CommentUpvotes": comment_data["CommentUpvotes"],
                    "CommentPermalink": comment_data["CommentPermalink"],
                    "Label": comment_data["Label"],
                })
            example = {
                "id": post_id,
                "post_title": post_data["PostTitle"],
                "post_author": post_data["PostAuthor"],
                "post_body": post_data["PostBody"],
                "post_url": post_data["PostUrl"],
                "post_pic": post_data["PostPic"],
                "subreddit": post_data["Subreddit"],
                "post_timestamp": post_data["PostTimestamp"],
                "post_upvotes": post_data["PostUpvotes"],
                "post_permalink": post_data["PostPermalink"],
                "comments": comments,
            }
            yield post_id, example
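

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the loading script): builders
# like this are normally consumed through `datasets.load_dataset`. The
# repository id below is taken from _HOMEPAGE and is otherwise an assumption;
# `trust_remote_code=True` is needed because this is a community script. The
# `__main__` guard keeps this from running when `datasets` imports the module.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(
        "SarcasmNet/self-annotated_reddit_climate_comment",
        split="train",
        trust_remote_code=True,
    )
    # With a Sequence of a dict feature, each example's "comments" field is
    # returned as a dict of aligned lists (one list per comment attribute).
    first = ds[0]
    print(first["post_title"])
    print(len(first["comments"]["CommentBody"]), "annotated comments")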