Datasets:
File size: 5,232 Bytes
c5b8a78 a5df15a c5b8a78 3fa893d c5b8a78 a5df15a 57f4032 a5df15a 57f4032 a5df15a 57f4032 a5df15a 57f4032 a5df15a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
import csv
import json
import os
from datasets import GeneratorBasedBuilder, Features, Value, Sequence, SplitGenerator, BuilderConfig, DatasetInfo, Split
import logging
import pandas as pd
from typing import Dict
CITATION = ""
_DESCRIPTION = "Demo"
_URL = ""
_HOMEPAGE = ""
_LICENSE = ""
_URL = "https://github.com/catherine-ywang/reddit_climate_comment_data/raw/main/climate_comments.json.zip"
class NewDataset(GeneratorBasedBuilder):
    """Loader for a Reddit climate-comments JSON dump.

    Yields one example per post, each carrying its nested comments and
    replies. Upvote counts are cast to int; every other field is a string.
    """

    def _info(self):
        # NOTE(review): the original schema declared a top-level
        # "Posts": Sequence({...}) with CamelCase keys, but
        # _generate_examples yields one flat post per example with
        # lowercase keys ("id", "post_title", ..., "comments") — that
        # mismatch would fail schema validation at load time. The
        # features below describe the example actually yielded.
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features(
                {
                    "id": Value("string"),
                    "post_title": Value("string"),
                    "post_author": Value("string"),
                    "post_body": Value("string"),
                    "post_url": Value("string"),
                    "post_pic": Value("string"),
                    "subreddit": Value("string"),
                    "post_timestamp": Value("string"),
                    "post_upvotes": Value("int32"),
                    "post_permalink": Value("string"),
                    # A list-of-dicts spec ([{...}]) declares a list of
                    # structs, which nests more naturally than
                    # Sequence({...}) for doubly-nested records.
                    "comments": [
                        {
                            "CommentID": Value("string"),
                            "CommentAuthor": Value("string"),
                            "CommentBody": Value("string"),
                            "CommentTimestamp": Value("string"),
                            "CommentUpvotes": Value("int32"),
                            "CommentPermalink": Value("string"),
                            "Replies": [
                                {
                                    "ReplyID": Value("string"),
                                    "ReplyAuthor": Value("string"),
                                    "ReplyBody": Value("string"),
                                    "ReplyTimestamp": Value("string"),
                                    "ReplyUpvotes": Value("int32"),
                                    "ReplyPermalink": Value("string"),
                                }
                            ],
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            citation=CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the zip; the JSON inside feeds TRAIN."""
        path = dl_manager.download_and_extract(_URL)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                # os.path.join instead of string concatenation for portability.
                gen_kwargs={"filepath": os.path.join(path, "climate_comments.json")},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield (post_id, example) pairs, one per post in the dump.

        Args:
            filepath: path to the extracted climate_comments.json file.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for post in data["Posts"]:
            post_id = post["PostID"]
            yield post_id, {
                "id": post_id,
                "post_title": post["PostTitle"],
                "post_author": post["PostAuthor"],
                "post_body": post["PostBody"],
                "post_url": post["PostUrl"],
                "post_pic": post["PostPic"],
                "subreddit": post["Subreddit"],
                "post_timestamp": post["PostTimestamp"],
                # Defensive cast: int() accepts both int and numeric-string
                # values, matching the original code's behavior.
                "post_upvotes": int(post["PostUpvotes"]),
                "post_permalink": post["PostPermalink"],
                "comments": [
                    {
                        "CommentID": comment["CommentID"],
                        "CommentAuthor": comment["CommentAuthor"],
                        "CommentBody": comment["CommentBody"],
                        "CommentTimestamp": comment["CommentTimestamp"],
                        "CommentUpvotes": int(comment["CommentUpvotes"]),
                        "CommentPermalink": comment["CommentPermalink"],
                        # "Replies" may be absent on a comment; default to [].
                        "Replies": [
                            {
                                "ReplyID": reply["ReplyID"],
                                "ReplyAuthor": reply["ReplyAuthor"],
                                "ReplyBody": reply["ReplyBody"],
                                "ReplyTimestamp": reply["ReplyTimestamp"],
                                "ReplyUpvotes": int(reply["ReplyUpvotes"]),
                                "ReplyPermalink": reply["ReplyPermalink"],
                            }
                            for reply in comment.get("Replies", [])
                        ],
                    }
                    for comment in post["Comments"]
                ],
            }
|