cathw committed
Commit 117da23
1 Parent(s): 7eb7822

Upload reddit_climate_data.py

Files changed (1)
  1. reddit_climate_data.py +182 -0
reddit_climate_data.py ADDED
"""Hugging Face `datasets` loading script for a corpus of Reddit climate-change comments,
nested by subreddit, post, and comment."""

import json
import logging
import os

import datasets

# TODO: Add BibTeX citation
# Find, for instance, the citation on arXiv or on the dataset repo/website.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2024}
}
"""

# Description shown on the dataset page.
_DESCRIPTION = """\
Reddit comments discussing climate change, collected per subreddit and nested by post,
with author, body, timestamp, upvote count, and number of replies for each comment.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
_URLS = {
    "reddit_climate": "cathw/reddit_climate_comment"
}
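# Note (assumption): dl_manager.download_and_extract expects resolvable URLs rather than a
# bare Hub repo ID, so the entry above likely needs the resolve-URL form, e.g.
# https://huggingface.co/datasets/cathw/reddit_climate_comment/resolve/main/<file>
# (the exact file name is hypothetical).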


class RedditClimateData(datasets.GeneratorBasedBuilder):
    """Reddit climate-change comments, nested by subreddit, post, and comment."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need complex sub-parts with configurable options, you can define your own
    # builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load a specific configuration from the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="reddit_climate",
            version=VERSION,
            description="Reddit climate-change comments grouped by subreddit and post",
        ),
    ]

    # It's not mandatory to have a default configuration; set one only if it makes sense.
    DEFAULT_CONFIG_NAME = "reddit_climate"

    def _info(self):
        features = datasets.Features({
            "Subreddit": datasets.Value("string"),
            "Posts": datasets.Sequence({
                "PostID": datasets.Value("int32"),
                "PostTitle": datasets.Value("string"),
                "Comments": datasets.Sequence({
                    "CommentID": datasets.Value("string"),
                    "Author": datasets.Value("string"),
                    "CommentBody": datasets.Value("string"),
                    "Timestamp": datasets.Value("string"),
                    "Upvotes": datasets.Value("int32"),
                    "NumberofReplies": datasets.Value("int32"),
                }),
            }),
        })
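        # For illustration, a single raw example as yielded by `_generate_examples`
        # below (all values hypothetical). Note that `datasets` re-encodes a Sequence
        # of dicts as a dict of lists when the dataset is read back.
        #   {
        #       "Subreddit": "climate",
        #       "Posts": [{
        #           "PostID": 1,
        #           "PostTitle": "An example post title",
        #           "Comments": [{
        #               "CommentID": "abc123",
        #               "Author": "example_user",
        #               "CommentBody": "An example comment",
        #               "Timestamp": "2024-01-01 00:00:00",
        #               "Upvotes": 10,
        #               "NumberofReplies": 2,
        #           }],
        #       }],
        #   }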
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            # (the nested features defined above).
            features=features,
            # If there's a common (input, target) tuple in the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # This method downloads/extracts the data and defines the splits, depending on
        # the configuration. If several configurations are possible (listed in
        # BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download
        # and extract URLs. It accepts any nested list/dict and gives back the same
        # structure with each URL replaced by a path to a local file. By default, archives
        # are extracted and the path to the cached extraction folder is returned instead
        # of the archive itself.
        config_name = getattr(self.config, "name", self.DEFAULT_CONFIG_NAME)
        urls = _URLS[config_name]  # fail fast on an unknown configuration name
        data_dir = dl_manager.download_and_extract(urls)
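        # Assumption about layout: `urls` is taken here to resolve to a single archive
        # whose extraction yields train.jsonl, dev.jsonl, and test.jsonl at the top level
        # of `data_dir`. If the source is instead one URL per split, `urls` can be a dict
        # such as {"train": ..., "dev": ..., "test": ...}; download_and_extract would then
        # return a dict of local paths with the same keys.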
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.jsonl"),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                    "split": "test",
                },
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        # This method reads the file produced in `_split_generators` and yields
        # (key, example) tuples. The `key` is for legacy reasons (tfds) and is not
        # important in itself, but it must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            # The split files use a .jsonl extension, which implies JSON Lines (one
            # object per line); json.load() on the whole file would fail for that
            # format, so parse line by line instead.
            data = [json.loads(line) for line in f if line.strip()]
        for idx, row in enumerate(data):
            subreddit = row["Subreddit"]
            posts = []

            # Check if the "Posts" key is present in the current row
            if "Posts" in row:
                for post in row["Posts"]:
                    post_id = post["PostID"]
                    post_title = post["PostTitle"]
                    comments = []
                    for comment in post["Comments"]:
                        comment_id = comment["CommentID"]
                        author = comment["Author"]
                        comment_body = comment["CommentBody"]
                        timestamp = comment["Timestamp"]
                        upvotes = comment["Upvotes"]
                        number_of_replies = comment["NumberofReplies"]
                        logging.debug(f"Processing comment: {comment_id}, Upvotes: {upvotes}")
                        comments.append({
                            "CommentID": comment_id,
                            "Author": author,
                            "CommentBody": comment_body,
                            "Timestamp": timestamp,
                            "Upvotes": upvotes,
                            "NumberofReplies": number_of_replies,
                        })
                    posts.append({
                        "PostID": post_id,
                        "PostTitle": post_title,
                        "Comments": comments,
                    })
            else:
                # Rows without a "Posts" key are kept, with Posts left null.
                posts = None

            yield idx, {
                "Subreddit": subreddit,
                "Posts": posts,
            }
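
For context, a minimal sketch of how the uploaded script could be exercised once the split files resolve; the relative script path is an assumption, and trust_remote_code applies to recent releases of the datasets library:

import datasets

# Load via the local loading script; "reddit_climate" is the config defined above.
# Recent `datasets` releases require trust_remote_code=True for script-based datasets.
ds = datasets.load_dataset("./reddit_climate_data.py", "reddit_climate", trust_remote_code=True)

# Each row is one subreddit; `datasets` returns Sequence-of-dict features as dicts of lists.
row = ds["train"][0]
print(row["Subreddit"], "posts:", len(row["Posts"]["PostTitle"]))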