Sub-tasks: multi-class-classification
Languages: English
Size: 1K<n<10K
Tags: natural-language-understanding, ideology classification, text classification, natural language processing
License: cc-by-4.0
EricR401S committed
Commit: 0010451
Parent(s): a225649
Commit message: done
Pill_Ideologies-Post_Titles.py
CHANGED
@@ -53,10 +53,8 @@ _LICENSE = "cc" # cc-by-4.0
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    # "first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/blob/main/reddit_posts_fm.csv",
-    # "first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/raw/main/reddit_posts_fm.csv",
     "first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/resolve/main/reddit_posts_fm.csv",
-    "second_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/
+    "second_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/resolve/main/reddit_posts_fm.csv",
 }
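The commented-out /blob/ and /raw/ URL forms explain the fix in this hunk: on the Hugging Face Hub, /blob/ serves the HTML file viewer and /raw/ serves the raw Git object (for an LFS-tracked file, just a small pointer stub), so only /resolve/ redirects to the actual file contents. A minimal sketch of the difference, assuming the CSV is publicly downloadable:

import pandas as pd

BASE = "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles"

# /resolve/ follows any LFS pointer and returns the real CSV bytes,
# so pandas can read the file directly over HTTP.
df = pd.read_csv(f"{BASE}/resolve/main/reddit_posts_fm.csv")
print(df.shape)

# By contrast, f"{BASE}/blob/main/reddit_posts_fm.csv" returns an HTML page
# and f"{BASE}/raw/main/reddit_posts_fm.csv" may return only an LFS pointer,
# either of which would make pd.read_csv fail or parse garbage.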
@@ -166,11 +164,10 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
-        print(urls)
         data_dir = dl_manager.download_and_extract(urls)
-        print(data_dir, type(data_dir), "checking type")
         data = pd.read_csv(data_dir)
-
+
+        # make splits
         train, test = train_test_split(
             data, test_size=0.10, stratify=data["subreddit"], random_state=42
         )
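The split kept by this hunk holds out 10% of rows for the test set while stratifying on the subreddit column, so each ideology keeps roughly the same share in both splits, and the fixed random_state makes the split reproducible across dataset rebuilds. A small self-contained sketch of that behavior (the subreddit values here are made up for illustration):

import pandas as pd
from sklearn.model_selection import train_test_split

# Toy stand-in for the real CSV; only the "subreddit" column name
# comes from the script, the values are hypothetical.
data = pd.DataFrame({
    "title": [f"post {i}" for i in range(100)],
    "subreddit": ["sub_a"] * 70 + ["sub_b"] * 30,
})

train, test = train_test_split(
    data, test_size=0.10, stratify=data["subreddit"], random_state=42
)

# Both splits keep the original 70/30 class balance.
print(train["subreddit"].value_counts(normalize=True))
print(test["subreddit"].value_counts(normalize=True))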
@@ -209,14 +206,9 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-
-        print(split, "is the split")
-        print(filepath.shape, "is the filepath")
-        i = 0
+
         for key, row in filepath.iterrows():
-
-            # print(key, "is the key", i, "is the index")
-            # print(row, "is the row")
+
             if self.config.name == "first_domain":
                 yield key, {
                     "subreddit": row.get("subreddit"),
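Note that _split_generators hands each split a pandas DataFrame rather than a file path (hence the .shape debug print being removed above), so the filepath argument is a DataFrame and iterrows() supplies the unique key the builder requires from its index. A minimal sketch of that (key, example) contract, using a toy frame:

import pandas as pd

# Toy stand-in for the DataFrame that _split_generators passes along.
frame = pd.DataFrame({"subreddit": ["sub_a", "sub_b"], "title": ["t1", "t2"]})

def generate_examples(frame):
    # iterrows() yields (index, row); the index doubles as the unique key.
    for key, row in frame.iterrows():
        yield key, {"subreddit": row.get("subreddit"), "title": row.get("title")}

for key, example in generate_examples(frame):
    print(key, example)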
@@ -235,7 +227,6 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
                     "num_reports": row.get("num_reports"),
                     "is_video": row.get("is_video"),
                 }
-                i += 1

             else:
                 yield key, {
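With the resolve URLs in place and the debug prints removed, the loading script should run end to end. A hedged usage sketch (the config name comes from _URLS; recent datasets releases require trust_remote_code for script-based datasets):

from datasets import load_dataset

ds = load_dataset(
    "steamcyclone/Pill_Ideologies-Post_Titles",
    "first_domain",           # config key from _URLS
    trust_remote_code=True,   # needed for datasets that ship a loading script
)
print(ds)  # expected: train/test splits from the 90/10 stratified split
print(ds["train"][0]["subreddit"])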