Sub-tasks: multi-class-classification
Languages: English
Size: 1K<n<10K
Tags: natural-language-understanding, ideology classification, text classification, natural language processing
License:
Commit eb50c8e
EricR401S committed
Parent(s): 16340cb

colab is impossible

Files changed:
- Pill_Ideologies-Post_Titles.py (+16, -20)
- README.md (+1, -1)
- reddit_posts_fm.csv (+2, -2)
- redditscraper_fm.py (+21, -0)
Pill_Ideologies-Post_Titles.py CHANGED
@@ -114,7 +114,6 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
                     "ups": datasets.Value("int32"),
                     "downs": datasets.Value("int32"),
                     "upvote_ratio": datasets.Value("float32"),
-                    "num_reports": datasets.Value("string"),
                     "is_video": datasets.Value("bool"),
                     # These are the features of your dataset like images, labels ...
                 }
@@ -135,7 +134,6 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
                     "ups": datasets.Value("int32"),
                     "downs": datasets.Value("int32"),
                     "upvote_ratio": datasets.Value("float32"),
-                    "num_reports": datasets.Value("string"),
                     "is_video": datasets.Value("bool"),
                     # These are the features of your dataset like images, labels ...
                 }
@@ -167,23 +165,23 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
         data_dir = dl_manager.download_and_extract(urls)
         data = pd.read_csv(data_dir)
 
-        def clean_data_nans(df):
-            """This function takes a dataframe and fills all NaNs with a value
-            This is to appease google colab, because my local machine did not raise errors
-            ... and it's a windows. That should tell you a lot."""
-            for col in data.columns:
-                print(f"Cleaning NaNs in {col}")
-                if df[col].dtype == "object":
-                    df[col].fillna("NAN -Nothing found", inplace=True)
-                elif df[col].dtype in ["int64", "float64", "int32", "float32"]:
-                    df[col].fillna(0, inplace=True)
-                elif df[col].dtype == "bool":
-                    df[col].fillna(False, inplace=True)
-                else:
-                    df[col].fillna("NAN - problematic {col} found", inplace=True)
-            return None
+        # def clean_data_nans(df):
+        #     """This function takes a dataframe and fills all NaNs with a value
+        #     This is to appease google colab, because my local machine did not raise errors
+        #     ... and it's a windows. That should tell you a lot."""
+        #     for col in data.columns:
+        #         print(f"Cleaning NaNs in {col}")
+        #         if df[col].dtype == "object":
+        #             df[col].fillna("NAN -Nothing found", inplace=True)
+        #         elif df[col].dtype in ["int64", "float64", "int32", "float32"]:
+        #             df[col].fillna(0, inplace=True)
+        #         elif df[col].dtype == "bool":
+        #             df[col].fillna(False, inplace=True)
+        #         else:
+        #             df[col].fillna("NAN - problematic {col} found", inplace=True)
+        #     return None
 
-        clean_data_nans(data)
+        # clean_data_nans(data)
         print("PAssed the cleaning")
         # commented out the splits, due to google colab being uncooperative
         # raised too many errors that my local machine did not
@@ -246,7 +244,6 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
                     "ups": row.get("ups"),
                     "downs": row.get("downs"),
                     "upvote_ratio": row.get("upvote_ratio"),
-                    "num_reports": row.get("num_reports"),
                     "is_video": row.get("is_video"),
                 }
 
@@ -265,6 +262,5 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
                     "ups": row.get("ups"),
                     "downs": row.get("downs"),
                     "upvote_ratio": row.get("upvote_ratio"),
-                    "num_reports": row.get("num_reports"),
                     "is_video": row.get("is_video"),
                 }
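Aside: the now commented-out clean_data_nans carried two latent bugs that the commit preserves verbatim: the loop iterates over the global data instead of the df argument, and the else branch interpolates {col} without an f-prefix, so the literal braces end up in the fill value. A minimal corrected sketch of the same dtype-based fill; the function name and fill strings here are illustrative, not taken from the repo:

import pandas as pd

# Sketch only: dtype-aware NaN filling, mirroring clean_data_nans with the
# two bugs fixed (iterate over df, f-string in the fallback message).
# Bool columns are checked before numeric ones because pandas treats bool
# dtypes as numeric.
def fill_nans_by_dtype(df: pd.DataFrame) -> pd.DataFrame:
    out = df.copy()
    for col in out.columns:
        if out[col].dtype == "object":
            out[col] = out[col].fillna("NAN - Nothing found")
        elif pd.api.types.is_bool_dtype(out[col]):
            out[col] = out[col].fillna(False)
        elif pd.api.types.is_numeric_dtype(out[col]):
            out[col] = out[col].fillna(0)
        else:
            out[col] = out[col].fillna(f"NAN - problematic {col} found")
    return out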
README.md CHANGED
@@ -169,7 +169,7 @@ The main usage of this dataset is to study linguistic patterns. Running models a
 
 Here is an example analysis notebook showing what can be done with this type of data.
 
-Example : []
+Example : [https://colab.research.google.com/drive/15PL9RsmB6HLBvbI6KInq_Az7QhkrXxqi?usp=sharing]
 
 ### Direct Use
 
reddit_posts_fm.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5fdfbc4cc7c4d85fb76eeca459c0f05b8b64911e03df2c9d53a204ab6fb0e41f
+size 11241004
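This CSV is tracked with Git LFS, so the diff above only swaps the pointer file's recorded object hash and byte size; the actual data lives in LFS storage. A small sketch (not part of the repo) for checking that a downloaded reddit_posts_fm.csv matches the new pointer:

import hashlib

# Stream the file in chunks so large CSVs need not fit in memory.
def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the LFS pointer in the diff above.
expected = "5fdfbc4cc7c4d85fb76eeca459c0f05b8b64911e03df2c9d53a204ab6fb0e41f"
assert sha256_of("reddit_posts_fm.csv") == expected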
redditscraper_fm.py CHANGED
@@ -163,4 +163,25 @@ df = pd.DataFrame(
     ],
 )
 
+# columns that cannot be empty,so drop rows
+df = df.dropna(subset=["subreddit"])
+df = df.dropna(subset=["title"])
+df = df.drop(columns=["num_reports"])  # drop num_reports, always empty
+
+# cleaning to make colab importing the dataset through huggingface work
+values = {
+    "id": "",
+    "text": "",
+    "url": "",
+    "score": 0,
+    "date": 0.0,
+    "subreddit_subscribers": 0,
+    "num_comments": 0,
+    "ups": 0,
+    "downs": 0,
+    "upvote_ratio": 0.0,
+    "is_video": "False",
+}
+df.fillna(value=values, inplace=True)
+
 df.to_csv("reddit_posts_fm.csv", index=False)
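With the scraper pre-filling these defaults at export time, the loading script no longer needs to clean NaNs itself, which is what the commit's cleanup is working around. A hedged usage sketch of the Colab import this enables; the hub repo id below is an assumption inferred from this page's header, not something the commit confirms:

from datasets import load_dataset

# Assumed repo id (author/dataset name from the page header); substitute the
# real hub id. trust_remote_code=True is needed for script-backed datasets
# in recent versions of the datasets library.
ds = load_dataset("EricR401S/Pill_Ideologies-Post_Titles", trust_remote_code=True)
print(ds)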