Datasets:
Sub-tasks:
multi-class-classification
Languages:
English
Size:
1K<n<10K
Tags:
natural-language-understanding
ideology classification
text classification
natural language processing
License:
EricR401S
committed on
Commit
·
a80dae0
1
Parent(s):
0918e49
testing generating examples
Browse files
Pill_Ideologies-Post_Titles.py
CHANGED
@@ -55,7 +55,7 @@ _LICENSE = "cc" # cc-by-4.0
|
|
55 |
_URLS = {
|
56 |
# "first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/blob/main/reddit_posts_fm.csv",
|
57 |
"first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/raw/main/reddit_posts_fm.csv",
|
58 |
-
"second_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/
|
59 |
}
|
60 |
|
61 |
|
@@ -155,14 +155,14 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
|
|
155 |
data_dir = dl_manager.download_and_extract(urls)
|
156 |
print(data_dir, type(data_dir), "checking type")
|
157 |
data = pd.read_csv(data_dir)
|
158 |
-
print("Error post pandas read csv")
|
159 |
-
print(data.head())
|
160 |
train, test = train_test_split(
|
161 |
data, test_size=0.10, stratify=data["subreddit"], random_state=42
|
162 |
)
|
163 |
train, val = train_test_split(
|
164 |
train, test_size=0.20, stratify=train["subreddit"], random_state=42
|
165 |
)
|
|
|
166 |
return [
|
167 |
datasets.SplitGenerator(
|
168 |
name=datasets.Split.TRAIN,
|
@@ -194,6 +194,8 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
|
|
194 |
def _generate_examples(self, filepath, split):
|
195 |
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
196 |
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
|
|
|
|
197 |
with open(filepath, encoding="utf-8") as f:
|
198 |
for key, row in enumerate(f):
|
199 |
data = json.loads(row)
|
|
|
55 |
_URLS = {
|
56 |
# "first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/blob/main/reddit_posts_fm.csv",
|
57 |
"first_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/raw/main/reddit_posts_fm.csv",
|
58 |
+
"second_domain": "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/raw/main/reddit_posts_fm.csv",
|
59 |
}
|
60 |
|
61 |
|
|
|
155 |
data_dir = dl_manager.download_and_extract(urls)
|
156 |
print(data_dir, type(data_dir), "checking type")
|
157 |
data = pd.read_csv(data_dir)
|
158 |
+
print("no Error post pandas read csv")
|
|
|
159 |
train, test = train_test_split(
|
160 |
data, test_size=0.10, stratify=data["subreddit"], random_state=42
|
161 |
)
|
162 |
train, val = train_test_split(
|
163 |
train, test_size=0.20, stratify=train["subreddit"], random_state=42
|
164 |
)
|
165 |
+
print("splits complete with scikit learn")
|
166 |
return [
|
167 |
datasets.SplitGenerator(
|
168 |
name=datasets.Split.TRAIN,
|
|
|
194 |
def _generate_examples(self, filepath, split):
|
195 |
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
196 |
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
197 |
+
print("inside generate examples")
|
198 |
+
print(filepath, split)
|
199 |
with open(filepath, encoding="utf-8") as f:
|
200 |
for key, row in enumerate(f):
|
201 |
data = json.loads(row)
|