EricR401S committed on
Commit
5a4bb8e
•
1 Parent(s): 9ca16dd

testing generating examples

Files changed (2)
  1. Pill_Ideologies-Post_Titles.py +27 -20
  2. test_notebook.ipynb +109 -17
Pill_Ideologies-Post_Titles.py CHANGED
@@ -191,27 +191,34 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, data, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         print("inside generate examples")
         print(split, "is the split")
-        print(filepath.shape, "is the filepath")
-        # with open(filepath, encoding="utf-8") as f:
-        #     for key, row in enumerate(f):
-        #         data = json.loads(row)
-        #         if self.config.name == "first_domain":
-        #             # Yields examples as (key, example) tuples
-        #             yield key, {
-        #                 "sentence": data["sentence"],
-        #                 "option1": data["option1"],
-        #                 "answer": "" if split == "test" else data["answer"],
-        #             }
-        #         else:
-        #             yield key, {
-        #                 "sentence": data["sentence"],
-        #                 "option2": data["option2"],
-        #                 "second_domain_answer": (
-        #                     "" if split == "test" else data["second_domain_answer"]
-        #                 ),
-        #             }
+        print(data.shape, "is the filepath")
+        for key, row in data.iterrows():
+            # print(row, "is the row")
+            if self.config.name == "first_domain":
+                yield key, {
+                    "subreddit": row.get("subreddit"),
+                    "id": row.get("id"),
+                    "title": row.get("title"),
+                    "text": row.get("text"),
+                    "url": row.get("url"),
+                    "score": row.get("score"),
+                    "author": row.get("author"),
+                    "date": row.get("date"),
+                }
+
+            else:
+                yield key, {
+                    "subreddit": row.get("subreddit"),
+                    "id": row.get("id"),
+                    "title": row.get("title"),
+                    "text": row.get("text"),
+                    "url": row.get("url"),
+                    "score": row.get("score"),
+                    "author": row.get("author"),
+                    "date": row.get("date"),
+                }
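
Since _generate_examples now receives an in-memory pandas DataFrame (data) instead of a file path, the matching _split_generators, which sits outside this hunk, has to read the downloaded CSV and pass one frame per split through gen_kwargs. A minimal sketch of that counterpart, assuming pandas is imported as pd in the script and that the gen_kwargs keys match the new signature above; the real method in the repository may differ:

    def _split_generators(self, dl_manager):
        # Hypothetical illustration only. The notebook log below
        # ("splits complete with scikit learn") suggests the real script
        # splits with scikit-learn; plain slicing keeps this sketch short.
        path = dl_manager.download(_URL)  # _URL: assumed link to reddit_posts_fm.csv
        df = pd.read_csv(path)
        cut = int(len(df) * 0.8)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": df.iloc[:cut], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": df.iloc[cut:], "split": "test"},
            ),
        ]

Throughout the new body, row.get("column") is the safe accessor: pandas.Series.get returns None when the label is missing, while row["column"] would raise a KeyError.
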
test_notebook.ipynb CHANGED
@@ -2,7 +2,7 @@
 "cells": [
 {
 "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 21,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -11,29 +11,47 @@
 },
 {
 "cell_type": "code",
- "execution_count": 6,
+ "execution_count": 32,
 "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "Downloading builder script: 100%|██████████| 9.38k/9.38k [00:00<00:00, 9.38MB/s]\n"
+ "Downloading builder script: 100%|██████████| 10.1k/10.1k [00:00<00:00, 9.99MB/s]\n"
 ]
 },
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
- "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/blob/main/reddit_posts_fm.csv\n"
+ "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/raw/main/reddit_posts_fm.csv\n",
+ "C:\\Users\\ericr\\.cache\\huggingface\\datasets\\downloads\\0d4d993845dca9fad020a0cdc59de50781db2df85a57686ea495bc3a11e12dd8 <class 'datasets.utils.track.tracked_str'> checking type\n",
+ "no Error post pandas read csv\n",
+ "splits complete with scikit learn\n"
 ]
 },
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "Downloading data: 100%|██████████| 48.0k/48.0k [00:00<00:00, 1.40MB/s]\n",
- "Generating train split: 0 examples [00:00, ? examples/s]\n"
+ "Generating train split: 0 examples [00:00, ? examples/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "inside generate examples\n",
+ "train is the split\n",
+ "(4499, 8) is the filepath\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n"
 ]
 },
 {
@@ -42,15 +60,12 @@
 "output_type": "error",
 "traceback": [
  "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
  "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1726\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split_single\u001b[1;34m(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\u001b[0m\n\u001b[0;32m 1725\u001b[0m _time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[1;32m-> 1726\u001b[0m \u001b[43m\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrecord\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mgenerator\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m 1727\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mmax_shard_size\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mis\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mand\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mwriter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_num_bytes\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m>\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mmax_shard_size\u001b[49m\u001b[43m:\u001b[49m\n",
- "File \u001b[1;32m~\\.cache\\huggingface\\modules\\datasets_modules\\datasets\\steamcyclone--Pill_Ideologies-Post_Titles\\b9769a66aafdd51743be385ed8b5c1188cf1bb911c2283a4d495a71e5eea207d\\Pill_Ideologies-Post_Titles.py:185\u001b[0m, in \u001b[0;36mSubRedditPosts._generate_examples\u001b[1;34m(self, filepath, split)\u001b[0m\n\u001b[0;32m 182\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_generate_examples\u001b[39m(\u001b[38;5;28mself\u001b[39m, filepath, split):\n\u001b[0;32m 183\u001b[0m \u001b[38;5;66;03m# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.\u001b[39;00m\n\u001b[0;32m 184\u001b[0m \u001b[38;5;66;03m# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.\u001b[39;00m\n\u001b[1;32m--> 185\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfilepath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mencoding\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mutf-8\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[0;32m 186\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m key, row \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(f):\n",
- "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\streaming.py:75\u001b[0m, in \u001b[0;36mextend_module_for_streaming.<locals>.wrap_auth.<locals>.wrapper\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 73\u001b[0m \u001b[38;5;129m@wraps\u001b[39m(function)\n\u001b[0;32m 74\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrapper\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
- "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\download\\streaming_download_manager.py:501\u001b[0m, in \u001b[0;36mxopen\u001b[1;34m(file, mode, download_config, *args, **kwargs)\u001b[0m\n\u001b[0;32m 500\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_local_path(main_hop):\n\u001b[1;32m--> 501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mmain_hop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 502\u001b[0m \u001b[38;5;66;03m# add headers and cookies for authentication on the HF Hub and for Google Drive\u001b[39;00m\n",
- "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'C:\\\\Users\\\\ericr\\\\.cache\\\\huggingface\\\\datasets\\\\downloads\\\\4b51286d8928be7cb69e9f832ace34b264a15b9a5d12d1f9c812eee79f9c19e9\\\\train.jsonl'",
+ "\u001b[1;31mTypeError\u001b[0m: 'NoneType' object is not iterable",
  "\nThe above exception was the direct cause of the following exception:\n",
  "\u001b[1;31mDatasetGenerationError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[1;32mIn[6], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m test \u001b[38;5;241m=\u001b[39m \u001b[43mdatasets\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msteamcyclone/Pill_Ideologies-Post_Titles\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
+ "Cell \u001b[1;32mIn[32], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m test \u001b[38;5;241m=\u001b[39m \u001b[43mdatasets\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m"\u001b[39;49m\u001b[38;5;124;43msteamcyclone/Pill_Ideologies-Post_Titles\u001b[39;49m\u001b[38;5;124;43m"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrust_remote_code\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n",
  "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\load.py:2549\u001b[0m, in \u001b[0;36mload_dataset\u001b[1;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[0;32m 2546\u001b[0m try_from_hf_gcs \u001b[38;5;241m=\u001b[39m path \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m _PACKAGED_DATASETS_MODULES\n\u001b[0;32m 2548\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[1;32m-> 2549\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 2550\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2551\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2552\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2553\u001b[0m \u001b[43m \u001b[49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2554\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2555\u001b[0m \u001b[43m \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2556\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 2558\u001b[0m \u001b[38;5;66;03m# Build dataset for splits\u001b[39;00m\n\u001b[0;32m 2559\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m 2560\u001b[0m keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[0;32m 2561\u001b[0m )\n",
  "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1005\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[1;34m(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[0;32m 1003\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 1004\u001b[0m prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[1;32m-> 1005\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1006\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1007\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1008\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1009\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1010\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1011\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[0;32m 1012\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
  "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1767\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._download_and_prepare\u001b[1;34m(self, dl_manager, verification_mode, **prepare_splits_kwargs)\u001b[0m\n\u001b[0;32m 1766\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_download_and_prepare\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager, verification_mode, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mprepare_splits_kwargs):\n\u001b[1;32m-> 1767\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1768\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1769\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1770\u001b[0m \u001b[43m \u001b[49m\u001b[43mcheck_duplicate_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mBASIC_CHECKS\u001b[49m\n\u001b[0;32m 1771\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mALL_CHECKS\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1772\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_splits_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1773\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
@@ -64,7 +79,7 @@
 "source": [
 "\n",
 "\n",
- "test = datasets.load_dataset(\"steamcyclone/Pill_Ideologies-Post_Titles\")"
+ "test = datasets.load_dataset(\"steamcyclone/Pill_Ideologies-Post_Titles\", trust_remote_code=True)"
 ]
 },
 {
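
Note on the change in this hunk: the reload cell now passes trust_remote_code=True, which newer releases of the datasets library expect before they will execute a repository's loader script (the script is arbitrary code, so it requires an explicit opt-in). Usage, as in the notebook:

    import datasets

    # Opt in to running the dataset's loader script from the Hub.
    test = datasets.load_dataset(
        "steamcyclone/Pill_Ideologies-Post_Titles",
        trust_remote_code=True,
    )
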
  },
252
  {
253
  "cell_type": "code",
254
+ "execution_count": 14,
255
  "metadata": {},
256
+ "outputs": [
257
+ {
258
+ "data": {
259
+ "text/plain": [
260
+ "((4499, 8), (1125, 8), (625, 8))"
261
+ ]
262
+ },
263
+ "execution_count": 14,
264
+ "metadata": {},
265
+ "output_type": "execute_result"
266
+ }
267
+ ],
268
  "source": [
269
  "# make stratified train, validation, and test sets\n",
270
  "\n",
271
  "from sklearn.model_selection import train_test_split\n",
272
  "\n",
273
+ "train, test = train_test_split(df, test_size=0.10, stratify=df['subreddit'], random_state=42)\n",
274
+ "train, val = train_test_split(train, test_size=0.20, stratify=train['subreddit'], random_state=42)\n",
275
  "\n",
276
  "train.shape, val.shape, test.shape"
277
  ]
278
+ },
279
+ {
280
+ "cell_type": "code",
281
+ "execution_count": 43,
282
+ "metadata": {},
283
+ "outputs": [
284
+ {
285
+ "data": {
286
+ "text/plain": [
287
+ "array(['theredpillrebooted', '17c1wxt',\n",
288
+ " 'My name is Benjamin Persits and I am so sick of women.',\n",
289
+ " 'Yes, I\\'m using my full name because I\\'m so sick off all of this. The women in my dorm have become so obnoxious. They are so fucking annoying. I try to talk to any of them and it\\'s all \"sexual harassment\" and all that. I saw a cute girl by my dorm room sink the other day and we started talking. She wanders off and I walk after her while we chat. All of a sudden this is sexual harassment. Or when girls come on to me and when I lean in to kiss they don\\'t want to all of a sudden. Absolutely ridiculous. I\\'m not going to be quiet about feeling this way anymore. I\\'m sick of dealing with women, in my classes, anywhere. I\\'m so done. ',\n",
+ " nan, 0, 'benjypersits', '2023-10-20 03:40:33'], dtype=object)"
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "next(df.iterrows())[1].values"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "<generator object DataFrame.iterrows at 0x000001A84F706F00>"
+ ]
+ },
+ "execution_count": 44,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.iterrows()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "stop = 10\n",
+ "\n",
+ "for key, row in df.iterrows():\n",
+ "    # print(row.values)\n",
+ "    print(row.get('subreddit'))\n",
+ "    stop -= 1\n",
+ "    if stop == 0:\n",
+ "        break"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
 }
 ],
 "metadata": {