Datasets:
Sub-tasks:
multi-class-classification
Languages:
English
Size:
1K<n<10K
Tags:
natural-language-understanding
ideology-classification
text-classification
natural-language-processing
License:
File size: 25,790 Bytes
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import datasets"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Downloading builder script: 100%|██████████| 9.38k/9.38k [00:00<00:00, 9.38MB/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/blob/main/reddit_posts_fm.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Downloading data: 100%|██████████| 48.0k/48.0k [00:00<00:00, 1.40MB/s]\n",
"Generating train split: 0 examples [00:00, ? examples/s]\n"
]
},
{
"ename": "DatasetGenerationError",
"evalue": "An error occurred while generating the dataset",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1726\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split_single\u001b[1;34m(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\u001b[0m\n\u001b[0;32m 1725\u001b[0m _time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[1;32m-> 1726\u001b[0m \u001b[43m\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrecord\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mgenerator\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m 1727\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mmax_shard_size\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mis\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mand\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mwriter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_num_bytes\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m>\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mmax_shard_size\u001b[49m\u001b[43m:\u001b[49m\n",
"File \u001b[1;32m~\\.cache\\huggingface\\modules\\datasets_modules\\datasets\\steamcyclone--Pill_Ideologies-Post_Titles\\b9769a66aafdd51743be385ed8b5c1188cf1bb911c2283a4d495a71e5eea207d\\Pill_Ideologies-Post_Titles.py:185\u001b[0m, in \u001b[0;36mSubRedditPosts._generate_examples\u001b[1;34m(self, filepath, split)\u001b[0m\n\u001b[0;32m 182\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_generate_examples\u001b[39m(\u001b[38;5;28mself\u001b[39m, filepath, split):\n\u001b[0;32m 183\u001b[0m \u001b[38;5;66;03m# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.\u001b[39;00m\n\u001b[0;32m 184\u001b[0m \u001b[38;5;66;03m# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.\u001b[39;00m\n\u001b[1;32m--> 185\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfilepath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mencoding\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mutf-8\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[0;32m 186\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m key, row \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(f):\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\streaming.py:75\u001b[0m, in \u001b[0;36mextend_module_for_streaming.<locals>.wrap_auth.<locals>.wrapper\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 73\u001b[0m \u001b[38;5;129m@wraps\u001b[39m(function)\n\u001b[0;32m 74\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrapper\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\download\\streaming_download_manager.py:501\u001b[0m, in \u001b[0;36mxopen\u001b[1;34m(file, mode, download_config, *args, **kwargs)\u001b[0m\n\u001b[0;32m 500\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_local_path(main_hop):\n\u001b[1;32m--> 501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mmain_hop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 502\u001b[0m \u001b[38;5;66;03m# add headers and cookies for authentication on the HF Hub and for Google Drive\u001b[39;00m\n",
"\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'C:\\\\Users\\\\ericr\\\\.cache\\\\huggingface\\\\datasets\\\\downloads\\\\4b51286d8928be7cb69e9f832ace34b264a15b9a5d12d1f9c812eee79f9c19e9\\\\train.jsonl'",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[1;31mDatasetGenerationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[6], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m test \u001b[38;5;241m=\u001b[39m \u001b[43mdatasets\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msteamcyclone/Pill_Ideologies-Post_Titles\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\load.py:2549\u001b[0m, in \u001b[0;36mload_dataset\u001b[1;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[0;32m 2546\u001b[0m try_from_hf_gcs \u001b[38;5;241m=\u001b[39m path \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m _PACKAGED_DATASETS_MODULES\n\u001b[0;32m 2548\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[1;32m-> 2549\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 2550\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2551\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2552\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2553\u001b[0m \u001b[43m \u001b[49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2554\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2555\u001b[0m \u001b[43m \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 2556\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 2558\u001b[0m \u001b[38;5;66;03m# Build dataset for splits\u001b[39;00m\n\u001b[0;32m 2559\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m 2560\u001b[0m keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[0;32m 2561\u001b[0m )\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1005\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[1;34m(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[0;32m 1003\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 1004\u001b[0m prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[1;32m-> 1005\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1006\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1007\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1008\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1009\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1010\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1011\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[0;32m 1012\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1767\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._download_and_prepare\u001b[1;34m(self, dl_manager, verification_mode, **prepare_splits_kwargs)\u001b[0m\n\u001b[0;32m 1766\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_download_and_prepare\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager, verification_mode, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mprepare_splits_kwargs):\n\u001b[1;32m-> 1767\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1768\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1769\u001b[0m \u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1770\u001b[0m \u001b[43m \u001b[49m\u001b[43mcheck_duplicate_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mBASIC_CHECKS\u001b[49m\n\u001b[0;32m 1771\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mVerificationMode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mALL_CHECKS\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1772\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_splits_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1773\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1100\u001b[0m, in \u001b[0;36mDatasetBuilder._download_and_prepare\u001b[1;34m(self, dl_manager, verification_mode, **prepare_split_kwargs)\u001b[0m\n\u001b[0;32m 1096\u001b[0m split_dict\u001b[38;5;241m.\u001b[39madd(split_generator\u001b[38;5;241m.\u001b[39msplit_info)\n\u001b[0;32m 1098\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m 1099\u001b[0m \u001b[38;5;66;03m# Prepare split will record examples associated to the split\u001b[39;00m\n\u001b[1;32m-> 1100\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_prepare_split\u001b[49m\u001b[43m(\u001b[49m\u001b[43msplit_generator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1101\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m 1102\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m(\n\u001b[0;32m 1103\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot find data file. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1104\u001b[0m \u001b[38;5;241m+\u001b[39m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmanual_download_instructions \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 1105\u001b[0m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mOriginal error:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1106\u001b[0m \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mstr\u001b[39m(e)\n\u001b[0;32m 1107\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1605\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split\u001b[1;34m(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size)\u001b[0m\n\u001b[0;32m 1603\u001b[0m job_id \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m 1604\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m pbar:\n\u001b[1;32m-> 1605\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mjob_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdone\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_prepare_split_single\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1606\u001b[0m \u001b[43m \u001b[49m\u001b[43mgen_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgen_kwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mjob_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mjob_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m_prepare_split_args\u001b[49m\n\u001b[0;32m 1607\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m 1608\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mdone\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m 1609\u001b[0m \u001b[43m \u001b[49m\u001b[43mresult\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\n",
"File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1762\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._prepare_split_single\u001b[1;34m(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\u001b[0m\n\u001b[0;32m 1760\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(e, SchemaInferenceError) \u001b[38;5;129;01mand\u001b[39;00m e\u001b[38;5;241m.\u001b[39m__context__ \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 1761\u001b[0m e \u001b[38;5;241m=\u001b[39m e\u001b[38;5;241m.\u001b[39m__context__\n\u001b[1;32m-> 1762\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m DatasetGenerationError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAn error occurred while generating the dataset\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m 1764\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m job_id, \u001b[38;5;28;01mTrue\u001b[39;00m, (total_num_examples, total_num_bytes, writer\u001b[38;5;241m.\u001b[39m_features, num_shards, shard_lengths)\n",
"\u001b[1;31mDatasetGenerationError\u001b[0m: An error occurred while generating the dataset"
]
}
],
"source": [
"\n",
"\n",
"test = datasets.load_dataset(\"steamcyclone/Pill_Ideologies-Post_Titles\")"
]
},
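{
"cell_type": "markdown",
"metadata": {},
"source": [
"The loading script fails with a `DatasetGenerationError`: it ends up looking for a cached `train.jsonl` that was never downloaded. As a workaround, the underlying CSV can be read straight from the Hub. This is a minimal sketch that assumes the file is public; it swaps the `blob` URL printed above for the `resolve` URL, which serves the raw file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# Raw-file URL: same path as the blob URL printed above, with /blob/ swapped for /resolve/\n",
"url = \"https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles/resolve/main/reddit_posts_fm.csv\"\n",
"df_hub = pd.read_csv(url)\n",
"df_hub.shape"
]
},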
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv(r'C:\\Users\\ericr\\Documents\\STA663Repo\\Hugginface\\Pill-Ideologies-New-Test\\reddit_posts_fm.csv')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>subreddit</th>\n",
" <th>id</th>\n",
" <th>title</th>\n",
" <th>text</th>\n",
" <th>url</th>\n",
" <th>score</th>\n",
" <th>author</th>\n",
" <th>date</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>theredpillrebooted</td>\n",
" <td>17c1wxt</td>\n",
" <td>My name is Benjamin Persits and I am so sick o...</td>\n",
" <td>Yes, I'm using my full name because I'm so sic...</td>\n",
" <td>NaN</td>\n",
" <td>0</td>\n",
" <td>benjypersits</td>\n",
" <td>2023-10-20 03:40:33</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>theredpillrebooted</td>\n",
" <td>16i06xc</td>\n",
" <td>WHAT THE FUCK!?!?!?!?</td>\n",
" <td>NaN</td>\n",
" <td>https://v.redd.it/8d1eapsih3ob1</td>\n",
" <td>0</td>\n",
" <td>PatchesTheIdiot</td>\n",
" <td>2023-09-13 21:57:56</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>theredpillrebooted</td>\n",
" <td>16a4ekb</td>\n",
" <td>Why is she an asshole with me and how to handl...</td>\n",
" <td>So I got this girl who's kinda my crush but I ...</td>\n",
" <td>NaN</td>\n",
" <td>0</td>\n",
" <td>Worried-Horse-3408</td>\n",
" <td>2023-09-04 21:19:27</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>theredpillrebooted</td>\n",
" <td>15f45sl</td>\n",
" <td>Popular US vlogger Gonzalo Lira aka Coach Red ...</td>\n",
" <td>&amp;#x200B;\\n\\n[https://chng.it/gFyFvqS5K7](h...</td>\n",
" <td>NaN</td>\n",
" <td>0</td>\n",
" <td>Beautiful_Diamond980</td>\n",
" <td>2023-08-01 06:21:36</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>theredpillrebooted</td>\n",
" <td>12wwyzc</td>\n",
" <td>Mrs. Rooster by Stinky Buckets</td>\n",
" <td>NaN</td>\n",
" <td>https://youtube.com/watch?v=jK5BQngWne4&amp;fe...</td>\n",
" <td>1</td>\n",
" <td>LetsGoRedDevils</td>\n",
" <td>2023-04-24 00:50:48</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" subreddit id \\\n",
"0 theredpillrebooted 17c1wxt \n",
"1 theredpillrebooted 16i06xc \n",
"2 theredpillrebooted 16a4ekb \n",
"3 theredpillrebooted 15f45sl \n",
"4 theredpillrebooted 12wwyzc \n",
"\n",
" title \\\n",
"0 My name is Benjamin Persits and I am so sick o... \n",
"1 WHAT THE FUCK!?!?!?!? \n",
"2 Why is she an asshole with me and how to handl... \n",
"3 Popular US vlogger Gonzalo Lira aka Coach Red ... \n",
"4 Mrs. Rooster by Stinky Buckets \n",
"\n",
" text \\\n",
"0 Yes, I'm using my full name because I'm so sic... \n",
"1 NaN \n",
"2 So I got this girl who's kinda my crush but I ... \n",
"3 &#x200B;\\n\\n[https://chng.it/gFyFvqS5K7](h... \n",
"4 NaN \n",
"\n",
" url score \\\n",
"0 NaN 0 \n",
"1 https://v.redd.it/8d1eapsih3ob1 0 \n",
"2 NaN 0 \n",
"3 NaN 0 \n",
"4 https://youtube.com/watch?v=jK5BQngWne4&fe... 1 \n",
"\n",
" author date \n",
"0 benjypersits 2023-10-20 03:40:33 \n",
"1 PatchesTheIdiot 2023-09-13 21:57:56 \n",
"2 Worried-Horse-3408 2023-09-04 21:19:27 \n",
"3 Beautiful_Diamond980 2023-08-01 06:21:36 \n",
"4 LetsGoRedDevils 2023-04-24 00:50:48 "
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df.head()"
]
},
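{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before splitting, it is worth checking how posts are distributed across subreddits, since `subreddit` is the ideology label that the stratified split below preserves. A quick sketch using only the `df` loaded above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Class balance of the ideology label used for stratification\n",
"df['subreddit'].value_counts()"
]
},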
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# make stratified train, validation, and test sets\n",
"\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"train, test = train_test_split(df, test_size=0.10, stratify=df['subreddit'])\n",
"train, val = train_test_split(train, test_size=0.20, stratify=train['subreddit'])\n",
"\n",
"train.shape, val.shape, test.shape"
]
}
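{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a minimal sketch using only the splits created above), stratification should keep the subreddit proportions nearly identical across train, validation, and test:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compare label proportions across the three splits; the columns should match closely\n",
"pd.concat(\n",
"    {\n",
"        'train': train['subreddit'].value_counts(normalize=True),\n",
"        'val': val['subreddit'].value_counts(normalize=True),\n",
"        'test': test['subreddit'].value_counts(normalize=True),\n",
"    },\n",
"    axis=1,\n",
")"
]
}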
],
"metadata": {
"kernelspec": {
"display_name": "sta663C",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}