mmcquade committed on
Commit
8be0083
1 Parent(s): 33c86a9

Initial commit

.DS_Store ADDED
Binary file (6.15 kB).
.gitattributes CHANGED
@@ -27,3 +27,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
28
  *.tar.gz filter=lfs diff=lfs merge=lfs -text
29
  *.pkl filter=lfs diff=lfs merge=lfs -text
30
+ *.csv filter=lfs diff=lfs merge=lfs -text
code/reuters_dataset_prep.ipynb ADDED
@@ -0,0 +1,420 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "id": "1383f909",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "Requirement already satisfied: transformers in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (4.11.3)\n",
14
+ "Collecting datasets\n",
15
+ " Downloading datasets-1.15.1-py3-none-any.whl (290 kB)\n",
16
+ "\u001b[K |████████████████████████████████| 290 kB 2.4 MB/s eta 0:00:01\n",
17
+ "\u001b[?25hCollecting fsspec[http]>=2021.05.0\n",
18
+ " Downloading fsspec-2021.11.0-py3-none-any.whl (132 kB)\n",
19
+ "\u001b[K |████████████████████████████████| 132 kB 3.8 MB/s eta 0:00:01\n",
20
+ "\u001b[?25hCollecting multiprocess\n",
21
+ " Downloading multiprocess-0.70.12.2-py38-none-any.whl (128 kB)\n",
22
+ "\u001b[K |████████████████████████████████| 128 kB 5.5 MB/s eta 0:00:01\n",
23
+ "\u001b[?25hCollecting dill\n",
24
+ " Downloading dill-0.3.4-py2.py3-none-any.whl (86 kB)\n",
25
+ "\u001b[K |████████████████████████████████| 86 kB 4.4 MB/s eta 0:00:01\n",
26
+ "\u001b[?25hRequirement already satisfied: pandas in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from datasets) (1.2.4)\n",
27
+ "Collecting huggingface-hub<1.0.0,>=0.1.0\n",
28
+ " Downloading huggingface_hub-0.1.2-py3-none-any.whl (59 kB)\n",
29
+ "\u001b[K |████████████████████████████████| 59 kB 4.4 MB/s eta 0:00:01\n",
30
+ "\u001b[?25hCollecting xxhash\n",
31
+ " Downloading xxhash-2.0.2-cp38-cp38-macosx_10_9_x86_64.whl (31 kB)\n",
32
+ "Requirement already satisfied: requests>=2.19.0 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from datasets) (2.25.1)\n",
33
+ "Collecting tqdm>=4.62.1\n",
34
+ " Using cached tqdm-4.62.3-py2.py3-none-any.whl (76 kB)\n",
35
+ "Collecting pyarrow!=4.0.0,>=1.0.0\n",
36
+ " Downloading pyarrow-6.0.1-cp38-cp38-macosx_10_13_x86_64.whl (19.1 MB)\n",
37
+ "\u001b[K |████████████████████████████████| 19.1 MB 5.9 MB/s eta 0:00:01\n",
38
+ "\u001b[?25hRequirement already satisfied: numpy>=1.17 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from datasets) (1.20.1)\n",
39
+ "Collecting aiohttp\n",
40
+ " Downloading aiohttp-3.8.1-cp38-cp38-macosx_10_9_x86_64.whl (574 kB)\n",
41
+ "\u001b[K |████████████████████████████████| 574 kB 5.0 MB/s eta 0:00:01\n",
42
+ "\u001b[?25hRequirement already satisfied: packaging in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from datasets) (20.9)\n",
43
+ "Requirement already satisfied: pyyaml in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (5.4.1)\n",
44
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.7.4.3)\n",
45
+ "Requirement already satisfied: filelock in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.0.12)\n",
46
+ "Requirement already satisfied: pyparsing>=2.0.2 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from packaging->datasets) (2.4.7)\n",
47
+ "Requirement already satisfied: idna<3,>=2.5 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2.10)\n",
48
+ "Requirement already satisfied: certifi>=2017.4.17 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2020.12.5)\n",
49
+ "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (1.26.4)\n",
50
+ "Requirement already satisfied: chardet<5,>=3.0.2 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (4.0.0)\n",
51
+ "Requirement already satisfied: tokenizers<0.11,>=0.10.1 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from transformers) (0.10.3)\n",
52
+ "Requirement already satisfied: sacremoses in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from transformers) (0.0.46)\n",
53
+ "Requirement already satisfied: regex!=2019.12.17 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from transformers) (2021.4.4)\n",
54
+ "Collecting charset-normalizer<3.0,>=2.0\n",
55
+ " Using cached charset_normalizer-2.0.7-py3-none-any.whl (38 kB)\n",
56
+ "Collecting frozenlist>=1.1.1\n",
57
+ " Downloading frozenlist-1.2.0-cp38-cp38-macosx_10_9_x86_64.whl (81 kB)\n",
58
+ "\u001b[K |████████████████████████████████| 81 kB 7.0 MB/s eta 0:00:011\n",
59
+ "\u001b[?25hCollecting multidict<7.0,>=4.5\n",
60
+ " Downloading multidict-5.2.0-cp38-cp38-macosx_10_9_x86_64.whl (45 kB)\n",
61
+ "\u001b[K |████████████████████████████████| 45 kB 4.0 MB/s eta 0:00:011\n",
62
+ "\u001b[?25hCollecting yarl<2.0,>=1.0\n",
63
+ " Downloading yarl-1.7.2-cp38-cp38-macosx_10_9_x86_64.whl (121 kB)\n",
64
+ "\u001b[K |████████████████████████████████| 121 kB 5.8 MB/s eta 0:00:01\n",
65
+ "\u001b[?25hRequirement already satisfied: attrs>=17.3.0 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from aiohttp->datasets) (20.3.0)\n",
66
+ "Collecting async-timeout<5.0,>=4.0.0a3\n",
67
+ " Downloading async_timeout-4.0.1-py3-none-any.whl (5.7 kB)\n",
68
+ "Collecting aiosignal>=1.1.2\n",
69
+ " Downloading aiosignal-1.2.0-py3-none-any.whl (8.2 kB)\n",
70
+ "Requirement already satisfied: python-dateutil>=2.7.3 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from pandas->datasets) (2.8.1)\n",
71
+ "Requirement already satisfied: pytz>=2017.3 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from pandas->datasets) (2021.1)\n",
72
+ "Requirement already satisfied: six>=1.5 in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from python-dateutil>=2.7.3->pandas->datasets) (1.15.0)\n",
73
+ "Requirement already satisfied: joblib in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from sacremoses->transformers) (1.0.1)\n",
74
+ "Requirement already satisfied: click in /Users/markmcquade/opt/anaconda3/lib/python3.8/site-packages (from sacremoses->transformers) (7.1.2)\n",
75
+ "Installing collected packages: multidict, frozenlist, yarl, charset-normalizer, async-timeout, aiosignal, tqdm, fsspec, dill, aiohttp, xxhash, pyarrow, multiprocess, huggingface-hub, datasets\n",
76
+ " Attempting uninstall: tqdm\n",
77
+ " Found existing installation: tqdm 4.59.0\n",
78
+ " Uninstalling tqdm-4.59.0:\n",
79
+ " Successfully uninstalled tqdm-4.59.0\n",
80
+ " Attempting uninstall: fsspec\n",
81
+ " Found existing installation: fsspec 0.9.0\n",
82
+ " Uninstalling fsspec-0.9.0:\n",
83
+ " Successfully uninstalled fsspec-0.9.0\n",
84
+ " Attempting uninstall: huggingface-hub\n",
85
+ " Found existing installation: huggingface-hub 0.0.19\n",
86
+ " Uninstalling huggingface-hub-0.0.19:\n",
87
+ " Successfully uninstalled huggingface-hub-0.0.19\n",
88
+ "Successfully installed aiohttp-3.8.1 aiosignal-1.2.0 async-timeout-4.0.1 charset-normalizer-2.0.7 datasets-1.15.1 dill-0.3.4 frozenlist-1.2.0 fsspec-2021.11.0 huggingface-hub-0.1.2 multidict-5.2.0 multiprocess-0.70.12.2 pyarrow-6.0.1 tqdm-4.62.3 xxhash-2.0.2 yarl-1.7.2\n"
89
+ ]
90
+ }
91
+ ],
92
+ "source": [
93
+ "!pip install transformers datasets"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": 3,
99
+ "id": "d0ec8542",
100
+ "metadata": {},
101
+ "outputs": [],
102
+ "source": [
103
+ "import datasets"
104
+ ]
105
+ },
106
+ {
107
+ "cell_type": "code",
108
+ "execution_count": 6,
109
+ "id": "248be0df",
110
+ "metadata": {},
111
+ "outputs": [
112
+ {
113
+ "name": "stderr",
114
+ "output_type": "stream",
115
+ "text": [
116
+ "Reusing dataset reuters21578 (/Users/markmcquade/.cache/huggingface/datasets/reuters21578/ModHayes/1.0.0/bd91fac5a25fc818873c02a7281cc276c9b326a9e6a89288fc6ba6967772240f)\n"
117
+ ]
118
+ },
119
+ {
120
+ "data": {
121
+ "application/vnd.jupyter.widget-view+json": {
122
+ "model_id": "6cbd2502b9d54505b2a60aa8f809a2f5",
123
+ "version_major": 2,
124
+ "version_minor": 0
125
+ },
126
+ "text/plain": [
127
+ " 0%| | 0/2 [00:00<?, ?it/s]"
128
+ ]
129
+ },
130
+ "metadata": {},
131
+ "output_type": "display_data"
132
+ }
133
+ ],
134
+ "source": [
135
+ "from datasets import load_dataset\n",
136
+ "\n",
137
+ "dataset = load_dataset(\"reuters21578\", \"ModHayes\")"
138
+ ]
139
+ },
140
+ {
141
+ "cell_type": "code",
142
+ "execution_count": 7,
143
+ "id": "906c5716",
144
+ "metadata": {},
145
+ "outputs": [
146
+ {
147
+ "data": {
148
+ "text/plain": [
149
+ "DatasetDict({\n",
150
+ " test: Dataset({\n",
151
+ " features: ['text', 'text_type', 'topics', 'lewis_split', 'cgis_split', 'old_id', 'new_id', 'places', 'people', 'orgs', 'exchanges', 'date', 'title'],\n",
152
+ " num_rows: 722\n",
153
+ " })\n",
154
+ " train: Dataset({\n",
155
+ " features: ['text', 'text_type', 'topics', 'lewis_split', 'cgis_split', 'old_id', 'new_id', 'places', 'people', 'orgs', 'exchanges', 'date', 'title'],\n",
156
+ " num_rows: 20856\n",
157
+ " })\n",
158
+ "})"
159
+ ]
160
+ },
161
+ "execution_count": 7,
162
+ "metadata": {},
163
+ "output_type": "execute_result"
164
+ }
165
+ ],
166
+ "source": [
167
+ "dataset"
168
+ ]
169
+ },
170
+ {
171
+ "cell_type": "markdown",
172
+ "id": "ee660fd8",
173
+ "metadata": {},
174
+ "source": [
175
+ "Remove unwanted columns and rename the `title` column to `target`"
176
+ ]
177
+ },
178
+ {
179
+ "cell_type": "code",
180
+ "execution_count": 8,
181
+ "id": "5a66ae17",
182
+ "metadata": {},
183
+ "outputs": [],
184
+ "source": [
185
+ "dataset = dataset.remove_columns(\n",
186
+ "['text_type', 'topics', 'lewis_split', 'cgis_split', 'old_id', \n",
187
+ " 'new_id', 'places', 'people', 'orgs', 'exchanges', 'date'\n",
188
+ "])\n",
189
+ "\n",
190
+ "dataset = dataset.rename_column('title', 'target')"
191
+ ]
192
+ },
193
+ {
194
+ "cell_type": "code",
195
+ "execution_count": 9,
196
+ "id": "bae56dc5",
197
+ "metadata": {},
198
+ "outputs": [
199
+ {
200
+ "data": {
201
+ "text/plain": [
202
+ "DatasetDict({\n",
203
+ " test: Dataset({\n",
204
+ " features: ['text', 'target'],\n",
205
+ " num_rows: 722\n",
206
+ " })\n",
207
+ " train: Dataset({\n",
208
+ " features: ['text', 'target'],\n",
209
+ " num_rows: 20856\n",
210
+ " })\n",
211
+ "})"
212
+ ]
213
+ },
214
+ "execution_count": 9,
215
+ "metadata": {},
216
+ "output_type": "execute_result"
217
+ }
218
+ ],
219
+ "source": [
220
+ "dataset"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": 20,
226
+ "id": "2d89fd84",
227
+ "metadata": {},
228
+ "outputs": [
229
+ {
230
+ "data": {
231
+ "text/plain": [
232
+ "{'text': 'Standard Oil Co and BP North America\\nInc said they plan to form a venture to manage the money market\\nborrowing and investment activities of both companies.\\n BP North America is a subsidiary of British Petroleum Co\\nPlc &lt;BP>, which also owns a 55 pct interest in Standard Oil.\\n The venture will be called BP/Standard Financial Trading\\nand will be operated by Standard Oil under the oversight of a\\njoint management committee.\\n\\n Reuter\\n',\n",
233
+ " 'target': 'STANDARD OIL &lt;SRD> TO FORM FINANCIAL UNIT'}"
234
+ ]
235
+ },
236
+ "execution_count": 20,
237
+ "metadata": {},
238
+ "output_type": "execute_result"
239
+ }
240
+ ],
241
+ "source": [
242
+ "dataset['train'][1]"
243
+ ]
244
+ },
245
+ {
246
+ "cell_type": "markdown",
247
+ "id": "4151eddf",
248
+ "metadata": {},
249
+ "source": [
250
+ "Drop newlines and tabs, replacing them with spaces. Remove commas and quotes. Strip \"Reuter\" from the end of the text. Collapse any remaining extra whitespace. Unescape the HTML entities (&lt;, &gt;) in the target."
251
+ ]
252
+ },
253
+ {
254
+ "cell_type": "code",
255
+ "execution_count": 22,
256
+ "id": "76bc67d3",
257
+ "metadata": {},
258
+ "outputs": [],
259
+ "source": [
260
+ "def clean(row):\n",
261
+ " row['text'] = row['text'].replace('\\n',' ').replace('\\t',' ')\\\n",
262
+ " .replace(',','').replace('\\'','').replace('\\\"','')\\\n",
263
+ " .replace(' Reuter','').replace(' REUTER','')\n",
264
+ " row['text'] = \" \".join(row['text'].split())\n",
265
+ " row['target'] = row['target'].replace('&lt;','<').replace('&gt;','>')\n",
266
+ " return row"
267
+ ]
268
+ },
269
+ {
270
+ "cell_type": "code",
271
+ "execution_count": 23,
272
+ "id": "40bff5a5",
273
+ "metadata": {},
274
+ "outputs": [
275
+ {
276
+ "data": {
277
+ "application/vnd.jupyter.widget-view+json": {
278
+ "model_id": "7a4b761c23b84a138fa74ef9dfe9ed49",
279
+ "version_major": 2,
280
+ "version_minor": 0
281
+ },
282
+ "text/plain": [
283
+ " 0%| | 0/722 [00:00<?, ?ex/s]"
284
+ ]
285
+ },
286
+ "metadata": {},
287
+ "output_type": "display_data"
288
+ },
289
+ {
290
+ "data": {
291
+ "application/vnd.jupyter.widget-view+json": {
292
+ "model_id": "73c80676ca004de29295c8e5ef2512ea",
293
+ "version_major": 2,
294
+ "version_minor": 0
295
+ },
296
+ "text/plain": [
297
+ " 0%| | 0/20856 [00:00<?, ?ex/s]"
298
+ ]
299
+ },
300
+ "metadata": {},
301
+ "output_type": "display_data"
302
+ }
303
+ ],
304
+ "source": [
305
+ "dataset = dataset.map(clean)"
306
+ ]
307
+ },
308
+ {
309
+ "cell_type": "code",
310
+ "execution_count": 24,
311
+ "id": "d882cc08",
312
+ "metadata": {},
313
+ "outputs": [
314
+ {
315
+ "data": {
316
+ "text/plain": [
317
+ "{'text': 'Standard Oil Co and BP North America Inc said they plan to form a venture to manage the money market borrowing and investment activities of both companies. BP North America is a subsidiary of British Petroleum Co Plc &lt;BP> which also owns a 55 pct interest in Standard Oil. The venture will be called BP/Standard Financial Trading and will be operated by Standard Oil under the oversight of a joint management committee.',\n",
318
+ " 'target': 'STANDARD OIL <SRD> TO FORM FINANCIAL UNIT'}"
319
+ ]
320
+ },
321
+ "execution_count": 24,
322
+ "metadata": {},
323
+ "output_type": "execute_result"
324
+ }
325
+ ],
326
+ "source": [
327
+ "dataset['train'][1]"
328
+ ]
329
+ },
330
+ {
331
+ "cell_type": "code",
332
+ "execution_count": 25,
333
+ "id": "22c4da2e",
334
+ "metadata": {},
335
+ "outputs": [],
336
+ "source": [
337
+ "dataset.save_to_disk('reuters_processed')"
338
+ ]
339
+ },
340
+ {
341
+ "cell_type": "code",
342
+ "execution_count": 27,
343
+ "id": "95ef34aa",
344
+ "metadata": {},
345
+ "outputs": [
346
+ {
347
+ "data": {
348
+ "application/vnd.jupyter.widget-view+json": {
349
+ "model_id": "17b994c03fe84d0082f285fd972c4140",
350
+ "version_major": 2,
351
+ "version_minor": 0
352
+ },
353
+ "text/plain": [
354
+ "Creating CSV from Arrow format: 0%| | 0/3 [00:00<?, ?ba/s]"
355
+ ]
356
+ },
357
+ "metadata": {},
358
+ "output_type": "display_data"
359
+ },
360
+ {
361
+ "data": {
362
+ "application/vnd.jupyter.widget-view+json": {
363
+ "model_id": "4a3f180722184a2f90a288c35d313f3c",
364
+ "version_major": 2,
365
+ "version_minor": 0
366
+ },
367
+ "text/plain": [
368
+ "Creating CSV from Arrow format: 0%| | 0/1 [00:00<?, ?ba/s]"
369
+ ]
370
+ },
371
+ "metadata": {},
372
+ "output_type": "display_data"
373
+ },
374
+ {
375
+ "data": {
376
+ "text/plain": [
377
+ "807451"
378
+ ]
379
+ },
380
+ "execution_count": 27,
381
+ "metadata": {},
382
+ "output_type": "execute_result"
383
+ }
384
+ ],
385
+ "source": [
386
+ "dataset['train'].to_csv('reuters_train.csv', index=False, header=True)\n",
387
+ "dataset['test'].to_csv('reuters_test.csv', index=False, header=True)"
388
+ ]
389
+ },
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": null,
393
+ "id": "489257d1",
394
+ "metadata": {},
395
+ "outputs": [],
396
+ "source": []
397
+ }
398
+ ],
399
+ "metadata": {
400
+ "kernelspec": {
401
+ "display_name": "Python 3",
402
+ "language": "python",
403
+ "name": "python3"
404
+ },
405
+ "language_info": {
406
+ "codemirror_mode": {
407
+ "name": "ipython",
408
+ "version": 3
409
+ },
410
+ "file_extension": ".py",
411
+ "mimetype": "text/x-python",
412
+ "name": "python",
413
+ "nbconvert_exporter": "python",
414
+ "pygments_lexer": "ipython3",
415
+ "version": "3.8.8"
416
+ }
417
+ },
418
+ "nbformat": 4,
419
+ "nbformat_minor": 5
420
+ }
code/reuters_processed/dataset_dict.json ADDED
@@ -0,0 +1 @@
1
+ {"splits": ["test", "train"]}
code/reuters_processed/test/dataset.arrow ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5674ef9fdb039939b76e2bb55d2883b076a91557de80d13e8cba40267b2daac9
3
+ size 812240
code/reuters_processed/test/dataset_info.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "builder_name": "reuters21578",
3
+ "citation": "@article{APTE94,\nauthor = {Chidanand Apt{'{e}} and Fred Damerau and Sholom M. Weiss},\ntitle = {Automated Learning of Decision Rules for Text Categorization},\njournal = {ACM Transactions on Information Systems},\nyear = {1994},\nnote = {To appear.}\n}\n\n@inproceedings{APTE94b,\nauthor = {Chidanand Apt{'{e}} and Fred Damerau and Sholom M. Weiss},\ntitle = {Toward Language Independent Automated Learning of Text Categorization Models},\nbooktitle = {sigir94},\nyear = {1994},\nnote = {To appear.}\n}\n\n@inproceedings{HAYES8},\nauthor = {Philip J. Hayes and Peggy M. Anderson and Irene B. Nirenburg and\nLinda M. Schmandt},\ntitle = {{TCS}: A Shell for Content-Based Text Categorization},\nbooktitle = {IEEE Conference on Artificial Intelligence Applications},\nyear = {1990}\n}\n\n@inproceedings{HAYES90b,\nauthor = {Philip J. Hayes and Steven P. Weinstein},\ntitle = {{CONSTRUE/TIS:} A System for Content-Based Indexing of a\nDatabase of News Stories},\nbooktitle = {Second Annual Conference on Innovative Applications of\nArtificial Intelligence},\nyear = {1990}\n}\n\n@incollection{HAYES92 ,\nauthor = {Philip J. Hayes},\ntitle = {Intelligent High-Volume Text Processing using Shallow,\nDomain-Specific Techniques},\nbooktitle = {Text-Based Intelligent Systems},\npublisher = {Lawrence Erlbaum},\naddress = {Hillsdale, NJ},\nyear = {1992},\neditor = {Paul S. Jacobs}\n}\n\n@inproceedings{LEWIS91c ,\nauthor = {David D. Lewis},\ntitle = {Evaluating Text Categorization},\nbooktitle = {Proceedings of Speech and Natural Language Workshop},\nyear = {1991},\nmonth = {feb},\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\npages = {312--318}\n\n}\n\n@phdthesis{LEWIS91d,\nauthor = {David Dolan Lewis},\ntitle = {Representation and Learning in Information Retrieval},\nschool = {Computer Science Dept.; Univ. of Massachusetts; Amherst, MA 01003},\nyear = 1992},\nnote = {Technical Report 91--93.}\n}\n\n@inproceedings{LEWIS91e,\nauthor = {David D. Lewis},\ntitle = {Data Extraction as Text Categorization: An Experiment with\nthe {MUC-3} Corpus},\nbooktitle = {Proceedings of the Third Message Understanding Evaluation\nand Conference},\nyear = {1991},\nmonth = {may},\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\naddress = {Los Altos, CA}\n\n}\n\n@inproceedings{LEWIS92b,\nauthor = {David D. Lewis},\ntitle = {An Evaluation of Phrasal and Clustered Representations on a Text\nCategorization Task},\nbooktitle = {Fifteenth Annual International ACM SIGIR Conference on\nResearch and Development in Information Retrieval},\nyear = {1992},\npages = {37--50}\n}\n\n@inproceedings{LEWIS92d ,\nauthor = {David D. Lewis and Richard M. Tong},\ntitle = {Text Filtering in {MUC-3} and {MUC-4}},\nbooktitle = {Proceedings of the Fourth Message Understanding Conference ({MUC-4})},\nyear = {1992},\nmonth = {jun},\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\naddress = {Los Altos, CA}\n}\n\n@inproceedings{LEWIS92e,\nauthor = {David D. Lewis},\ntitle = {Feature Selection and Feature Extraction for Text Categorization},\nbooktitle = {Proceedings of Speech and Natural Language Workshop},\nyear = {1992},\nmonth = {feb} ,\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\npages = {212--217}\n}\n\n@inproceedings{LEWIS94b,\nauthor = {David D. 
Lewis and Marc Ringuette},\ntitle = {A Comparison of Two Learning Algorithms for Text Categorization},\nbooktitle = {Symposium on Document Analysis and Information Retrieval},\nyear = {1994},\norganization = {ISRI; Univ. of Nevada, Las Vegas},\naddress = {Las Vegas, NV},\nmonth = {apr},\npages = {81--93}\n}\n\n@article{LEWIS94d,\nauthor = {David D. Lewis and Philip J. Hayes},\ntitle = {Guest Editorial},\njournal = {ACM Transactions on Information Systems},\nyear = {1994},\nvolume = {12},\nnumber = {3},\npages = {231},\nmonth = {jul}\n}\n\n@article{SPARCKJONES76,\nauthor = {K. {Sparck Jones} and C. J. {van Rijsbergen}},\ntitle = {Information Retrieval Test Collections},\njournal = {Journal of Documentation},\nyear = {1976},\nvolume = {32},\nnumber = {1},\npages = {59--75}\n}\n\n@book{WEISS91,\nauthor = {Sholom M. Weiss and Casimir A. Kulikowski},\ntitle = {Computer Systems That Learn},\npublisher = {Morgan Kaufmann},\nyear = {1991},\naddress = {San Mateo, CA}\n}\n",
4
+ "config_name": "ModHayes",
5
+ "dataset_size": 20019638,
6
+ "description": "The Reuters-21578 dataset is one of the most widely used data collections for text\ncategorization research. It is collected from the Reuters financial newswire service in 1987.\n",
7
+ "download_checksums": {
8
+ "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.tar.gz": {
9
+ "num_bytes": 8150596,
10
+ "checksum": "3bae43c9b14e387f76a61b6d82bf98a4fb5d3ef99ef7e7075ff2ccbcf59f9d30"
11
+ }
12
+ },
13
+ "download_size": 8150596,
14
+ "features": {
15
+ "text": {
16
+ "dtype": "string",
17
+ "id": null,
18
+ "_type": "Value"
19
+ },
20
+ "target": {
21
+ "dtype": "string",
22
+ "id": null,
23
+ "_type": "Value"
24
+ }
25
+ },
26
+ "homepage": "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.html",
27
+ "license": "",
28
+ "post_processed": null,
29
+ "post_processing_size": null,
30
+ "size_in_bytes": 28170234,
31
+ "splits": {
32
+ "test": {
33
+ "name": "test",
34
+ "num_bytes": 948316,
35
+ "num_examples": 722,
36
+ "dataset_name": "reuters21578"
37
+ },
38
+ "train": {
39
+ "name": "train",
40
+ "num_bytes": 19071322,
41
+ "num_examples": 20856,
42
+ "dataset_name": "reuters21578"
43
+ }
44
+ },
45
+ "supervised_keys": null,
46
+ "task_templates": null,
47
+ "version": {
48
+ "version_str": "1.0.0",
49
+ "description": "",
50
+ "major": 1,
51
+ "minor": 0,
52
+ "patch": 0
53
+ }
54
+ }
code/reuters_processed/test/state.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "dataset.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "226b9d2adcbef331",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_indexes": {},
12
+ "_output_all_columns": false,
13
+ "_split": "test"
14
+ }
code/reuters_processed/train/dataset.arrow ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:972f47423ef2291c6c6e7d6ed8849434c964270f87ad1f91d51edea98a0f1df4
3
+ size 15716672
code/reuters_processed/train/dataset_info.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "builder_name": "reuters21578",
3
+ "citation": "@article{APTE94,\nauthor = {Chidanand Apt{'{e}} and Fred Damerau and Sholom M. Weiss},\ntitle = {Automated Learning of Decision Rules for Text Categorization},\njournal = {ACM Transactions on Information Systems},\nyear = {1994},\nnote = {To appear.}\n}\n\n@inproceedings{APTE94b,\nauthor = {Chidanand Apt{'{e}} and Fred Damerau and Sholom M. Weiss},\ntitle = {Toward Language Independent Automated Learning of Text Categorization Models},\nbooktitle = {sigir94},\nyear = {1994},\nnote = {To appear.}\n}\n\n@inproceedings{HAYES8},\nauthor = {Philip J. Hayes and Peggy M. Anderson and Irene B. Nirenburg and\nLinda M. Schmandt},\ntitle = {{TCS}: A Shell for Content-Based Text Categorization},\nbooktitle = {IEEE Conference on Artificial Intelligence Applications},\nyear = {1990}\n}\n\n@inproceedings{HAYES90b,\nauthor = {Philip J. Hayes and Steven P. Weinstein},\ntitle = {{CONSTRUE/TIS:} A System for Content-Based Indexing of a\nDatabase of News Stories},\nbooktitle = {Second Annual Conference on Innovative Applications of\nArtificial Intelligence},\nyear = {1990}\n}\n\n@incollection{HAYES92 ,\nauthor = {Philip J. Hayes},\ntitle = {Intelligent High-Volume Text Processing using Shallow,\nDomain-Specific Techniques},\nbooktitle = {Text-Based Intelligent Systems},\npublisher = {Lawrence Erlbaum},\naddress = {Hillsdale, NJ},\nyear = {1992},\neditor = {Paul S. Jacobs}\n}\n\n@inproceedings{LEWIS91c ,\nauthor = {David D. Lewis},\ntitle = {Evaluating Text Categorization},\nbooktitle = {Proceedings of Speech and Natural Language Workshop},\nyear = {1991},\nmonth = {feb},\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\npages = {312--318}\n\n}\n\n@phdthesis{LEWIS91d,\nauthor = {David Dolan Lewis},\ntitle = {Representation and Learning in Information Retrieval},\nschool = {Computer Science Dept.; Univ. of Massachusetts; Amherst, MA 01003},\nyear = 1992},\nnote = {Technical Report 91--93.}\n}\n\n@inproceedings{LEWIS91e,\nauthor = {David D. Lewis},\ntitle = {Data Extraction as Text Categorization: An Experiment with\nthe {MUC-3} Corpus},\nbooktitle = {Proceedings of the Third Message Understanding Evaluation\nand Conference},\nyear = {1991},\nmonth = {may},\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\naddress = {Los Altos, CA}\n\n}\n\n@inproceedings{LEWIS92b,\nauthor = {David D. Lewis},\ntitle = {An Evaluation of Phrasal and Clustered Representations on a Text\nCategorization Task},\nbooktitle = {Fifteenth Annual International ACM SIGIR Conference on\nResearch and Development in Information Retrieval},\nyear = {1992},\npages = {37--50}\n}\n\n@inproceedings{LEWIS92d ,\nauthor = {David D. Lewis and Richard M. Tong},\ntitle = {Text Filtering in {MUC-3} and {MUC-4}},\nbooktitle = {Proceedings of the Fourth Message Understanding Conference ({MUC-4})},\nyear = {1992},\nmonth = {jun},\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\naddress = {Los Altos, CA}\n}\n\n@inproceedings{LEWIS92e,\nauthor = {David D. Lewis},\ntitle = {Feature Selection and Feature Extraction for Text Categorization},\nbooktitle = {Proceedings of Speech and Natural Language Workshop},\nyear = {1992},\nmonth = {feb} ,\norganization = {Defense Advanced Research Projects Agency},\npublisher = {Morgan Kaufmann},\npages = {212--217}\n}\n\n@inproceedings{LEWIS94b,\nauthor = {David D. 
Lewis and Marc Ringuette},\ntitle = {A Comparison of Two Learning Algorithms for Text Categorization},\nbooktitle = {Symposium on Document Analysis and Information Retrieval},\nyear = {1994},\norganization = {ISRI; Univ. of Nevada, Las Vegas},\naddress = {Las Vegas, NV},\nmonth = {apr},\npages = {81--93}\n}\n\n@article{LEWIS94d,\nauthor = {David D. Lewis and Philip J. Hayes},\ntitle = {Guest Editorial},\njournal = {ACM Transactions on Information Systems},\nyear = {1994},\nvolume = {12},\nnumber = {3},\npages = {231},\nmonth = {jul}\n}\n\n@article{SPARCKJONES76,\nauthor = {K. {Sparck Jones} and C. J. {van Rijsbergen}},\ntitle = {Information Retrieval Test Collections},\njournal = {Journal of Documentation},\nyear = {1976},\nvolume = {32},\nnumber = {1},\npages = {59--75}\n}\n\n@book{WEISS91,\nauthor = {Sholom M. Weiss and Casimir A. Kulikowski},\ntitle = {Computer Systems That Learn},\npublisher = {Morgan Kaufmann},\nyear = {1991},\naddress = {San Mateo, CA}\n}\n",
4
+ "config_name": "ModHayes",
5
+ "dataset_size": 20019638,
6
+ "description": "The Reuters-21578 dataset is one of the most widely used data collections for text\ncategorization research. It is collected from the Reuters financial newswire service in 1987.\n",
7
+ "download_checksums": {
8
+ "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.tar.gz": {
9
+ "num_bytes": 8150596,
10
+ "checksum": "3bae43c9b14e387f76a61b6d82bf98a4fb5d3ef99ef7e7075ff2ccbcf59f9d30"
11
+ }
12
+ },
13
+ "download_size": 8150596,
14
+ "features": {
15
+ "text": {
16
+ "dtype": "string",
17
+ "id": null,
18
+ "_type": "Value"
19
+ },
20
+ "target": {
21
+ "dtype": "string",
22
+ "id": null,
23
+ "_type": "Value"
24
+ }
25
+ },
26
+ "homepage": "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.html",
27
+ "license": "",
28
+ "post_processed": null,
29
+ "post_processing_size": null,
30
+ "size_in_bytes": 28170234,
31
+ "splits": {
32
+ "test": {
33
+ "name": "test",
34
+ "num_bytes": 948316,
35
+ "num_examples": 722,
36
+ "dataset_name": "reuters21578"
37
+ },
38
+ "train": {
39
+ "name": "train",
40
+ "num_bytes": 19071322,
41
+ "num_examples": 20856,
42
+ "dataset_name": "reuters21578"
43
+ }
44
+ },
45
+ "supervised_keys": null,
46
+ "task_templates": null,
47
+ "version": {
48
+ "version_str": "1.0.0",
49
+ "description": "",
50
+ "major": 1,
51
+ "minor": 0,
52
+ "patch": 0
53
+ }
54
+ }
code/reuters_processed/train/state.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "dataset.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "ed601f62656c294e",
8
+ "_format_columns": null,
9
+ "_format_kwargs": {},
10
+ "_format_type": null,
11
+ "_indexes": {},
12
+ "_output_all_columns": false,
13
+ "_split": "train"
14
+ }
code/reuters_test.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:925c8919afd37bb4acf70b916ae7c0a24d4ae96aae40d687dfbcb83df267c22c
3
+ size 807451
code/reuters_train.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c41f0ae30ba16e6b1ddb087371caa91cc44c165f86cf91d85b766397c3109415
3
+ size 15589359