lukemelas committed
Commit ff4cf6a
1 Parent(s): 1258ac1
Files changed (1)
  1. test-dataset-debug.py +63 -80
test-dataset-debug.py CHANGED
@@ -1,45 +1,43 @@
-"""TODO: Add a description here."""
+"""
+The Harvard USPTO Patent Dataset (HUPD) is a large-scale, well-structured, and multi-purpose corpus
+of English-language patent applications filed to the United States Patent and Trademark Office (USPTO)
+between 2004 and 2018. With more than 4.5 million patent documents, HUPD is two to three times larger
+than comparable corpora. Unlike other NLP patent datasets, HUPD contains the inventor-submitted versions
+of patent applications, not the final versions of granted patents, allowing us to study patentability at
+the time of filing using NLP methods for the first time.
+"""
 
 from __future__ import absolute_import, division, print_function
 
-import json
 import os
 import datetime
 import pandas as pd
 import numpy as np
 from pathlib import Path
+try:
+    import ujson as json
+except:
+    import json
 
 import datasets
 
-# TODO: Add BibTeX citation
 _CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-authors={huggingface, Inc.
-},
-year={2020}
+@InProceedings{suzgun2021:hupd,
+title = {The Harvard USPTO Patent Dataset},
+authors={Mirac Suzgun and Suproteem Sarkar and Luke Melas-Kyriazi and Scott Kominers and Stuart Shieber},
+year={2021}
 }
 """
 
-# TODO: Add description of the dataset here
-_DESCRIPTION = """TODO: Add description"""
-
-# # URLs for production
-# _METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-02-10.feather"
-# # _METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-01-21.feather"
-# _DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled-2021-01-07.tar"
-# _DATA_SUBFOLDER_NAME = 'distilled'
-
-# # URLs for debugging
-# _METADATA_URL = _DEBUG_METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata_debug-2021-02-10.feather"
-# _DATA_URL = _DEBUG_DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled_debug-2021-01-07.tar"
-# _DATA_SUBFOLDER_NAME = _DATA_SUBFOLDER_NAME = 'debug_distilled'
-
-# URLs for figuring out the Huggingface Hub
-_METADATA_URL = "https://huggingface.co/datasets/greeneggsandyaml/test-dataset-debug/resolve/main/metadata--Jan2016--2021-02-10.feather"
-_DATA_URL = "https://huggingface.co/datasets/greeneggsandyaml/test-dataset-debug/resolve/main/json-files-Jan2016.tar"
-_DATA_SUBFOLDER_NAME = 'json-files-Jan2016'
+_DESCRIPTION = """
+The Harvard USPTO Patent Dataset (HUPD) is a large-scale, well-structured, and multi-purpose corpus
+of English-language patent applications filed to the United States Patent and Trademark Office (USPTO)
+between 2004 and 2018. With more than 4.5 million patent documents, HUPD is two to three times larger
+than comparable corpora. Unlike other NLP patent datasets, HUPD contains the inventor-submitted versions
+of patent applications, not the final versions of granted patents, allowing us to study patentability at
+the time of filing using NLP methods for the first time.
+"""
 
 RANDOM_STATE = 1729
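The new try/except import works because ujson is a drop-in replacement exposing the same `load`/`loads` API the loader needs for the per-patent json files. A minimal sketch (the filename is hypothetical):

```python
try:
    import ujson as json   # faster C implementation, if installed
except ImportError:
    import json            # standard-library fallback, same call signatures

with open("US20160012345A1.json") as f:   # hypothetical per-patent json file
    patent = json.load(f)
print(patent.keys())
```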
@@ -73,8 +71,11 @@ class PatentsConfig(datasets.BuilderConfig):
     def __init__(
         self,
-        ipcr_label: str = None,  # 'G06F',
-        cpc_label: str = None,  # 'G06F',
+        metadata_url: str,
+        data_url: str,
+        data_dir: str,
+        ipcr_label: str = None,
+        cpc_label: str = None,
         train_filing_start_date: str = None,
         train_filing_end_date: str = None,
         val_filing_start_date: str = None,
@@ -82,7 +83,6 @@ class PatentsConfig(datasets.BuilderConfig):
         query_string: str = None,
         val_set_balancer=False,
         uniform_split=False,
-        train_only=False,
         **kwargs
     ):
         """
@@ -92,6 +92,9 @@ class PatentsConfig(datasets.BuilderConfig):
             train_filing_end_date.
 
         Args:
+            metadata_url: `string`, url from which to download the metadata file
+            data_url: `string`, url from which to download the json files
+            data_dir: `string`, folder (in cache) in which downloaded json files are stored
             ipcr_label: International Patent Classification code
             cpc_label: Cooperative Patent Classification code
             train_filing_start_date: Start date for patents in train set (and val set if random split is used)
@@ -101,6 +104,9 @@ class PatentsConfig(datasets.BuilderConfig):
             **kwargs: keyword arguments forwarded to super.
         """
         super().__init__(**kwargs)
+        self.metadata_url = metadata_url
+        self.data_url = data_url
+        self.data_dir = data_dir
         self.ipcr_label = ipcr_label
         self.cpc_label = cpc_label
         self.train_filing_start_date = train_filing_start_date
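For reference, the three new arguments have no defaults, so a hand-built config must supply them. A minimal sketch of direct construction; the URLs and folder name below are placeholders, not real endpoints:

```python
config = PatentsConfig(
    name="custom",
    metadata_url="https://example.com/metadata.feather",  # placeholder
    data_url="https://example.com/patent-data.tar",       # placeholder
    data_dir="patent-data",      # subfolder inside the extracted tar
    ipcr_label="G06F",           # optional: keep only G06F applications
    uniform_split=True,          # optional: random train/val split instead of date-based
)
```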
@@ -110,11 +116,10 @@ class PatentsConfig(datasets.BuilderConfig):
         self.query_string = query_string
         self.val_set_balancer = val_set_balancer
         self.uniform_split = uniform_split
-        self.train_only = train_only
 
 
 class Patents(datasets.GeneratorBasedBuilder):
-    """TODO: Add description"""
+    _DESCRIPTION
 
     VERSION = datasets.Version("1.0.1")
@@ -122,10 +127,22 @@ class Patents(datasets.GeneratorBasedBuilder):
     # If you don't want/need to define several sub-sets in your dataset,
     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
     BUILDER_CONFIG_CLASS = PatentsConfig
-    # BUILDER_CONFIGS = [
-    #     PatentsConfig(name="my_dataset_" + size, description="A small dataset", data_size=size)
-    #     for size in ["small", "medium", "large"]
-    # ]
+    BUILDER_CONFIGS = [
+        PatentsConfig(
+            name="sample",
+            description="Patent data from January 2016, for debugging",
+            metadata_url="https://huggingface.co/datasets/greeneggsandyaml/test-dataset-debug/resolve/main/metadata--Jan2016--2021-02-10.feather",
+            data_url="https://huggingface.co/datasets/greeneggsandyaml/test-dataset-debug/resolve/main/json-files-Jan2016.tar",
+            data_dir="json-files-Jan2016",
+        ),
+        PatentsConfig(
+            name="all",
+            description="Patent data from all years of the dataset",
+            metadata_url="https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-02-10.feather",
+            data_url="https://patentdiag.blob.core.windows.net/patent-data/distilled-2021-01-07.tar",
+            data_dir="distilled",
+        ),
+    ]
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -139,8 +156,7 @@ class Patents(datasets.GeneratorBasedBuilder):
             # specify them here. They'll be used if as_supervised=True in
             # builder.as_dataset.
             supervised_keys=("claims", "decision"),
-            # TODO: Homepage of the dataset for documentation
-            homepage="https://huggingface.co/great-new-dataset",
+            homepage="https://github.com/suzgunmirac/hupd",
             citation=_CITATION,
         )
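With the configs registered above, the dataset becomes loadable by name. A hedged usage sketch, assuming the script is hosted in the greeneggsandyaml/test-dataset-debug repo this commit belongs to; the date values are illustrative, and extra keyword arguments are forwarded to PatentsConfig by `load_dataset`:

```python
from datasets import load_dataset

dataset_dict = load_dataset(
    "greeneggsandyaml/test-dataset-debug",
    name="sample",                          # the small January 2016 config
    train_filing_start_date="2016-01-01",   # illustrative dates
    train_filing_end_date="2016-01-21",
    val_filing_start_date="2016-01-22",
    val_filing_end_date="2016-01-31",
)
print(dataset_dict)
```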
@@ -149,24 +165,16 @@ class Patents(datasets.GeneratorBasedBuilder):
         print(f'Loading dataset with config: {self.config}')
 
         # Download metadata
-        # NOTE: data_files is a path to a pickled pandas DataFrame
-        if self.config.data_files is None:
-            print(f'Loading or downloading metadata file: {_METADATA_URL}')
-            metadata_file = dl_manager.download_and_extract(_METADATA_URL)
-        else:
-            print(f'Using metadata file: {self.config.data_files}')
-            metadata_file = Path(self.config.data_files)
+        # NOTE: Metadata is stored as a Pandas DataFrame in Apache Feather format
+        metadata_file = dl_manager.download_and_extract(self.config.metadata_url)
+        print(f'Using metadata file: {metadata_file}')
 
         # Download data
-        # NOTE: data_dir is a path to a directory of json files, with one
-        # json file per patent application
-        if self.config.data_dir is None:
-            print('Loading or downloading data. If downloading, watch out! This is a huge file (360GB)!')
-            json_dir = Path(dl_manager.download_and_extract(_DATA_URL))
-            # NOTE: The extracted path contains a subfolder
-            json_dir = json_dir / _DATA_SUBFOLDER_NAME
-        else:
-            json_dir = Path(self.config.data_dir)
+        # NOTE: The extracted path contains a subfolder, data_dir. This directory holds
+        # a large number of json files (one json file per patent application).
+        download_dir = dl_manager.download_and_extract(self.config.data_url)
+        json_dir = os.path.join(download_dir, self.config.data_dir)
 
         # Load metadata file
         print(f'Reading metadata file: {metadata_file}')
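Since the metadata is a Feather-serialized pandas DataFrame, it can also be inspected outside the loader. A sketch, assuming the sample config's metadata file has already been downloaded locally:

```python
import pandas as pd

df = pd.read_feather("metadata--Jan2016--2021-02-10.feather")
# Columns used by this script include filing_date, main_ipcr_label,
# main_cpc_label, and status.
print(df.shape)
print(df['filing_date'].min(), df['filing_date'].max())
```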
@@ -181,32 +189,9 @@ class Patents(datasets.GeneratorBasedBuilder):
             df = df[df['main_cpc_label'].str.startswith(self.config.cpc_label)]
 
         # Filter metadata based on arbitrary query string
-        # TODO(suproteem): remove for production
         if self.config.query_string:
             df = df.query(self.config.query_string)
 
-        # Return only one dataset
-        if self.config.train_only:
-            if self.config.train_filing_start_date:
-                print(f'Filtering by train filing start date: {self.config.train_filing_start_date}')
-                df = df[df['filing_date'] >= self.config.train_filing_start_date]
-            if self.config.train_filing_end_date:
-                print(f'Filtering by train filing end date: {self.config.train_filing_end_date}')
-                df = df[df['filing_date'] <= self.config.train_filing_end_date]
-
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs=dict(  # kwargs passed to _generate_examples
-                        df=df,
-                        json_dir=json_dir,
-                        split='train',
-                    ),
-                )
-            ]
-
         # Train-validation split (either uniform or by date)
         if self.config.uniform_split:
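The body of the uniform-split branch is elided from this hunk. As an assumption-level sketch only (not the committed code), a uniform split seeded with the module-level RANDOM_STATE might look like:

```python
# Hypothetical: the split ratio is a guess; only the branch condition appears in the diff.
train_df = df.sample(frac=0.85, random_state=RANDOM_STATE)
val_df = df.drop(train_df.index)
```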
@@ -242,7 +227,7 @@ class Patents(datasets.GeneratorBasedBuilder):
                 (df['filing_date'] < self.config.val_filing_end_date)
             ]
 
-        # TODO: Can make this step faster
+        # TODO: We can probably make this step faster
         if self.config.val_set_balancer:
             rejected_df = val_df[val_df.status == 'REJECTED']
             num_rejected = len(rejected_df)
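The balancer body is cut off by the hunk boundary, but the intent is visible: equalize rejected and accepted applications in the validation set. A hedged completion; the 'ACCEPTED' status value and the sampling step are assumptions:

```python
rejected_df = val_df[val_df.status == 'REJECTED']
num_rejected = len(rejected_df)
# Assumption: downsample the accepted rows to match the rejected count.
accepted_df = val_df[val_df.status == 'ACCEPTED'].sample(
    n=num_rejected, random_state=RANDOM_STATE)
val_df = pd.concat([rejected_df, accepted_df])
```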
@@ -259,7 +244,7 @@ class Patents(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs=dict(  # kwargs passed to _generate_examples
+                gen_kwargs=dict(  # these kwargs are passed to _generate_examples
                     df=train_df,
                     json_dir=json_dir,
                     split='train',
@@ -311,6 +296,4 @@ class Patents(datasets.GeneratorBasedBuilder):
                 'date_published': patent['date_published'],
                 'examiner_id': patent['examiner_id'],
                 "ipc_label": patent["main_ipcr_label"],
-                # "all_cpc_labels": patent["cpc_labels"],  # these are lists, ignoring for now
-                # 'inventor_list': patent['inventor_list'],  # these are lists, ignoring for now
             }
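Taken together, a quick smoke test of the committed loader. Field names follow `_generate_examples` above; the 'train' split name comes from the SplitGenerator, and hosting the script at greeneggsandyaml/test-dataset-debug is an assumption:

```python
from datasets import load_dataset

ds = load_dataset("greeneggsandyaml/test-dataset-debug", name="sample")
example = ds["train"][0]
print(example["decision"])      # label half of supervised_keys
print(example["ipc_label"])     # taken from patent["main_ipcr_label"]
print(example["claims"][:200])  # text half of supervised_keys
```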