egm517 committed
Commit 705586e
1 Parent(s): fda83af

Upload hupd.py

Files changed (1)
  1. hupd.py +333 -0
hupd.py ADDED
@@ -0,0 +1,333 @@
"""
The Harvard USPTO Patent Dataset (HUPD) is a large-scale, well-structured, and multi-purpose corpus
of English-language patent applications filed to the United States Patent and Trademark Office (USPTO)
between 2004 and 2018. With more than 4.5 million patent documents, HUPD is two to three times larger
than comparable corpora. Unlike other NLP patent datasets, HUPD contains the inventor-submitted versions
of patent applications, not the final versions of granted patents, allowing us to study patentability at
the time of filing using NLP methods for the first time.
"""

from __future__ import absolute_import, division, print_function

import os
import datetime
import pandas as pd
import numpy as np
from pathlib import Path
try:
    import ujson as json
except ImportError:
    import json

import datasets

_CITATION = """\
@InProceedings{suzgun2021:hupd,
    title = {The Harvard USPTO Patent Dataset},
    authors={Mirac Suzgun and Suproteem Sarkar and Luke Melas-Kyriazi and Scott Kominers and Stuart Shieber},
    year={2021}
}
"""

_DESCRIPTION = """
The Harvard USPTO Patent Dataset (HUPD) is a large-scale, well-structured, and multi-purpose corpus
of English-language patent applications filed to the United States Patent and Trademark Office (USPTO)
between 2004 and 2018. With more than 4.5 million patent documents, HUPD is two to three times larger
than comparable corpora. Unlike other NLP patent datasets, HUPD contains the inventor-submitted versions
of patent applications, not the final versions of granted patents, allowing us to study patentability at
the time of filing using NLP methods for the first time.
"""

RANDOM_STATE = 1729

_FEATURES = [
    "patent_number",
    "decision",
    "title",
    "abstract",
    "claims",
    "background",
    "summary",
    "description",
    "cpc_label",
    "ipc_label",
    "filing_date",
    "patent_issue_date",
    "date_published",
    "examiner_id",
]


def str_to_date(s):
    """A helper function to convert strings to dates"""
    return datetime.datetime.strptime(s, '%Y-%m-%d')

class PatentsConfig(datasets.BuilderConfig):
    """BuilderConfig for Patents"""

    def __init__(
        self,
        metadata_url: str,
        data_url: str,
        data_dir: str,
        ipcr_label: str = None,
        cpc_label: str = None,
        train_filing_start_date: str = None,
        train_filing_end_date: str = None,
        val_filing_start_date: str = None,
        val_filing_end_date: str = None,
        query_string: str = None,
        val_set_balancer=False,
        uniform_split=False,
        force_extract=False,
        **kwargs
    ):
        """
        If train_filing_end_date is None, then a random train-val split will be used. If it is
        specified, then the specified date range will be used for the split. If train_filing_end_date
        is specified and val_filing_start_date is not specified, then val_filing_start_date defaults to
        train_filing_end_date.

        Args:
            metadata_url: `string`, URL from which to download the metadata file
            data_url: `string`, URL from which to download the JSON files
            data_dir: `string`, folder (in cache) in which downloaded JSON files are stored
            ipcr_label: International Patent Classification code
            cpc_label: Cooperative Patent Classification code
            train_filing_start_date: Start date for patents in the train set (and val set if a random split is used)
            train_filing_end_date: End date for patents in the train set
            val_filing_start_date: Start date for patents in the val set
            val_filing_end_date: End date for patents in the val set (and train set if a random split is used)
            force_extract: If set, extract only the tar archives for the relevant years
            **kwargs: keyword arguments forwarded to super
        """
        super().__init__(**kwargs)
        self.metadata_url = metadata_url
        self.data_url = data_url
        self.data_dir = data_dir
        self.ipcr_label = ipcr_label
        self.cpc_label = cpc_label
        self.train_filing_start_date = train_filing_start_date
        self.train_filing_end_date = train_filing_end_date
        self.val_filing_start_date = val_filing_start_date
        self.val_filing_end_date = val_filing_end_date
        self.query_string = query_string
        self.val_set_balancer = val_set_balancer
        self.uniform_split = uniform_split
        self.force_extract = force_extract

class Patents(datasets.GeneratorBasedBuilder):
    _DESCRIPTION

    VERSION = datasets.Version("1.0.2")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    BUILDER_CONFIG_CLASS = PatentsConfig
    BUILDER_CONFIGS = [
        PatentsConfig(
            name="sample",
            description="Patent data from January 2016, for debugging",
            metadata_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_jan16_2022-02-22.feather",
            data_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/data/sample-jan-2016.tar.gz",
            data_dir="sample",  # this will unpack to data/sample/2016
        ),
        PatentsConfig(
            name="all",
            description="Patent data from all years (2004-2018)",
            metadata_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_2022-02-22.feather",
            data_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/data/all-years.tar",
            data_dir="data",  # this will unpack to data/{year}
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=datasets.Features(
                {k: datasets.Value("string") for k in _FEATURES}
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=("claims", "decision"),
            homepage="https://github.com/suzgunmirac/hupd",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        print(f'Loading dataset with config: {self.config}')

        # Download metadata
        # NOTE: Metadata is stored as a Pandas DataFrame in Apache Feather format
        metadata_url = self.config.metadata_url
        metadata_file = dl_manager.download_and_extract(self.config.metadata_url)
        print(f'Using metadata file: {metadata_file}')

        # Download data
        # NOTE: The extracted path contains a subfolder, data_dir. This directory holds
        # a large number of json files (one json file per patent application).
        download_dir = dl_manager.download_and_extract(self.config.data_url)
        json_dir = os.path.join(download_dir, self.config.data_dir)

        # Load metadata file
        print(f'Reading metadata file: {metadata_file}')
        if metadata_url.endswith('.feather'):
            df = pd.read_feather(metadata_file)
        elif metadata_url.endswith('.csv'):
            df = pd.read_csv(metadata_file)
        elif metadata_url.endswith('.tsv'):
            df = pd.read_csv(metadata_file, delimiter='\t')
        elif metadata_url.endswith('.pickle'):
            df = pd.read_pickle(metadata_file)
        else:
            raise ValueError(f'Metadata file invalid: {metadata_url}')

        # Filter based on IPCR / CPC label
        if self.config.ipcr_label:
            print(f'Filtering by IPCR label: {self.config.ipcr_label}')
            df = df[df['main_ipcr_label'].str.startswith(self.config.ipcr_label)]
        elif self.config.cpc_label:
            print(f'Filtering by CPC label: {self.config.cpc_label}')
            df = df[df['main_cpc_label'].str.startswith(self.config.cpc_label)]

        # Filter metadata based on arbitrary query string
        if self.config.query_string:
            df = df.query(self.config.query_string)

        if self.config.force_extract:
            if self.config.name == 'all':
                if self.config.train_filing_start_date and self.config.val_filing_end_date:
                    if self.config.train_filing_end_date and self.config.val_filing_start_date:
                        training_year_range = set(range(int(self.config.train_filing_start_date[:4]), int(self.config.train_filing_end_date[:4]) + 1))
                        validation_year_range = set(range(int(self.config.val_filing_start_date[:4]), int(self.config.val_filing_end_date[:4]) + 1))
                        full_year_range = training_year_range.union(validation_year_range)
                    else:
                        full_year_range = set(range(int(self.config.train_filing_start_date[:4]), int(self.config.val_filing_end_date[:4]) + 1))
                else:
                    full_year_range = set(range(2004, 2019))

                import tarfile
                for year in full_year_range:
                    tar_file_path = f'{json_dir}/{year}.tar.gz'
                    print(f'Extracting {tar_file_path}')
                    # open file
                    tar_file = tarfile.open(tar_file_path)
                    # extracting file
                    tar_file.extractall(f'{json_dir}')
                    tar_file.close()

        # Train-validation split (either uniform or by date)
        if self.config.uniform_split:

            # Assumes that train_filing_start_date < val_filing_end_date
            if self.config.train_filing_start_date:
                df = df[df['filing_date'] >= self.config.train_filing_start_date]
            if self.config.val_filing_end_date:
                df = df[df['filing_date'] <= self.config.val_filing_end_date]
            df = df.sample(frac=1.0, random_state=RANDOM_STATE)
            num_train_samples = int(len(df) * 0.85)
            train_df = df.iloc[0:num_train_samples]
            val_df = df.iloc[num_train_samples:]

        else:

            # Check that all four split dates were provided
            if not (self.config.train_filing_start_date and self.config.train_filing_end_date and
                    self.config.val_filing_start_date and self.config.val_filing_end_date):
                raise ValueError("Please either use uniform_split or specify your exact "
                                 "training and validation split dates.")

            # Does not assume that train_filing_start_date < val_filing_end_date
            print(f'Filtering train dataset by filing start date: {self.config.train_filing_start_date}')
            print(f'Filtering train dataset by filing end date: {self.config.train_filing_end_date}')
            print(f'Filtering val dataset by filing start date: {self.config.val_filing_start_date}')
            print(f'Filtering val dataset by filing end date: {self.config.val_filing_end_date}')
            train_df = df[
                (df['filing_date'] >= self.config.train_filing_start_date) &
                (df['filing_date'] < self.config.train_filing_end_date)
            ]
            val_df = df[
                (df['filing_date'] >= self.config.val_filing_start_date) &
                (df['filing_date'] < self.config.val_filing_end_date)
            ]

        # TODO: We can probably make this step faster
        if self.config.val_set_balancer:
            rejected_df = val_df[val_df.status == 'REJECTED']
            num_rejected = len(rejected_df)
            accepted_df = val_df[val_df.status == 'ACCEPTED']
            num_accepted = len(accepted_df)
            if num_rejected < num_accepted:
                accepted_df = accepted_df.sample(frac=1.0, random_state=RANDOM_STATE)  # shuffle(accepted_df)
                accepted_df = accepted_df[:num_rejected]
            else:
                rejected_df = rejected_df.sample(frac=1.0, random_state=RANDOM_STATE)  # shuffle(rejected_df)
                rejected_df = rejected_df[:num_accepted]
            val_df = pd.concat([rejected_df, accepted_df])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=dict(  # these kwargs are passed to _generate_examples
                    df=train_df,
                    json_dir=json_dir,
                    split='train',
                ),
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=dict(
                    df=val_df,
                    json_dir=json_dir,
                    split='val',
                ),
            ),
        ]

    def _generate_examples(self, df, json_dir, split):
        """Yields examples by loading JSON files containing patent applications."""

        # NOTE: df.itertuples() is way faster than df.iterrows()
        for id_, x in enumerate(df.itertuples()):

            # JSON files are named by application number (unique)
            application_year = str(x.filing_date.year)
            application_number = x.application_number
            filepath = os.path.join(json_dir, application_year, application_number + '.json')
            try:
                with open(filepath, 'r') as f:
                    patent = json.load(f)
            except Exception as e:
                print('------------')
                print(f'ERROR WITH {filepath}\n')
                print(repr(e))
                print()
                yield id_, {k: "error" for k in _FEATURES}
                continue  # skip to the next application rather than reusing a stale `patent`

            # Most up-to-date decision in the metadata dataframe
            decision = x.decision
            yield id_, {
                "patent_number": application_number,
                "decision": patent["decision"],  # decision,
                "title": patent["title"],
                "abstract": patent["abstract"],
                "claims": patent["claims"],
                "description": patent["full_description"],
                "background": patent["background"],
                "summary": patent["summary"],
                "cpc_label": patent["main_cpc_label"],
                'filing_date': patent['filing_date'],
                'patent_issue_date': patent['patent_issue_date'],
                'date_published': patent['date_published'],
                'examiner_id': patent['examiner_id'],
                "ipc_label": patent["main_ipcr_label"],
            }
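
Usage note: the keyword arguments defined on PatentsConfig above are forwarded through datasets.load_dataset, so a classification filter and a date-based split can be requested at load time. A minimal sketch, assuming the script is consumed as the HUPD/hupd dataset repository and that the metadata/data URLs in BUILDER_CONFIGS are reachable; the IPCR label and filing dates below are illustrative values, not requirements:

import datasets

# "sample" config: January 2016 applications only, convenient for debugging.
# All four filing dates must be supplied for a date-based split; pass
# uniform_split=True instead to get a random 85/15 split.
# Depending on the installed `datasets` version, trust_remote_code=True may
# also be required to run a script-based dataset like this one.
dataset_dict = datasets.load_dataset(
    "HUPD/hupd",
    name="sample",
    ipcr_label="G06F",                     # illustrative IPCR filter; omit to keep every class
    train_filing_start_date="2016-01-01",
    train_filing_end_date="2016-01-21",
    val_filing_start_date="2016-01-22",
    val_filing_end_date="2016-01-31",
)

print(dataset_dict)                        # DatasetDict with 'train' and 'validation' splits
print(dataset_dict["train"][0]["decision"])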