egm517 committed
Commit
481bcd6
1 Parent(s): ffe6dcb

Delete hupd.py

Files changed (1)
  1. hupd.py +0 -339
hupd.py DELETED
@@ -1,339 +0,0 @@
"""
The Harvard USPTO Patent Dataset (HUPD) is a large-scale, well-structured, and multi-purpose corpus
of English-language patent applications filed to the United States Patent and Trademark Office (USPTO)
between 2004 and 2018. With more than 4.5 million patent documents, HUPD is two to three times larger
than comparable corpora. Unlike other NLP patent datasets, HUPD contains the inventor-submitted versions
of patent applications, not the final versions of granted patents, allowing us to study patentability at
the time of filing using NLP methods for the first time.
"""

from __future__ import absolute_import, division, print_function

import os
import datetime
import pandas as pd
import numpy as np
from pathlib import Path

# Prefer the faster ujson if it is installed; fall back to the standard library.
try:
    import ujson as json
except ImportError:
    import json

import datasets


_CITATION = """\
@InProceedings{suzgun2021:hupd,
  title={The Harvard USPTO Patent Dataset},
  authors={Mirac Suzgun and Suproteem Sarkar and Luke Melas-Kyriazi and Scott Kominers and Stuart Shieber},
  year={2021}
}
"""

_DESCRIPTION = """
The Harvard USPTO Patent Dataset (HUPD) is a large-scale, well-structured, and multi-purpose corpus
of English-language patent applications filed to the United States Patent and Trademark Office (USPTO)
between 2004 and 2018. With more than 4.5 million patent documents, HUPD is two to three times larger
than comparable corpora. Unlike other NLP patent datasets, HUPD contains the inventor-submitted versions
of patent applications, not the final versions of granted patents, allowing us to study patentability at
the time of filing using NLP methods for the first time.
"""

RANDOM_STATE = 1729

_FEATURES = [
    "patent_number",
    "decision",
    "title",
    "abstract",
    "claims",
    "background",
    "summary",
    "description",
    "cpc_label",
    "ipc_label",
    "filing_date",
    "patent_issue_date",
    "date_published",
    "examiner_id",
]


def str_to_date(s):
    """A helper function to convert strings to dates."""
    return datetime.datetime.strptime(s, '%Y-%m-%d')


class PatentsConfig(datasets.BuilderConfig):
    """BuilderConfig for Patents."""

    def __init__(
        self,
        metadata_url: str,
        data_url: str,
        data_dir: str,
        ipcr_label: str = None,
        cpc_label: str = None,
        train_filing_start_date: str = None,
        train_filing_end_date: str = None,
        val_filing_start_date: str = None,
        val_filing_end_date: str = None,
        query_string: str = None,
        val_set_balancer=False,
        uniform_split=False,
        force_extract=False,
        **kwargs
    ):
        """
        If train_filing_end_date is None, then a random train-val split will be used. If it is
        specified, then the specified date range will be used for the split. If train_filing_end_date
        is specified and val_filing_start_date is not specified, then val_filing_start_date defaults to
        train_filing_end_date.

        Args:
            metadata_url: `string`, url from which to download the metadata file
            data_url: `string`, url from which to download the json files
            data_dir: `string`, folder (in cache) in which downloaded json files are stored
            ipcr_label: International Patent Classification code
            cpc_label: Cooperative Patent Classification code
            train_filing_start_date: Start date for patents in train set (and val set if random split is used)
            train_filing_end_date: End date for patents in train set
            val_filing_start_date: Start date for patents in val set
            val_filing_end_date: End date for patents in val set (and train set if random split is used)
            query_string: `string`, an optional pandas query used to filter the metadata DataFrame
            val_set_balancer: If True, downsample the validation set so that accepted and rejected
                applications are balanced
            uniform_split: If True, use a random 85/15 train-validation split instead of a date-based one
            force_extract: Extract only the relevant years if this parameter is used
            **kwargs: keyword arguments forwarded to super
        """
        super().__init__(**kwargs)
        self.metadata_url = metadata_url
        self.data_url = data_url
        self.data_dir = data_dir
        self.ipcr_label = ipcr_label
        self.cpc_label = cpc_label
        self.train_filing_start_date = train_filing_start_date
        self.train_filing_end_date = train_filing_end_date
        self.val_filing_start_date = val_filing_start_date
        self.val_filing_end_date = val_filing_end_date
        self.query_string = query_string
        self.val_set_balancer = val_set_balancer
        self.uniform_split = uniform_split
        self.force_extract = force_extract
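

# Example (an added usage sketch, not part of the original script): these config arguments
# are normally forwarded through `datasets.load_dataset`. The repository name and date
# values below are only illustrative.
#
#     dataset = datasets.load_dataset(
#         "HUPD/hupd",  # or the path of the repository hosting this script
#         name="sample",
#         train_filing_start_date="2016-01-01",
#         train_filing_end_date="2016-01-21",
#         val_filing_start_date="2016-01-22",
#         val_filing_end_date="2016-01-31",
#     )
#
# Passing uniform_split=True instead selects a random 85/15 train-validation split.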


class Patents(datasets.GeneratorBasedBuilder):
    """HUPD: English-language patent applications filed to the USPTO between 2004 and 2018."""

    VERSION = datasets.Version("1.0.2")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    BUILDER_CONFIG_CLASS = PatentsConfig
    BUILDER_CONFIGS = [
        PatentsConfig(
            name="sample",
            description="Patent data from January 2016, for debugging",
            metadata_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_jan16_2022-02-22.feather",
            data_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/data/sample-jan-2016.tar.gz",
            data_dir="sample",  # this will unpack to data/sample/2016
        ),
        PatentsConfig(
            name="2016to2018",
            description="Patent data from 2016 to 2018, for debugging",
            metadata_url="https://huggingface.co/datasets/hupd_augmented/resolve/main/hupd_metadata_2016_to_2018.feather",
            data_url="https://huggingface.co/datasets/hupd_augmented/resolve/main/data/2018.tar.gz",
            data_dir="sample",  # this will unpack to data/sample/2018
        ),
        PatentsConfig(
            name="all",
            description="Patent data from all years (2004-2018)",
            metadata_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_2022-02-22.feather",
            data_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/data/all-years.tar",
            data_dir="data",  # this will unpack to data/{year}
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=datasets.Features(
                {k: datasets.Value("string") for k in _FEATURES}
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=("claims", "decision"),
            homepage="https://github.com/suzgunmirac/hupd",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        print(f'Loading dataset with config: {self.config}')

        # Download metadata
        # NOTE: Metadata is stored as a Pandas DataFrame in Apache Feather format
        metadata_url = self.config.metadata_url
        metadata_file = dl_manager.download_and_extract(self.config.metadata_url)
        print(f'Using metadata file: {metadata_file}')

        # Download data
        # NOTE: The extracted path contains a subfolder, data_dir. This directory holds
        # a large number of json files (one json file per patent application).
        download_dir = dl_manager.download_and_extract(self.config.data_url)
        json_dir = os.path.join(download_dir, self.config.data_dir)

        # Load metadata file
        print(f'Reading metadata file: {metadata_file}')
        if metadata_url.endswith('.feather'):
            df = pd.read_feather(metadata_file)
        elif metadata_url.endswith('.csv'):
            df = pd.read_csv(metadata_file)
        elif metadata_url.endswith('.tsv'):
            df = pd.read_csv(metadata_file, delimiter='\t')
        elif metadata_url.endswith('.pickle'):
            df = pd.read_pickle(metadata_file)
        else:
            raise ValueError(f'Metadata file invalid: {metadata_url}')

        # Filter based on IPCR / CPC label
        if self.config.ipcr_label:
            print(f'Filtering by IPCR label: {self.config.ipcr_label}')
            df = df[df['main_ipcr_label'].str.startswith(self.config.ipcr_label)]
        elif self.config.cpc_label:
            print(f'Filtering by CPC label: {self.config.cpc_label}')
            df = df[df['main_cpc_label'].str.startswith(self.config.cpc_label)]

        # Filter metadata based on an arbitrary query string
        if self.config.query_string:
            df = df.query(self.config.query_string)

        if self.config.force_extract:
            if self.config.name == 'all':
                # Determine which yearly archives actually need to be extracted
                if self.config.train_filing_start_date and self.config.val_filing_end_date:
                    if self.config.train_filing_end_date and self.config.val_filing_start_date:
                        training_year_range = set(range(int(self.config.train_filing_start_date[:4]), int(self.config.train_filing_end_date[:4]) + 1))
                        validation_year_range = set(range(int(self.config.val_filing_start_date[:4]), int(self.config.val_filing_end_date[:4]) + 1))
                        full_year_range = training_year_range.union(validation_year_range)
                    else:
                        full_year_range = set(range(int(self.config.train_filing_start_date[:4]), int(self.config.val_filing_end_date[:4]) + 1))
                else:
                    full_year_range = set(range(2004, 2019))

                import tarfile
                for year in full_year_range:
                    tar_file_path = f'{json_dir}/{year}.tar.gz'
                    print(f'Extracting {tar_file_path}')
                    with tarfile.open(tar_file_path) as tar_file:
                        tar_file.extractall(json_dir)

        # Train-validation split (either uniform or by date)
        if self.config.uniform_split:

            # Assumes that train_filing_start_date < val_filing_end_date
            if self.config.train_filing_start_date:
                df = df[df['filing_date'] >= self.config.train_filing_start_date]
            if self.config.val_filing_end_date:
                df = df[df['filing_date'] <= self.config.val_filing_end_date]
            df = df.sample(frac=1.0, random_state=RANDOM_STATE)
            num_train_samples = int(len(df) * 0.85)
            train_df = df.iloc[:num_train_samples]
            val_df = df.iloc[num_train_samples:]

        else:

            # Check that all four split dates are specified
            if not (self.config.train_filing_start_date and self.config.train_filing_end_date and
                    self.config.val_filing_start_date and self.config.val_filing_end_date):
                raise ValueError("Please either use uniform_split or specify your exact "
                                 "training and validation split dates.")

            # Does not assume that train_filing_start_date < val_filing_end_date
            print(f'Filtering train dataset by filing start date: {self.config.train_filing_start_date}')
            print(f'Filtering train dataset by filing end date: {self.config.train_filing_end_date}')
            print(f'Filtering val dataset by filing start date: {self.config.val_filing_start_date}')
            print(f'Filtering val dataset by filing end date: {self.config.val_filing_end_date}')
            train_df = df[
                (df['filing_date'] >= self.config.train_filing_start_date) &
                (df['filing_date'] < self.config.train_filing_end_date)
            ]
            val_df = df[
                (df['filing_date'] >= self.config.val_filing_start_date) &
                (df['filing_date'] < self.config.val_filing_end_date)
            ]

        # TODO: We can probably make this step faster
        if self.config.val_set_balancer:
            rejected_df = val_df[val_df.status == 'REJECTED']
            num_rejected = len(rejected_df)
            accepted_df = val_df[val_df.status == 'ACCEPTED']
            num_accepted = len(accepted_df)
            if num_rejected < num_accepted:
                accepted_df = accepted_df.sample(frac=1.0, random_state=RANDOM_STATE)  # shuffle(accepted_df)
                accepted_df = accepted_df[:num_rejected]
            else:
                rejected_df = rejected_df.sample(frac=1.0, random_state=RANDOM_STATE)  # shuffle(rejected_df)
                rejected_df = rejected_df[:num_accepted]
            val_df = pd.concat([rejected_df, accepted_df])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=dict(  # these kwargs are passed to _generate_examples
                    df=train_df,
                    json_dir=json_dir,
                    split='train',
                ),
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=dict(
                    df=val_df,
                    json_dir=json_dir,
                    split='val',
                ),
            ),
        ]

    def _generate_examples(self, df, json_dir, split):
        """Yields examples by loading JSON files containing patent applications."""

        # NOTE: df.itertuples() is way faster than df.iterrows()
        for id_, x in enumerate(df.itertuples()):

            # JSON files are named by application number (unique)
            application_year = str(x.filing_date.year)
            application_number = x.application_number
            filepath = os.path.join(json_dir, application_year, application_number + '.json')
            try:
                with open(filepath, 'r') as f:
                    patent = json.load(f)
            except Exception as e:
                print('------------')
                print(f'ERROR WITH {filepath}\n')
                print(repr(e))
                print()
                yield id_, {k: "error" for k in _FEATURES}
                # Skip to the next application; `patent` is undefined if loading failed
                continue

            # Most up-to-date decision is in the metadata dataframe
            decision = x.decision
            yield id_, {
                "patent_number": application_number,
                "decision": patent["decision"],  # decision,
                "title": patent["title"],
                "abstract": patent["abstract"],
                "claims": patent["claims"],
                "description": patent["full_description"],
                "background": patent["background"],
                "summary": patent["summary"],
                "cpc_label": patent["main_cpc_label"],
                "ipc_label": patent["main_ipcr_label"],
                "filing_date": patent["filing_date"],
                "patent_issue_date": patent["patent_issue_date"],
                "date_published": patent["date_published"],
                "examiner_id": patent["examiner_id"],
            }
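

# The block below is an added, minimal smoke-test sketch, not part of the original script.
# It assumes this file is used directly as a `datasets` loading script and that the small
# "sample" configuration is reachable; recent `datasets` releases may additionally require
# passing trust_remote_code=True to load_dataset.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the January-2016 sample with a random 85/15 train-validation split.
    ds = load_dataset(__file__, name="sample", uniform_split=True)
    print(ds)
    print(ds["train"][0]["title"])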