lukemelas committed on
Commit 8b6b3f1
1 Parent(s): 905f995

How does this work? The docs should really be better

.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ json-files-Jan2016.tar filter=lfs diff=lfs merge=lfs -text
+ metadata--Jan2016--2021-02-10.feather filter=lfs diff=lfs merge=lfs -text
json-files-Jan2016.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a7d7923941e39255112d2b40a40e8dae8579d9150459c1f0599ffe8a4cfb5a5
+ size 2024540160
metadata--Jan2016--2021-02-10.feather ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ce14560b1f610436f0f6810e38b28a71803aa2b995b27220578ed870e8bc620
+ size 10639266
test-dataset-debug.py ADDED
@@ -0,0 +1,305 @@
+ """TODO: Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+ import datetime
+ import pandas as pd
+ import numpy as np
+ from pathlib import Path
+ # from sklearn.utils import shuffle
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ authors={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ _DESCRIPTION = """TODO: Add description"""
+
+ # URLs for production
+ _METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-02-10.feather"
+ # _METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-01-21.feather"
+ _DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled-2021-01-07.tar"
+ _DATA_SUBFOLDER_NAME = 'distilled'
+
+ # # URLs for debugging
+ # _METADATA_URL = _DEBUG_METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata_debug-2021-02-10.feather"
+ # _DATA_URL = _DEBUG_DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled_debug-2021-01-07.tar"
+ # _DATA_SUBFOLDER_NAME = _DEBUG_DATA_SUBFOLDER_NAME = 'debug_distilled'
+
+ RANDOM_STATE = 1729
+
+
+ # Names of features
+ _FEATURES = [
+     "patent_number",
+     "decision",
+     "title",
+     "abstract",
+     "claims",
+     "background",
+     "summary",
+     "description",
+     "cpc_label",
+     "ipc_label",
+     "filing_date",
+     "patent_issue_date",
+     "date_published",
+     "examiner_id"
+ ]
+
+
+ def str_to_date(s):
+     """A helper function to convert strings to dates"""
+     return datetime.datetime.strptime(s, '%Y-%m-%d')
+
+
+ class PatentsConfig(datasets.BuilderConfig):
+     """BuilderConfig for Patents"""
+
+     def __init__(
+         self,
+         ipcr_label: str = None,  # 'G06F',
+         cpc_label: str = None,  # 'G06F',
+         train_filing_start_date: str = None,
+         train_filing_end_date: str = None,
+         val_filing_start_date: str = None,
+         val_filing_end_date: str = None,
+         query_string: str = None,
+         val_set_balancer=False,
+         uniform_split=False,
+         train_only=False,
+         **kwargs
+     ):
+         """
+         If train_filing_end_date is None, then a random train-val split will be used. If it is
+         specified, then the specified date range will be used for the split. If train_filing_end_date
+         is specified and val_filing_start_date is not specified, then val_filing_start_date defaults to
+         train_filing_end_date.
+
+         Args:
+             ipcr_label: International Patent Classification code
+             cpc_label: Cooperative Patent Classification code
+             train_filing_start_date: Start date for patents in train set (and val set if random split is used)
+             train_filing_end_date: End date for patents in train set
+             val_filing_start_date: Start date for patents in val set
+             val_filing_end_date: End date for patents in val set (and train set if random split is used)
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+         self.ipcr_label = ipcr_label
+         self.cpc_label = cpc_label
+         self.train_filing_start_date = train_filing_start_date
+         self.train_filing_end_date = train_filing_end_date
+         self.val_filing_start_date = val_filing_start_date
+         self.val_filing_end_date = val_filing_end_date
+         self.query_string = query_string
+         self.val_set_balancer = val_set_balancer
+         self.uniform_split = uniform_split
+         self.train_only = train_only
+
+
+ class Patents(datasets.GeneratorBasedBuilder):
+     """TODO: Add description"""
+
+     VERSION = datasets.Version("1.0.1")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+     BUILDER_CONFIG_CLASS = PatentsConfig
+     # BUILDER_CONFIGS = [
+     #     PatentsConfig(name="my_dataset_" + size, description="A small dataset", data_size=size)
+     #     for size in ["small", "medium", "large"]
+     # ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(
+                 {k: datasets.Value("string") for k in _FEATURES}
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=("claims", "decision"),
+             # TODO: Homepage of the dataset for documentation
+             homepage="https://huggingface.co/great-new-dataset",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         """Returns SplitGenerators."""
+         print(f'Loading dataset with config: {self.config}')
+
+         # Download metadata
+         # NOTE: data_files is a path to the metadata DataFrame (a feather file)
+         if self.config.data_files is None:
+             print(f'Loading / downloading metadata file: {_METADATA_URL}')
+             metadata_file = dl_manager.download_and_extract(_METADATA_URL)
+         else:
+             print(f'Using metadata file: {self.config.data_files}')
+             metadata_file = Path(self.config.data_files)
+
+         # Download data
+         # NOTE: data_dir is a path to a directory of json files, with one
+         # json file per patent application
+         if self.config.data_dir is None:
+             print('Loading / downloading data. This is a big file (360GB)!')
+             json_dir = Path(dl_manager.download_and_extract(_DATA_URL))
+             # NOTE: The extracted path contains a subfolder
+             json_dir = json_dir / _DATA_SUBFOLDER_NAME
+         else:
+             json_dir = Path(self.config.data_dir)
+
+         # Load metadata file
+         print(f'Reading metadata file: {metadata_file}')
+         df = pd.read_feather(metadata_file)  # previously: pd.read_pickle(metadata_file)
+
+         # Filter based on IPCR / CPC label
+         if self.config.ipcr_label:
+             print(f'Filtering by IPCR label: {self.config.ipcr_label}')
+             df = df[df['main_ipcr_label'].str.startswith(self.config.ipcr_label)]
+         elif self.config.cpc_label:
+             print(f'Filtering by CPC label: {self.config.cpc_label}')
+             df = df[df['main_cpc_label'].str.startswith(self.config.cpc_label)]
+
+         # Filter metadata based on arbitrary query string
+         # TODO(suproteem): remove for production
+         if self.config.query_string:
+             df = df.query(self.config.query_string)
+
+         # Return only one dataset
+         if self.config.train_only:
+             if self.config.train_filing_start_date:
+                 print(f'Filtering by train filing start date: {self.config.train_filing_start_date}')
+                 df = df[df['filing_date'] >= self.config.train_filing_start_date]
+             if self.config.train_filing_end_date:
+                 print(f'Filtering by train filing end date: {self.config.train_filing_end_date}')
+                 df = df[df['filing_date'] <= self.config.train_filing_end_date]
+
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs=dict(  # kwargs passed to _generate_examples
+                         df=df,
+                         json_dir=json_dir,
+                         split='train',
+                     ),
+                 )
+             ]
+
+         # Train-validation split (either uniform or by date)
+         if self.config.uniform_split:
+
+             # Assumes that train_filing_start_date < val_filing_end_date
+             if self.config.train_filing_start_date:
+                 df = df[df['filing_date'] >= self.config.train_filing_start_date]
+             if self.config.val_filing_end_date:
+                 df = df[df['filing_date'] <= self.config.val_filing_end_date]
+             df = df.sample(frac=1.0, random_state=RANDOM_STATE)
+             num_train_samples = int(len(df) * 0.85)
+             train_df = df.iloc[0:num_train_samples]
+             val_df = df.iloc[num_train_samples:]
+
+         else:
+
+             # Does not assume that train_filing_start_date < val_filing_end_date
+             # Fall back to the unfiltered dataframe when a date bound is not given,
+             # so that train_df / val_df are always defined
+             tdf = df
+             if self.config.train_filing_start_date:
+                 print(f'Filtering by train filing start date: {self.config.train_filing_start_date}')
+                 tdf = df[df['filing_date'] >= self.config.train_filing_start_date]
+             train_df = tdf
+             if self.config.train_filing_end_date:
+                 print(f'Filtering by train filing end date: {self.config.train_filing_end_date}')
+                 train_df = tdf[tdf['filing_date'] <= self.config.train_filing_end_date]
+
+             vdf = df
+             if self.config.val_filing_start_date:
+                 print(f'Filtering by val filing start date: {self.config.val_filing_start_date}')
+                 vdf = df[df['filing_date'] >= self.config.val_filing_start_date]
+             val_df = vdf
+             if self.config.val_filing_end_date:
+                 print(f'Filtering by val filing end date: {self.config.val_filing_end_date}')
+                 val_df = vdf[vdf['filing_date'] <= self.config.val_filing_end_date]
+
+         # TODO: Can make this step faster
+         if self.config.val_set_balancer:
+             rejected_df = val_df[val_df.status == 'REJECTED']
+             num_rejected = len(rejected_df)
+             accepted_df = val_df[val_df.status == 'ACCEPTED']
+             num_accepted = len(accepted_df)
+             if num_rejected < num_accepted:
+                 accepted_df = accepted_df.sample(frac=1.0, random_state=RANDOM_STATE)  # shuffle(accepted_df)
+                 accepted_df = accepted_df[:num_rejected]
+             else:
+                 rejected_df = rejected_df.sample(frac=1.0, random_state=RANDOM_STATE)  # shuffle(rejected_df)
+                 rejected_df = rejected_df[:num_accepted]
+             val_df = pd.concat([rejected_df, accepted_df])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs=dict(  # kwargs passed to _generate_examples
+                     df=train_df,
+                     json_dir=json_dir,
+                     split='train',
+                 ),
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs=dict(
+                     df=val_df,
+                     json_dir=json_dir,
+                     split='val',
+                 ),
+             ),
+         ]
+
+     def _generate_examples(self, df, json_dir, split):
+         """Yields examples by loading JSON files containing patent applications."""
+
+         # NOTE: df.itertuples() is way faster than df.iterrows()
+         for id_, x in enumerate(df.itertuples()):
+
+             # JSON files are named by application number (unique)
+             application_number = x.application_number
+             filepath = json_dir / (application_number + '.json')
+             try:
+                 with open(filepath, 'r') as f:
+                     patent = json.load(f)
+             except Exception as e:
+                 print('------------')
+                 print(f'ERROR WITH {filepath}\n')
+                 print(repr(e))
+                 print()
+                 yield id_, {k: "error" for k in _FEATURES}
+                 continue  # skip to the next application: `patent` was never loaded
+
+             # Most up-to-date decision is the one in the metadata dataframe
+             decision = x.decision
+             yield id_, {
+                 "patent_number": application_number,
+                 "decision": decision,
+                 "title": patent["title"],
+                 "abstract": patent["abstract"],
+                 "claims": patent["claims"],
+                 "description": patent["full_description"],
+                 "background": patent["background"],
+                 "summary": patent["summary"],
+                 "cpc_label": patent["main_cpc_label"],
+                 'filing_date': patent['filing_date'],
+                 'patent_issue_date': patent['patent_issue_date'],
+                 'date_published': patent['date_published'],
+                 'examiner_id': patent['examiner_id'],
+                 "ipc_label": patent["main_ipcr_label"],
+                 # "all_cpc_labels": patent["cpc_labels"],  # these are lists, ignoring for now
+                 # 'inventor_list': patent['inventor_list'],  # these are lists, ignoring for now
+             }
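
For reference, a minimal usage sketch of the script added above, assuming it is saved locally as patents.py and the download URLs are reachable. The dataset path, the dates, and the commented-out local-path overrides below are illustrative, not taken from this commit; every extra keyword argument is forwarded to PatentsConfig.

import datasets

dataset_dict = datasets.load_dataset(
    'patents.py',                           # hypothetical local path to the loading script
    ipcr_label=None,                        # e.g. 'G06F' to keep a single IPC subclass
    train_filing_start_date='2016-01-01',   # illustrative dates for a date-based split
    train_filing_end_date='2016-01-21',
    val_filing_start_date='2016-01-22',
    val_filing_end_date='2016-01-31',
    uniform_split=False,
    # data_dir='/path/to/distilled',        # optional: local copy of the per-application JSON folder
    # data_files='/path/to/metadata.feather',  # optional: local copy of the metadata file
)

train_set = dataset_dict['train']
val_set = dataset_dict['validation']
print(train_set[0]['decision'])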