christopherastone committed on
Commit 785eaab
1 Parent(s): cd5ca73

Simplify paper IDs, include raw proofs and paper tags

Files changed (5):
  1. prooflang.py +27 -31
  2. proofs.zip +2 -2
  3. raw.zip +3 -0
  4. sentences.zip +2 -2
  5. tags.zip +3 -0
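
This commit renames the per-example `fileID` column to `paper`, bumps the loader version to 0.6.0, and adds two configurations, `raw` and `tags`, alongside the existing `proofs` and `sentences`. A minimal sketch of loading the updated configurations; the repo id `proofcheck/prooflang` is taken from the `_HOMEPAGE` value in the diff below, and the column names come from the new `features` definitions:

    # Sketch only: assumes the `datasets` library and the repo id from _HOMEPAGE.
    from datasets import load_dataset

    proofs = load_dataset("proofcheck/prooflang", "proofs", split="train")
    tags = load_dataset("proofcheck/prooflang", "tags", split="train")

    print(proofs[0]["paper"], proofs[0]["proof"][:80])  # columns: paper, proof
    print(tags[0]["paper"], tags[0]["tags"])            # columns: paper, tags
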
prooflang.py CHANGED
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
 """The ProofLang Corpus of arXiv Proofs"""
 
 
@@ -21,18 +20,16 @@ import os
 import datasets
 
 
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-@unpublished{prooflang:dataset,
-title = {ProofLang: the Language of arXiv Proofs},
-author = {Henry Hammer and Nanako Noda and Christopher A. Stone},
-year = {2023}
+@inproceedings{prooflang:dataset,
+title = "{ProofLang: the Language of arXiv Proofs}",
+booktitle = "{Intelligent Computer Mathematics (CICM 2023)}",
+author = "{Henry Hammer and Nanako Noda and Christopher A. Stone}",
+year = {2023},
+note = {To appear}
 }
 """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\ The ProofLang Corpus includes over three million
 English-language proofs—558 million words—mechanically extracted from the papers
 (Math, CS, Physics, etc.) posted on arXiv.org between 1992 and 2020. The focus
@@ -44,30 +41,21 @@ write informal proofs. It is also amenable to statistical analyses and to
 experiments with Natural Language Processing (NLP) techniques.
 """
 
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
+_HOMEPAGE = "https://huggingface.co/datasets/proofcheck/prooflang"
 
-# TODO: Add the licence for the dataset here if you can find it
 _LICENSE = "CC-BY 4.0"
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    # "proofs": "https://huggingface.co/great-new-dataset-proofs.zip",
-    # "sentences": "https://huggingface.co/great-new-dataset-sentences.zip",
-    # "proofs": "cleanproofs19.tsv",
-    # "sentences": "sent19.tsv"
     "proofs": "proofs.zip",
     "sentences": "sentences.zip",
+    "raw": "raw.zip",
+    "tags": "tags.zip",
 }
 
-
-# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
 class ArxivProofs(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
+    """English text from proofs found in arXiv preprints."""
 
-    VERSION = datasets.Version("0.5.2")
+    VERSION = datasets.Version("0.6.0")
 
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -83,26 +71,33 @@ class ArxivProofs(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="proofs", version=VERSION, description="One proof per line"),
         datasets.BuilderConfig(name="sentences", version=VERSION, description="One sentence per line"),
+        datasets.BuilderConfig(name="raw", version=VERSION, description="One (less agressively cleaned) proof per line"),
+        datasets.BuilderConfig(name="tags", version=VERSION, description="arXiv subject tags for each paper"),
     ]
 
     DEFAULT_CONFIG_NAME = "proofs"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "proofs":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+        if self.config.name in {"proofs", "raw"}:  # This is the name of the configuration selected in BUILDER_CONFIGS above
             features = datasets.Features(
                 {
-                    "fileID": datasets.Value("string"),
+                    "paper": datasets.Value("string"),
                     "proof": datasets.Value("string"),
-                    # These are the features of your dataset like images, labels ...
+                }
+            )
+        elif self.config.name == "tags":  # This is an example to show how to have different features for "proofs" and "sentences"
+            features = datasets.Features(
+                {
+                    "paper": datasets.Value("string"),
+                    "tags": datasets.Value("string"),
                 }
             )
         else:  # This is an example to show how to have different features for "proofs" and "sentences"
             features = datasets.Features(
                 {
-                    "fileID": datasets.Value("string"),
+                    "paper": datasets.Value("string"),
                     "sentence": datasets.Value("string"),
-                    # These are the features of your dataset like images, labels ...
                 }
             )
         return datasets.DatasetInfo(
@@ -136,8 +131,8 @@ class ArxivProofs(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": data_dir,  # os.path.join(data_dir, "train.jsonl"), # data_file,
-                    "split": "train",
+                    "filepath": data_dir,
+                    "split": "train",  # Prooflang doesn't have a train/test split.
                 },
             ),
             # datasets.SplitGenerator(
@@ -162,8 +157,9 @@ class ArxivProofs(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+        csv.field_size_limit(256000)  # Some of the raw proofs are slightly longer than 131072 characters
         with open(os.path.join(filepath, self.config.name + ".tsv"), encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter='\t')
+            reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
             for key, data in enumerate(reader):
                 yield key, data
         # if self.config.name == "proofs":
proofs.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a3cecb6a1fe54e2d80baafa727629786d64f7d212c08e80e2249753e81a90ccc
-size 723931097
+oid sha256:834fc89ed12a1e2f4ac3137195dd4c181f7c7083df9703c5069ee224c5aebe73
+size 722830298
raw.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22564809efc16d396ea0470d848d9bc3f77ddcc6d2ddbc455cc2ed34c08fa51a
+size 753908748
sentences.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:25bcbb4637bbe587be43dedf53d124e69e55e5b70aa8c47f63d4226082d36c95
-size 756959311
+oid sha256:cef9b89dbea688e2e7f7eb5f421f8ef5168e94c408a25701f2b4e22d8f70f205
+size 747935508
tags.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb505f1b7cc9f24801f1e0601b4065c6aeec7bfa35d3bf20477e17a74b60ae8
+size 1921707
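
Each `*.zip` entry above is a Git LFS pointer file rather than the archive itself: the pointer records the sha256 `oid` and byte `size` of the real object. A sketch of checking a downloaded archive against its pointer (the local file name is illustrative; the expected digest is the `oid` from tags.zip above):

    import hashlib

    def sha256_of(path, chunk=1 << 20):
        """Stream a file through sha256 so large zips don't need to fit in memory."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while block := f.read(chunk):
                h.update(block)
        return h.hexdigest()

    assert sha256_of("tags.zip") == "7cb505f1b7cc9f24801f1e0601b4065c6aeec7bfa35d3bf20477e17a74b60ae8"
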