# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""MiniF2F+Informal in Isabelle

Loading script author: Sean Welleck
"""

import glob
import json
import os
import re
from pathlib import Path

import datasets


_CITATION = """\
@inproceedings{jiang2023draft,
    title={Draft, Sketch, and Prove: Guiding Formal Theorem Provers with Informal Proofs},
    author={Albert Qiaochu Jiang and Sean Welleck and Jin Peng Zhou and Timothee Lacroix and Jiacheng Liu and Wenda Li and Mateja Jamnik and Guillaume Lample and Yuhuai Wu},
    booktitle={The Eleventh International Conference on Learning Representations},
    year={2023},
    url={https://openreview.net/forum?id=SMa9EAovKMC}
}

@inproceedings{zheng2022minif2f,
    title={miniF2F: a cross-system benchmark for formal Olympiad-level mathematics},
    author={Kunhao Zheng and Jesse Michael Han and Stanislas Polu},
    booktitle={International Conference on Learning Representations},
    year={2022},
    url={https://openreview.net/forum?id=9ZPegFuFTFv}
}
"""

_DESCRIPTION = """\
MiniF2F is a formal mathematics benchmark (translated across multiple formal systems) consisting of exercise statements from olympiads (AMC, AIME, IMO) as well as high-school and undergraduate maths classes.

This dataset contains formal statements in Isabelle. Each statement is paired with an informal statement and
an informal proof, as described in Draft, Sketch, and Prove [Jiang et al. 2023].

The formal statements are taken from the facebookresearch/miniF2F repository at its most recent commit as of July 3, 2023.
"""

_HOMEPAGE = "https://github.com/facebookresearch/miniF2F"
_LICENSE = "MIT"

_MINIF2F_COMMIT = '5271ddec788677c815cf818a06f368ef6498a106'
_URLS = {
    "minif2f_repo": "https://github.com/facebookresearch/miniF2F/archive/%s.zip" % _MINIF2F_COMMIT
}

_ISABELLEDIR = 'miniF2F-%s/isabelle' % _MINIF2F_COMMIT
_INFORMALDIR = 'miniF2F-%s/informal' % _MINIF2F_COMMIT

_NAMES = [
    'miniF2F-isabelle-informal',
]

VERSION = "1.1.0"


class MiniF2F(datasets.GeneratorBasedBuilder):
    """MiniF2F+Informal in Isabelle"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=VERSION, description=name) for name in _NAMES
    ]

    DEFAULT_CONFIG_NAME = "miniF2F-isabelle-informal"

    def _info(self):
        features = datasets.Features(
            {
                "problem_name": datasets.Value("string"),
                "formal_statement": datasets.Value("string"),
                "informal_statement": datasets.Value("string"),
                "informal_proof": datasets.Value("string"),
                "header": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS
        data_dir = dl_manager.download_and_extract(urls)

        minif2f_repo_dir = data_dir['minif2f_repo']

        def extract_theorem(text):
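            # Grab everything from the 'theorem' keyword up to (but not including)
            # the start of its proof ('by', 'using', 'proof', or 'sorry').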
            extract = re.findall(r"(theorem.*?:.*?(?=by |using |proof|sorry))[\s]*", text, re.DOTALL)[0].strip()
            assert extract != ''
            return extract

        def extract_header(text):
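            # Everything before the first 'theorem' keyword is treated as the
            # file header (theory declaration and imports).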
            extract = re.findall(r"(.*?)theorem.*?", text, re.DOTALL)[0]
            assert extract != ''
            return extract

        splits = {'valid': [], 'test': []}
        for split in ['valid', 'test']:
            for f in glob.glob(
                    os.path.join(minif2f_repo_dir, _ISABELLEDIR, '%s/*.thy' % split)
            ):
                with open(f) as thy_file:
                    text = thy_file.read()
                name = Path(f).name.replace('.thy', '')
                thm = extract_theorem(text)
                header = extract_header(text)
                informal_path = os.path.join(
                    minif2f_repo_dir, _INFORMALDIR, '%s/%s.json' % (split, name)
                )
                with open(informal_path) as informal_file:
                    informal = json.load(informal_file)

                splits[split].append({
                    'problem_name': name,
                    'formal_statement': thm,
                    'informal_statement': informal['informal_statement'],
                    'informal_proof': informal['informal_proof'],
                    'header': header
                })

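        # Sanity check: miniF2F provides 244 problems in each of the valid and test splits.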
        assert len(splits['valid']) == 244
        assert len(splits['test']) == 244

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "valid",
                    "examples": splits['valid']
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "examples": splits['test']
                },
            ),
        ]

    def _generate_examples(self, split, examples):
        for example in examples:
            key = example["problem_name"]
            yield key, example
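

# Optional smoke test (a sketch; assumes the installed `datasets` version still
# supports loading a script-based dataset from a local path):
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "miniF2F-isabelle-informal")
    for split_name, split in ds.items():
        print(split_name, len(split))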