shuyanzh committed fe88bd9 (1 parent: 297d935)

Create docprompting-conala.py

Files changed (1): docprompting-conala.py (+111 -0)
docprompting-conala.py ADDED
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CoNaLa dataset."""

import json

import datasets


_CITATION = """\
@article{zhou2022doccoder,
  title={DocCoder: Generating Code by Retrieving and Reading Docs},
  author={Zhou, Shuyan and Alon, Uri and Xu, Frank F and Jiang, Zhengbao and Neubig, Graham},
  journal={arXiv preprint arXiv:2207.05987},
  year={2022}
}
"""

_DESCRIPTION = """This is a re-split of the CoNaLa dataset. For each code snippet in the dev and test sets, at least one function is held out from the training set. This split tests a code generation model's capacity to generate unseen functions.
We further ensure that examples from the same StackOverflow post (the same question_id prefix before the "-") are placed in the same split."""

_HOMEPAGE = "https://github.com/shuyanzhou/docprompting"

_URLs = {
    "docs": "conala-docs.json",
    "data": {"train": "conala-train.json", "validation": "conala-dev.json", "test": "conala-test.json"},
}


class DocPromptingConala(datasets.GeneratorBasedBuilder):
    """The re-split of the CoNaLa code generation dataset."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="data",
            version=datasets.Version("1.1.0"),
            description=_DESCRIPTION,
        ),
        datasets.BuilderConfig(name="docs", version=datasets.Version("1.1.0"), description=_DESCRIPTION),
    ]

    DEFAULT_CONFIG_NAME = "data"

    def _info(self):
        if self.config.name == "data":
            # Natural-language-to-code examples. "oracle_man" is the list of ids
            # of the documentation entries relevant to the target snippet.
            features = datasets.Features(
                {
                    "question_id": datasets.Value("string"),
                    "nl": datasets.Value("string"),
                    "cmd": datasets.Value("string"),
                    "oracle_man": datasets.Sequence(datasets.Value("string")),
                    "canonical_cmd": datasets.Value("string"),
                    "cmd_name": datasets.Value("string"),
                }
            )
        else:
            # The pool of documentation entries used for retrieval.
            features = datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "doc_content": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        config_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(config_urls)
        if self.config.name == "data":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": data_dir["train"], "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": data_dir["test"], "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": data_dir["validation"], "split": "validation"},
                ),
            ]
        else:
            # The documentation pool is a single file, exposed as one "train" split.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": data_dir, "split": "train"},
                ),
            ]

    def _generate_examples(self, filepath, split):
        """Yields examples; each line of the file is one JSON object."""
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                yield key, json.loads(line)
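
Once the script and the three JSON files sit in the same dataset repository, both configs can be loaded through the standard datasets API. A minimal usage sketch; the repo id "neulab/docprompting-conala" below is an assumption, substitute the actual repository path:

from datasets import load_dataset

# "neulab/docprompting-conala" is an assumed repo id; substitute the actual
# dataset path. Recent versions of `datasets` may additionally require
# trust_remote_code=True to run a script-based dataset like this one.
data = load_dataset("neulab/docprompting-conala", "data")  # train/validation/test
docs = load_dataset("neulab/docprompting-conala", "docs")  # single "train" split

example = data["train"][0]
print(example["nl"])          # natural-language intent
print(example["cmd"])         # target code snippet
print(example["oracle_man"])  # ids of supporting entries in the "docs" config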