shuyanzh committed on
Commit
daa33fe
1 Parent(s): acbd38e

Create tldr.py

Files changed (1)
  1. tldr.py +113 -0
tldr.py ADDED
@@ -0,0 +1,113 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TLDR dataset."""
+
+ import json
+ import datasets
+
+
+ _CITATION = """\
+ @article{zhou2022doccoder,
+   title={DocCoder: Generating Code by Retrieving and Reading Docs},
+   author={Zhou, Shuyan and Alon, Uri and Xu, Frank F and Jiang, Zhengbao and Neubig, Graham},
+   journal={arXiv preprint arXiv:2207.05987},
+   year={2022}
+ }
+ """
+
+ _DESCRIPTION = """This is the re-split of the CoNaLa dataset. For each code snippet in the dev and test sets, at least one function is held out from the training set. This split aims at testing a code generation model's capacity to generate unseen functions.
+ We further make sure that examples from the same StackOverflow post (same question_id before the -) are in the same split."""
+
+ _HOMEPAGE = "https://github.com/shuyanzhou/docprompting"
+ _URLs = {
+     "docs": "tldr-docs.jsonl",
+     "data": {"train": "tldr-train.jsonl", "validation": "tldr-dev.jsonl", "test": "tldr-test.jsonl"},
+ }
+
+ class DocPromptingConala(datasets.GeneratorBasedBuilder):
+     """TLDR natural language to bash generation dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="data",
+             version=datasets.Version("1.1.0"),
+             description=_DESCRIPTION,
+         ),
+         datasets.BuilderConfig(name="docs", version=datasets.Version("1.1.0"), description=_DESCRIPTION),
+     ]
+
+     DEFAULT_CONFIG_NAME = "data"
+
+     def _info(self):
+         if self.config.name == "data":
+             # NL-to-bash examples, each pointing to the manual entries that document its command.
+             features = datasets.Features(
+                 {
+                     "question_id": datasets.Value("string"),
+                     "nl": datasets.Value("string"),
+                     "cmd": datasets.Value("string"),
+                     "oracle_man": datasets.Sequence(feature=datasets.Value("string")),
+                     "canonical_cmd": datasets.Value("string"),
+                     "cmd_name": datasets.Value("string"),
+                     "tldr_cmd_name": datasets.Value("string"),
+                     "manual_exist": datasets.Value("bool"),
+                     "matching_info": dict(),
+                 }
+             )
+         else:
+             # Pool of documentation entries that can be retrieved for a query.
+             features = datasets.Features(
+                 {
+                     "doc_id": datasets.Value("string"),
+                     "doc_content": datasets.Value("string"),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             citation=_CITATION,
+             homepage=_HOMEPAGE,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         config_urls = _URLs[self.config.name]
+         data_dir = dl_manager.download_and_extract(config_urls)
+         if self.config.name == "data":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"filepath": data_dir["train"], "split": "train"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"filepath": data_dir["test"], "split": "test"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"filepath": data_dir["validation"], "split": "validation"},
+                 ),
+             ]
+         else:
+             # The "docs" config is a single file, exposed as one train split.
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"filepath": data_dir, "split": "train"},
+                 ),
+             ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields one example per JSON line of the given file."""
+         with open(filepath, encoding="utf-8") as f:
+             for key, line in enumerate(f):
+                 yield key, json.loads(line)
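For context, a minimal sketch of how the two configs defined above could be loaded, assuming tldr.py and the three JSONL files sit together in a local directory and an installed datasets release that still supports dataset scripts (recent 2.x versions may additionally require trust_remote_code=True; newer releases restrict script loading):

from datasets import load_dataset

# "data" config: NL -> bash pairs with train/validation/test splits
data = load_dataset("./tldr.py", name="data")
print(data["train"][0]["nl"], "->", data["train"][0]["cmd"])

# "docs" config: the documentation pool, exposed as a single train split
docs = load_dataset("./tldr.py", name="docs")
print(docs["train"][0]["doc_id"])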