system (HF staff) committed
Commit 512d901 (0 parents)

Update files from the datasets library (from 1.8.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.8.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,203 @@
+ ---
+ annotations_creators:
+ - found
+ language_creators:
+ - found
+ languages:
+ - code
+ - en
+ licenses:
+ - other-C-UDA
+ multilinguality:
+ - other-programming-languages
+ size_categories:
+ - 100K<n<1M
+ source_datasets:
+ - original
+ task_categories:
+ - text-retrieval
+ task_ids:
+ - document-retrieval
+ ---
+ # Dataset Card for "code_x_glue_tc_nl_code_search_adv"
+ 
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+ 
+ ## Dataset Description
+ 
+ - **Homepage:** https://github.com/microsoft/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv
+ 
+ ### Dataset Summary
+ 
+ CodeXGLUE NL-code-search-Adv dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv
+ 
+ The dataset comes from CodeSearchNet and is filtered as follows (a sketch of this filtering is shown after the list):
+ - Remove examples whose code cannot be parsed into an abstract syntax tree.
+ - Remove examples whose documentation contains fewer than 3 or more than 256 tokens.
+ - Remove examples whose documentation contains special tokens (e.g. `<img ...>` or `https:...`).
+ - Remove examples whose documentation is not written in English.
+ 
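+ The exact filtering code is not part of this commit; as an illustration only, a Python sketch of the four rules (with a hypothetical `is_english` helper standing in for the language check) could look like:
+ 
+ ```python
+ import ast
+ import re
+ 
+ def keep_example(code, doc_tokens):
+     """Illustrative re-implementation of the filters listed above."""
+     # 1. The code must parse into an abstract syntax tree.
+     try:
+         ast.parse(code)
+     except SyntaxError:
+         return False
+     # 2. The documentation must have between 3 and 256 tokens.
+     if not 3 <= len(doc_tokens) <= 256:
+         return False
+     # 3. No special tokens such as "<img ...>" or "https:..." in the docs.
+     if any(re.search(r"<img|https?:", token) for token in doc_tokens):
+         return False
+     # 4. The documentation must be English; is_english is a hypothetical
+     #    stand-in for any language-identification helper.
+     return is_english(" ".join(doc_tokens))
+ ```
+ 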
+ ### Supported Tasks and Leaderboards
+ 
+ - `document-retrieval`: The dataset can be used to train a model that retrieves the top-k most relevant code snippets for a given **English** natural language query (a scoring sketch follows this list).
+ 
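+ Retrieval quality for this kind of task is commonly reported as Mean Reciprocal Rank (MRR); a minimal sketch, assuming each query has exactly one relevant snippet among its candidates:
+ 
+ ```python
+ def mean_reciprocal_rank(ranked_ids, gold_ids):
+     """ranked_ids[i] lists the candidate ids for query i, best first;
+     gold_ids[i] is the id of the single correct snippet."""
+     total = 0.0
+     for candidates, gold in zip(ranked_ids, gold_ids):
+         rank = candidates.index(gold) + 1  # 1-based rank of the gold snippet
+         total += 1.0 / rank
+     return total / len(gold_ids)
+ 
+ # Gold snippet ranked 1st and 4th across two queries -> (1 + 0.25) / 2
+ print(mean_reciprocal_rank([[7, 3], [5, 9, 2, 7]], [7, 7]))  # 0.625
+ ```
+ 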
+ ### Languages
+ 
+ - Python **programming** language
+ - English **natural** language
+ 
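+ A minimal loading sketch with the `datasets` library (this commit targets version 1.8.0):
+ 
+ ```python
+ from datasets import load_dataset
+ 
+ # "default" is the only configuration of this dataset.
+ ds = load_dataset("code_x_glue_tc_nl_code_search_adv")
+ print(ds)  # DatasetDict with train / validation / test splits
+ print(ds["validation"][0]["docstring"])  # "Downloads Dailymotion videos by URL."
+ ```
+ 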
+ ## Dataset Structure
+ 
+ ### Data Instances
+ 
+ An example of 'validation' looks as follows.
+ ```
+ {
+ "argument_list": "",
+ "code": "def Func(arg_0, arg_1='.', arg_2=True, arg_3=False, **arg_4):\n \"\"\"Downloads Dailymotion videos by URL.\n \"\"\"\n\n arg_5 = get_content(rebuilt_url(arg_0))\n arg_6 = json.loads(match1(arg_5, r'qualities\":({.+?}),\"'))\n arg_7 = match1(arg_5, r'\"video_title\"\\s*:\\s*\"([^\"]+)\"') or \\\n match1(arg_5, r'\"title\"\\s*:\\s*\"([^\"]+)\"')\n arg_7 = unicodize(arg_7)\n\n for arg_8 in ['1080','720','480','380','240','144','auto']:\n try:\n arg_9 = arg_6[arg_8][1][\"url\"]\n if arg_9:\n break\n except KeyError:\n pass\n\n arg_10, arg_11, arg_12 = url_info(arg_9)\n\n print_info(site_info, arg_7, arg_10, arg_12)\n if not arg_3:\n download_urls([arg_9], arg_7, arg_11, arg_12, arg_1=arg_1, arg_2=arg_2)",
+ "code_tokens": ["def", "Func", "(", "arg_0", ",", "arg_1", "=", "'.'", ",", "arg_2", "=", "True", ",", "arg_3", "=", "False", ",", "**", "arg_4", ")", ":", "arg_5", "=", "get_content", "(", "rebuilt_url", "(", "arg_0", ")", ")", "arg_6", "=", "json", ".", "loads", "(", "match1", "(", "arg_5", ",", "r'qualities\":({.+?}),\"'", ")", ")", "arg_7", "=", "match1", "(", "arg_5", ",", "r'\"video_title\"\\s*:\\s*\"([^\"]+)\"'", ")", "or", "match1", "(", "arg_5", ",", "r'\"title\"\\s*:\\s*\"([^\"]+)\"'", ")", "arg_7", "=", "unicodize", "(", "arg_7", ")", "for", "arg_8", "in", "[", "'1080'", ",", "'720'", ",", "'480'", ",", "'380'", ",", "'240'", ",", "'144'", ",", "'auto'", "]", ":", "try", ":", "arg_9", "=", "arg_6", "[", "arg_8", "]", "[", "1", "]", "[", "\"url\"", "]", "if", "arg_9", ":", "break", "except", "KeyError", ":", "pass", "arg_10", ",", "arg_11", ",", "arg_12", "=", "url_info", "(", "arg_9", ")", "print_info", "(", "site_info", ",", "arg_7", ",", "arg_10", ",", "arg_12", ")", "if", "not", "arg_3", ":", "download_urls", "(", "[", "arg_9", "]", ",", "arg_7", ",", "arg_11", ",", "arg_12", ",", "arg_1", "=", "arg_1", ",", "arg_2", "=", "arg_2", ")"],
+ "docstring": "Downloads Dailymotion videos by URL.",
+ "docstring_summary": "Downloads Dailymotion videos by URL.",
+ "docstring_tokens": ["Downloads", "Dailymotion", "videos", "by", "URL", "."],
+ "func_name": "",
+ "id": 0,
+ "identifier": "dailymotion_download",
+ "language": "python",
+ "nwo": "soimort/you-get",
+ "original_string": "",
+ "parameters": "(url, output_dir='.', merge=True, info_only=False, **kwargs)",
+ "path": "src/you_get/extractors/dailymotion.py",
+ "repo": "",
+ "return_statement": "",
+ "score": 0.9997601509094238,
+ "sha": "b746ac01c9f39de94cac2d56f665285b0523b974",
+ "url": "https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/dailymotion.py#L13-L35"
+ }
+ ```
+ 
+ ### Data Fields
+ 
+ Each data field is described below for each config. The data fields are the same across all splits.
+ 
+ #### default
+ 
+ | field name | type | description |
+ |-----------------|-----------------|-----------------------------------------------------------------------------------|
+ |id |int32 | Index of the sample |
+ |repo |string | The owner/repo |
+ |path |string | The full path to the original file |
+ |func_name |string | The function or method name |
+ |original_string |string | The raw string before tokenization or parsing |
+ |language |string | The programming language |
+ |code |string | The part of original_string that is code |
+ |code_tokens |Sequence[string] | Tokenized version of code |
+ |docstring |string | The top-level comment or docstring, if it exists in the original string |
+ |docstring_tokens |Sequence[string] | Tokenized version of docstring |
+ |sha |string | Sha of the file |
+ |url |string | Url of the file |
+ |docstring_summary|string | Summary of the docstring |
+ |parameters |string | Parameters of the function |
+ |return_statement |string | Return statement |
+ |argument_list |string | List of arguments of the function |
+ |identifier |string | Identifier |
+ |nwo |string | nwo (name-with-owner, i.e. owner/repo) |
+ |score |float32 | Score for this search |
+ 
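+ The names and types above can be checked at runtime on the loaded dataset:
+ 
+ ```python
+ from datasets import load_dataset
+ 
+ ds = load_dataset("code_x_glue_tc_nl_code_search_adv", split="train")
+ print(ds.features["score"])        # e.g. Value(dtype='float32')
+ print(ds.features["code_tokens"])  # e.g. Sequence(feature=Value(dtype='string'))
+ ```
+ 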
+ ### Data Splits
+ 
+ | name |train |validation|test |
+ |-------|-----:|---------:|----:|
+ |default|251820| 9604|19210|
+ 
+ ## Dataset Creation
+ 
+ ### Curation Rationale
+ 
+ [More Information Needed]
+ 
+ ### Source Data
+ 
+ #### Initial Data Collection and Normalization
+ 
+ Data from the CodeSearchNet Challenge dataset.
+ [More Information Needed]
+ 
+ #### Who are the source language producers?
+ 
+ Software developers.
+ 
+ ### Annotations
+ 
+ #### Annotation process
+ 
+ [More Information Needed]
+ 
+ #### Who are the annotators?
+ 
+ [More Information Needed]
+ 
+ ### Personal and Sensitive Information
+ 
+ [More Information Needed]
+ 
+ ## Considerations for Using the Data
+ 
+ ### Social Impact of Dataset
+ 
+ [More Information Needed]
+ 
+ ### Discussion of Biases
+ 
+ [More Information Needed]
+ 
+ ### Other Known Limitations
+ 
+ [More Information Needed]
+ 
+ ## Additional Information
+ 
+ ### Dataset Curators
+ 
+ https://github.com/microsoft, https://github.com/madlag
+ 
+ ### Licensing Information
+ 
+ Computational Use of Data Agreement (C-UDA) License.
+ 
+ ### Citation Information
+ 
+ ```
+ @article{husain2019codesearchnet,
+ title={Codesearchnet challenge: Evaluating the state of semantic code search},
+ author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
+ journal={arXiv preprint arXiv:1909.09436},
+ year={2019}
+ }
+ ```
+ 
+ ### Contributions
+ 
+ Thanks to @madlag (and partly also @ncoop57) for adding this dataset.
code_x_glue_tc_nl_code_search_adv.py ADDED
@@ -0,0 +1,206 @@
+ import json
+ import os
+ import os.path
+ from typing import List
+ 
+ import datasets
+ 
+ from .common import TrainValidTestChild
+ from .generated_definitions import DEFINITIONS
+ 
+ 
+ _DESCRIPTION = """The dataset we use comes from CodeSearchNet and we filter the dataset as the following:
+ - Remove examples that codes cannot be parsed into an abstract syntax tree.
+ - Remove examples that #tokens of documents is < 3 or >256
+ - Remove examples that documents contain special tokens (e.g. <img ...> or https:...)
+ - Remove examples that documents are not English.
+ """
+ _CITATION = """@article{husain2019codesearchnet,
+ title={Codesearchnet challenge: Evaluating the state of semantic code search},
+ author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
+ journal={arXiv preprint arXiv:1909.09436},
+ year={2019}
+ }"""
+ 
+ 
+ class CodeXGlueCtCodeToTextBaseImpl(TrainValidTestChild):
+     _DESCRIPTION = _DESCRIPTION
+     _CITATION = _CITATION
+ 
+     # For each file, each line in the uncompressed file represents one function.
+     _FEATURES = {
+         "id": datasets.Value("int32"),  # Index of the sample
+         "repo": datasets.Value("string"),  # repo: the owner/repo
+         "path": datasets.Value("string"),  # path: the full path to the original file
+         "func_name": datasets.Value("string"),  # func_name: the function or method name
+         "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
+         "language": datasets.Value("string"),  # language: the programming language name
+         "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
+         "code_tokens": datasets.features.Sequence(
+             datasets.Value("string")
+         ),  # code_tokens/function_tokens: tokenized version of code
+         "docstring": datasets.Value(
+             "string"
+         ),  # docstring: the top-level comment or docstring, if it exists in the original string
+         "docstring_tokens": datasets.features.Sequence(
+             datasets.Value("string")
+         ),  # docstring_tokens: tokenized version of docstring
+         "sha": datasets.Value("string"),  # sha of the file
+         "url": datasets.Value("string"),  # url of the file
+     }
+ 
+     _SUPERVISED_KEYS = ["docstring", "docstring_tokens"]
+ 
+     def generate_urls(self, split_name, language):
+         yield "language", f"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/{language}.zip"
+         yield "dataset", "dataset.zip"
+ 
+     def get_data_files(self, split_name, file_paths, language):
+         language_specific_path = file_paths["language"]
+         final_path = os.path.join(language_specific_path, language, "final")
+         # Clean up pickle files to save space; join with final_path so the
+         # right file is deleted regardless of the current working directory.
+         for path in os.listdir(final_path):
+             if path.endswith(".pkl"):
+                 os.unlink(os.path.join(final_path, path))
+ 
+         data_files = []
+         for root, dirs, files in os.walk(final_path):
+             for file in files:
+                 temp = os.path.join(root, file)
+                 if ".jsonl" in temp and split_name in temp:
+                     data_files.append(temp)
+         return data_files
+ 
+     def post_process(self, split_name, language, js):
+         return js
+ 
+     def _generate_examples(self, split_name, file_paths, language):
+         import gzip
+ 
+         data_set_path = file_paths["dataset"]
+ 
+         data_files = self.get_data_files(split_name, file_paths, language)
+ 
+         # The split file lists the URLs of the examples belonging to this split.
+         urls = set()
+         f1_path_parts = [data_set_path, "dataset", language, f"{split_name}.txt"]
+         if self.SINGLE_LANGUAGE:
+             del f1_path_parts[2]
+ 
+         f1_path = os.path.join(*f1_path_parts)
+         with open(f1_path, encoding="utf-8") as f1:
+             for line in f1:
+                 urls.add(line.strip())
+ 
+         idx = 0
+         for file in data_files:
+             if ".gz" in file:
+                 f = gzip.open(file, "rt", encoding="utf-8")
+             else:
+                 f = open(file, encoding="utf-8")
+ 
+             # Keep only the functions whose URL belongs to the current split.
+             for line in f:
+                 js = json.loads(line.strip())
+                 if js["url"] in urls:
+                     js["id"] = idx
+                     js = self.post_process(split_name, language, js)
+                     if "partition" in js:
+                         del js["partition"]
+                     yield idx, js
+                     idx += 1
+             f.close()
+ 
+ 
+ class CodeXGlueTcNLCodeSearchAdvImpl(CodeXGlueCtCodeToTextBaseImpl):
+     LANGUAGE = "python"
+     SINGLE_LANGUAGE = True
+ 
+     _FEATURES = {
+         "id": datasets.Value("int32"),  # Index of the sample
+         "repo": datasets.Value("string"),  # repo: the owner/repo
+         "path": datasets.Value("string"),  # path: the full path to the original file
+         "func_name": datasets.Value("string"),  # func_name: the function or method name
+         "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
+         "language": datasets.Value("string"),  # language: the programming language
+         "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
+         "code_tokens": datasets.features.Sequence(
+             datasets.Value("string")
+         ),  # code_tokens/function_tokens: tokenized version of code
+         "docstring": datasets.Value(
+             "string"
+         ),  # docstring: the top-level comment or docstring, if it exists in the original string
+         "docstring_tokens": datasets.features.Sequence(
+             datasets.Value("string")
+         ),  # docstring_tokens: tokenized version of docstring
+         "sha": datasets.Value("string"),  # sha of the file
+         "url": datasets.Value("string"),  # url of the file
+         "docstring_summary": datasets.Value("string"),  # Summary of the docstring
+         "parameters": datasets.Value("string"),  # parameters of the function
+         "return_statement": datasets.Value("string"),  # return statement
+         "argument_list": datasets.Value("string"),  # list of arguments of the function
+         "identifier": datasets.Value("string"),  # identifier
+         "nwo": datasets.Value("string"),  # nwo: name-with-owner, i.e. owner/repo
+         "score": datasets.Value("float"),  # score for this search
+     }
+ 
+     def post_process(self, split_name, language, js):
+         # The raw CodeSearchNet records use "function"/"function_tokens";
+         # rename them to "code"/"code_tokens" to match the feature schema.
+         for suffix in "_tokens", "":
+             key = "function" + suffix
+             if key in js:
+                 js["code" + suffix] = js[key]
+                 del js[key]
+ 
+         # Fill in defaults for any feature missing from this record.
+         for key in self._FEATURES:
+             if key not in js:
+                 if key == "score":
+                     js[key] = -1
+                 else:
+                     js[key] = ""
+ 
+         return js
+ 
+     def generate_urls(self, split_name):
+         for e in super().generate_urls(split_name, self.LANGUAGE):
+             yield e
+ 
+     def get_data_files(self, split_name, file_paths, language):
+         if split_name == "train":
+             return super().get_data_files(split_name, file_paths, language)
+         else:
+             # Validation and test both read from a single pre-filtered file.
+             data_set_path = file_paths["dataset"]
+             data_file = os.path.join(data_set_path, "dataset", "test_code.jsonl")
+             return [data_file]
+ 
+     def _generate_examples(self, split_name, file_paths):
+         for e in super()._generate_examples(split_name, file_paths, self.LANGUAGE):
+             yield e
+ 
+ 
+ CLASS_MAPPING = {
+     "CodeXGlueTcNLCodeSearchAdv": CodeXGlueTcNLCodeSearchAdvImpl,
+ }
+ 
+ 
+ class CodeXGlueTcNlCodeSearchAdv(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIG_CLASS = datasets.BuilderConfig
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
+     ]
+ 
+     def _info(self):
+         name = self.config.name
+         info = DEFINITIONS[name]
+         if info["class_name"] in CLASS_MAPPING:
+             self.child = CLASS_MAPPING[info["class_name"]](info)
+         else:
+             raise RuntimeError(f"Unknown python class for dataset configuration {name}")
+         ret = self.child._info()
+         return ret
+ 
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         return self.child._split_generators(dl_manager=dl_manager)
+ 
+     def _generate_examples(self, split_name, file_paths):
+         return self.child._generate_examples(split_name, file_paths)
common.py ADDED
@@ -0,0 +1,75 @@
+ from typing import List
+ 
+ import datasets
+ 
+ 
+ # Citation, taken from https://github.com/microsoft/CodeXGLUE
+ _DEFAULT_CITATION = """@article{CodeXGLUE,
+ title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
+ year={2020},}"""
+ 
+ 
+ class Child:
+     _DESCRIPTION = None
+     _FEATURES = None
+     _CITATION = None
+     SPLITS = {"train": datasets.Split.TRAIN}
+     _SUPERVISED_KEYS = None
+ 
+     def __init__(self, info):
+         self.info = info
+ 
+     def homepage(self):
+         return self.info["project_url"]
+ 
+     def _info(self):
+         # This is the description that will appear on the datasets page.
+         return datasets.DatasetInfo(
+             description=self.info["description"] + "\n\n" + self._DESCRIPTION,
+             features=datasets.Features(self._FEATURES),
+             homepage=self.homepage(),
+             citation=self._CITATION or _DEFAULT_CITATION,
+             supervised_keys=self._SUPERVISED_KEYS,
+         )
+ 
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         SPLITS = self.SPLITS
+         _URL = self.info["raw_url"]
+         urls_to_download = {}
+         for split in SPLITS:
+             if split not in urls_to_download:
+                 urls_to_download[split] = {}
+ 
+             for key, url in self.generate_urls(split):
+                 # Relative URLs are resolved against the project's raw_url.
+                 if not url.startswith("http"):
+                     url = _URL + "/" + url
+                 urls_to_download[split][key] = url
+ 
+         downloaded_files = {}
+         for k, v in urls_to_download.items():
+             downloaded_files[k] = dl_manager.download_and_extract(v)
+ 
+         return [
+             datasets.SplitGenerator(
+                 name=SPLITS[k],
+                 gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
+             )
+             for k in SPLITS
+         ]
+ 
+     def check_empty(self, entries):
+         all_empty = all(v == "" for v in entries.values())
+         all_non_empty = all(v != "" for v in entries.values())
+ 
+         if not all_non_empty and not all_empty:
+             raise RuntimeError("Parallel data files should have the same number of lines.")
+ 
+         return all_empty
+ 
+ 
+ class TrainValidTestChild(Child):
+     SPLITS = {
+         "train": datasets.Split.TRAIN,
+         "valid": datasets.Split.VALIDATION,
+         "test": datasets.Split.TEST,
+     }
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "CodeXGLUE NL-code-search-Adv dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv\n\nThe dataset we use comes from CodeSearchNet and we filter the dataset as the following:\n- Remove examples that codes cannot be parsed into an abstract syntax tree.\n- Remove examples that #tokens of documents is < 3 or >256\n- Remove examples that documents contain special tokens (e.g. <img ...> or https:...)\n- Remove examples that documents are not English.\n", "citation": "@article{husain2019codesearchnet,\ntitle={Codesearchnet challenge: Evaluating the state of semantic code search},\nauthor={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},\njournal={arXiv preprint arXiv:1909.09436},\nyear={2019}\n}", "homepage": "https://github.com/madlag/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "repo": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "func_name": {"dtype": "string", "id": null, "_type": "Value"}, "original_string": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "code": {"dtype": "string", "id": null, "_type": "Value"}, "code_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "docstring": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sha": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "docstring_summary": {"dtype": "string", "id": null, "_type": "Value"}, "parameters": {"dtype": "string", "id": null, "_type": "Value"}, "return_statement": {"dtype": "string", "id": null, "_type": "Value"}, "argument_list": {"dtype": "string", "id": null, "_type": "Value"}, "identifier": {"dtype": "string", "id": null, "_type": "Value"}, "nwo": {"dtype": "string", "id": null, "_type": "Value"}, "score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "docstring", "output": "docstring_tokens"}, "task_templates": null, "builder_name": "code_x_glue_tc_nl_code_search_adv", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 820716084, "num_examples": 251820, "dataset_name": "code_x_glue_tc_nl_code_search_adv"}, "validation": {"name": "validation", "num_bytes": 23468834, "num_examples": 9604, "dataset_name": "code_x_glue_tc_nl_code_search_adv"}, "test": {"name": "test", "num_bytes": 47433760, "num_examples": 19210, "dataset_name": "code_x_glue_tc_nl_code_search_adv"}}, "download_checksums": {"https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/python.zip": {"num_bytes": 940909997, "checksum": "7223c6460bebfa85697b586da91e47bc5d64790a4d60bba5917106458ab6b40e"}, "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Text-Code/NL-code-search-Adv/dataset.zip": {"num_bytes": 25115627, "checksum": "b4d5157699ca3bda7a33674f17d7b24294b4c8f36f650cea01d3d0dbcefdc656"}}, "download_size": 966025624, "post_processing_size": null, "dataset_size": 891618678, "size_in_bytes": 1857644302}}
dummy/default/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca5e15a57bd9d5db3b5f4a5851768f221b73fb7d9265e5830033b187c5bc041d
+ size 17498
generated_definitions.py ADDED
@@ -0,0 +1,12 @@
+ DEFINITIONS = {
+     "default": {
+         "class_name": "CodeXGlueTcNLCodeSearchAdv",
+         "dataset_type": "Text-Code",
+         "description": "CodeXGLUE NL-code-search-Adv dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv",
+         "dir_name": "NL-code-search-Adv",
+         "name": "default",
+         "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv",
+         "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Text-Code/NL-code-search-Adv",
+         "sizes": {"test": 19210, "train": 251820, "validation": 9604},
+     }
+ }