albertvillanova committed
Commit 9d83d86
Parent: fc262e6

Convert dataset to Parquet (#5)


- Convert dataset to Parquet (bad29ae02f4650dffac1bcaeaaad545bfbcc9ec1)
- Delete loading script auxiliary file (40b5431dbbcddea8427674bf167a4f8046c0548e)
- Delete loading script (2d22038b0cdd7a18e53ab010fbbdd345690dea60)
- Delete loading script auxiliary file (f38ee8712c776ac49d99a846c2e06490aa660484)

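With the loading script gone, the Hub serves this dataset straight from the Parquet shards, so a plain load_dataset call no longer executes any repository code. A minimal sketch (this assumes a reasonably recent datasets release; the repo id is taken from the dataset card below):

    from datasets import load_dataset

    # Parquet-backed repo: no loading script runs on the client.
    ds = load_dataset("code_x_glue_tc_nl_code_search_adv")

    # Split sizes declared in the README: 251820 / 9604 / 19210 examples.
    print(ds)
    print(ds["train"][0]["func_name"])
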
README.md CHANGED
@@ -61,16 +61,25 @@ dataset_info:
     dtype: float32
   splits:
   - name: train
-    num_bytes: 820716084
+    num_bytes: 820714108
     num_examples: 251820
   - name: validation
-    num_bytes: 23468834
+    num_bytes: 23468758
     num_examples: 9604
   - name: test
-    num_bytes: 47433760
+    num_bytes: 47433608
     num_examples: 19210
-  download_size: 966025624
-  dataset_size: 891618678
+  download_size: 316235421
+  dataset_size: 891616474
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 # Dataset Card for "code_x_glue_tc_nl_code_search_adv"

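The conversion shrinks download_size from 966,025,624 to 316,235,421 bytes (roughly a third of the original), while dataset_size, the sum of the three splits' num_bytes, stays essentially unchanged at 891,616,474 bytes: Parquet stores the same rows in compressed columnar form. The new configs block maps each split to its Parquet shards. Since the shards are plain Parquet files, they can also be read without the datasets library; a sketch using pandas over the hf:// filesystem (this assumes pandas, pyarrow, and a huggingface_hub version that registers the hf:// fsspec protocol; the shard name is taken from the files added in this commit):

    import pandas as pd

    # Read one shard directly; huggingface_hub provides the hf:// filesystem.
    df = pd.read_parquet(
        "hf://datasets/code_x_glue_tc_nl_code_search_adv/data/validation-00000-of-00001.parquet"
    )
    print(len(df))  # expected: 9604, matching the README's validation split
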
code_x_glue_tc_nl_code_search_adv.py DELETED
@@ -1,206 +0,0 @@
import json
import os
import os.path
from typing import List

import datasets

from .common import TrainValidTestChild
from .generated_definitions import DEFINITIONS


_DESCRIPTION = """The dataset we use comes from CodeSearchNet and we filter the dataset as the following:
- Remove examples that codes cannot be parsed into an abstract syntax tree.
- Remove examples that #tokens of documents is < 3 or >256
- Remove examples that documents contain special tokens (e.g. <img ...> or https:...)
- Remove examples that documents are not English.
"""
_CITATION = """@article{husain2019codesearchnet,
title={Codesearchnet challenge: Evaluating the state of semantic code search},
author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
journal={arXiv preprint arXiv:1909.09436},
year={2019}
}"""


class CodeXGlueCtCodeToTextBaseImpl(TrainValidTestChild):
    _DESCRIPTION = _DESCRIPTION
    _CITATION = _CITATION

    # For each file, each line in the uncompressed file represents one function.
    _FEATURES = {
        "id": datasets.Value("int32"),  # Index of the sample
        "repo": datasets.Value("string"),  # repo: the owner/repo
        "path": datasets.Value("string"),  # path: the full path to the original file
        "func_name": datasets.Value("string"),  # func_name: the function or method name
        "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
        "language": datasets.Value("string"),  # language: the programming language name
        "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
        "code_tokens": datasets.features.Sequence(
            datasets.Value("string")
        ),  # code_tokens/function_tokens: tokenized version of code
        "docstring": datasets.Value(
            "string"
        ),  # docstring: the top-level comment or docstring, if it exists in the original string
        "docstring_tokens": datasets.features.Sequence(
            datasets.Value("string")
        ),  # docstring_tokens: tokenized version of docstring
        "sha": datasets.Value("string"),  # sha of the file
        "url": datasets.Value("string"),  # url of the file
    }

    _SUPERVISED_KEYS = ["docstring", "docstring_tokens"]

    def generate_urls(self, split_name, language):
        yield "language", f"https://huggingface.co/datasets/code_search_net/resolve/main/data/{language}.zip"
        yield "dataset", "dataset.zip"

    def get_data_files(self, split_name, file_paths, language):
        language_specific_path = file_paths["language"]
        final_path = os.path.join(language_specific_path, language, "final")
        # Make some cleanup to save space; join with final_path so unlink
        # targets the listed file rather than a name relative to the CWD.
        for path in os.listdir(final_path):
            if path.endswith(".pkl"):
                os.unlink(os.path.join(final_path, path))

        data_files = []
        for root, dirs, files in os.walk(final_path):
            for file in files:
                temp = os.path.join(root, file)
                if ".jsonl" in temp:
                    if split_name in temp:
                        data_files.append(temp)
        return data_files

    def post_process(self, split_name, language, js):
        return js

    def _generate_examples(self, split_name, file_paths, language):
        import gzip

        data_set_path = file_paths["dataset"]

        data_files = self.get_data_files(split_name, file_paths, language)

        urls = {}
        f1_path_parts = [data_set_path, "dataset", language, f"{split_name}.txt"]
        if self.SINGLE_LANGUAGE:
            del f1_path_parts[2]

        f1_path = os.path.join(*f1_path_parts)
        with open(f1_path, encoding="utf-8") as f1:
            for line in f1:
                line = line.strip()
                urls[line] = True

        idx = 0
        for file in data_files:
            if ".gz" in file:
                f = gzip.open(file)
            else:
                f = open(file, encoding="utf-8")

            for line in f:
                line = line.strip()
                js = json.loads(line)
                if js["url"] in urls:
                    js["id"] = idx
                    js = self.post_process(split_name, language, js)
                    if "partition" in js:
                        del js["partition"]
                    yield idx, js
                    idx += 1
            f.close()


class CodeXGlueTcNLCodeSearchAdvImpl(CodeXGlueCtCodeToTextBaseImpl):
    LANGUAGE = "python"
    SINGLE_LANGUAGE = True

    _FEATURES = {
        "id": datasets.Value("int32"),  # Index of the sample
        "repo": datasets.Value("string"),  # repo: the owner/repo
        "path": datasets.Value("string"),  # path: the full path to the original file
        "func_name": datasets.Value("string"),  # func_name: the function or method name
        "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
        "language": datasets.Value("string"),  # language: the programming language
        "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
        "code_tokens": datasets.features.Sequence(
            datasets.Value("string")
        ),  # code_tokens/function_tokens: tokenized version of code
        "docstring": datasets.Value(
            "string"
        ),  # docstring: the top-level comment or docstring, if it exists in the original string
        "docstring_tokens": datasets.features.Sequence(
            datasets.Value("string")
        ),  # docstring_tokens: tokenized version of docstring
        "sha": datasets.Value("string"),  # sha of the file
        "url": datasets.Value("string"),  # url of the file
        "docstring_summary": datasets.Value("string"),  # Summary of the docstring
        "parameters": datasets.Value("string"),  # parameters of the function
        "return_statement": datasets.Value("string"),  # return statement
        "argument_list": datasets.Value("string"),  # list of arguments of the function
        "identifier": datasets.Value("string"),  # identifier
        "nwo": datasets.Value("string"),  # nwo
        "score": datasets.Value("float"),  # score for this search
    }

    def post_process(self, split_name, language, js):
        for suffix in "_tokens", "":
            key = "function" + suffix
            if key in js:
                js["code" + suffix] = js[key]
                del js[key]

        for key in self._FEATURES:
            if key not in js:
                if key == "score":
                    js[key] = -1
                else:
                    js[key] = ""

        return js

    def generate_urls(self, split_name):
        for e in super().generate_urls(split_name, self.LANGUAGE):
            yield e

    def get_data_files(self, split_name, file_paths, language):
        if split_name == "train":
            return super().get_data_files(split_name, file_paths, language)
        else:
            data_set_path = file_paths["dataset"]
            data_file = os.path.join(data_set_path, "dataset", "test_code.jsonl")
            return [data_file]

    def _generate_examples(self, split_name, file_paths):
        for e in super()._generate_examples(split_name, file_paths, self.LANGUAGE):
            yield e


CLASS_MAPPING = {
    "CodeXGlueTcNLCodeSearchAdv": CodeXGlueTcNLCodeSearchAdvImpl,
}


class CodeXGlueTcNlCodeSearchAdv(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = datasets.BuilderConfig
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
    ]

    def _info(self):
        name = self.config.name
        info = DEFINITIONS[name]
        if info["class_name"] in CLASS_MAPPING:
            self.child = CLASS_MAPPING[info["class_name"]](info)
        else:
            raise RuntimeError(f"Unknown python class for dataset configuration {name}")
        ret = self.child._info()
        return ret

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        return self.child._split_generators(dl_manager=dl_manager)

    def _generate_examples(self, split_name, file_paths):
        return self.child._generate_examples(split_name, file_paths)
 
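For reference, the renaming and back-filling that CodeXGlueTcNLCodeSearchAdvImpl.post_process performed on each record can be shown in isolation. A standalone sketch with a made-up record and an abbreviated feature list (both hypothetical):

    # Hypothetical record in the raw CodeSearchNet key layout.
    js = {
        "function": "def add(a, b):\n    return a + b",
        "function_tokens": ["def", "add", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b"],
        "docstring": "Add two numbers.",
    }

    # Rename function/function_tokens to code/code_tokens, as post_process did.
    for suffix in ("_tokens", ""):
        key = "function" + suffix
        if key in js:
            js["code" + suffix] = js.pop(key)

    # Back-fill missing features: score defaults to -1, everything else to "".
    FEATURES = ["code", "code_tokens", "docstring", "nwo", "score"]  # abbreviated
    for key in FEATURES:
        if key not in js:
            js[key] = -1 if key == "score" else ""

    print(js["code"])   # the renamed function body
    print(js["score"])  # -1, the sentinel for a missing score
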
common.py DELETED
@@ -1,75 +0,0 @@
from typing import List

import datasets


# Citation, taken from https://github.com/microsoft/CodeXGLUE
_DEFAULT_CITATION = """@article{CodeXGLUE,
title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
year={2020},}"""


class Child:
    _DESCRIPTION = None
    _FEATURES = None
    _CITATION = None
    SPLITS = {"train": datasets.Split.TRAIN}
    _SUPERVISED_KEYS = None

    def __init__(self, info):
        self.info = info

    def homepage(self):
        return self.info["project_url"]

    def _info(self):
        # This is the description that will appear on the datasets page.
        return datasets.DatasetInfo(
            description=self.info["description"] + "\n\n" + self._DESCRIPTION,
            features=datasets.Features(self._FEATURES),
            homepage=self.homepage(),
            citation=self._CITATION or _DEFAULT_CITATION,
            supervised_keys=self._SUPERVISED_KEYS,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        SPLITS = self.SPLITS
        _URL = self.info["raw_url"]
        urls_to_download = {}
        for split in SPLITS:
            if split not in urls_to_download:
                urls_to_download[split] = {}

            for key, url in self.generate_urls(split):
                if not url.startswith("http"):
                    url = _URL + "/" + url
                urls_to_download[split][key] = url

        downloaded_files = {}
        for k, v in urls_to_download.items():
            downloaded_files[k] = dl_manager.download_and_extract(v)

        return [
            datasets.SplitGenerator(
                name=SPLITS[k],
                gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
            )
            for k in SPLITS
        ]

    def check_empty(self, entries):
        all_empty = all([v == "" for v in entries.values()])
        all_non_empty = all([v != "" for v in entries.values()])

        if not all_non_empty and not all_empty:
            raise RuntimeError("Parallel data files should have the same number of lines.")

        return all_empty


class TrainValidTestChild(Child):
    SPLITS = {
        "train": datasets.Split.TRAIN,
        "valid": datasets.Split.VALIDATION,
        "test": datasets.Split.TEST,
    }
 
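The contract Child imposes on concrete datasets is compact: supply _FEATURES, yield (key, url) pairs from generate_urls (relative URLs get prefixed with info["raw_url"]), and implement _generate_examples taking the split name and the downloaded file paths. A hypothetical minimal subclass, as a sketch only (JsonlChild, the .jsonl naming, and the "text" field are invented for illustration):

    import json

    import datasets


    class JsonlChild(TrainValidTestChild):  # hypothetical subclass
        _DESCRIPTION = "Example description."
        _FEATURES = {"text": datasets.Value("string")}

        def generate_urls(self, split_name):
            # Relative URL: _split_generators prefixes it with self.info["raw_url"].
            yield "data", f"{split_name}.jsonl"

        def _generate_examples(self, split_name, file_paths):
            # file_paths["data"] is the downloaded path for the "data" key above.
            with open(file_paths["data"], encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    yield idx, {"text": json.loads(line)["text"]}
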
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fb129f32c5c83d58e906ee0f8a6cba513c371719af4331bc765d14314a7187b
+size 16341074
data/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91d821e7264a24335bc1da2b3e00529b9946611289a1cb71fa3fc1263b43273c
+size 144265486
data/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c576a6e59cf967476ec09d52c2436bb0a02f1079ba4507e5aea0bee48a27a74
+size 147039811
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83324007473ad82297b0dc1d380cc7d14e709eae1f5f3e6bdf0c52ed0278e435
+size 8589050
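The four .parquet entries above are Git LFS pointers rather than the data itself: each records the LFS spec version, the SHA-256 of the actual payload (oid), and its byte size. A downloaded shard can be checked against its pointer; a small sketch (the local filename is an assumption):

    import hashlib

    def sha256_of(path: str) -> str:
        # Stream the file so large shards need not fit in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    # oid taken from the validation pointer above; the local path is hypothetical.
    expected = "83324007473ad82297b0dc1d380cc7d14e709eae1f5f3e6bdf0c52ed0278e435"
    print(sha256_of("validation-00000-of-00001.parquet") == expected)
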
generated_definitions.py DELETED
@@ -1,12 +0,0 @@
DEFINITIONS = {
    "default": {
        "class_name": "CodeXGlueTcNLCodeSearchAdv",
        "dataset_type": "Text-Code",
        "description": "CodeXGLUE NL-code-search-Adv dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv",
        "dir_name": "NL-code-search-Adv",
        "name": "default",
        "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Text-Code/NL-code-search-Adv",
        "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Text-Code/NL-code-search-Adv",
        "sizes": {"test": 19210, "train": 251820, "validation": 9604},
    }
}
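If the old script-based loader is ever needed, it remains reachable by pinning the parent revision of this commit; a sketch, assuming a datasets version that still executes loading scripts via trust_remote_code:

    from datasets import load_dataset

    # fc262e6 is this commit's parent, i.e. the last revision with the script.
    ds_old = load_dataset(
        "code_x_glue_tc_nl_code_search_adv",
        revision="fc262e6",
        trust_remote_code=True,
    )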