albertvillanova (HF staff) committed on
Commit 54bdbd7
1 Parent(s): 9e629d7

Convert dataset to Parquet (#6)


- Convert dataset to Parquet (ad0bf494765acb68bc670e6f5756770775803520)
- Add python data files (7f61525af75001bdcba4a36a15b870c2be051902)
- Delete loading script (4681785250b212273b3363aed349eaaf2f7168e4)
- Delete loading script auxiliary file (e7eebd26b27fe00e9097020c557157793fb6c5f4)
- Delete data file (fa1ce2a213d78018655640c19183e34a55b59ccc)
- Delete loading script auxiliary file (e5006b7e221f4c891ce8d8285ff8409d1d36cad9)
- Delete data file (dfef3bbb8a79c22c7c04278945a9fbd97543a699)
- Delete data file (be1d62668ddc3c16afc64e3269140bd48b834473)
- Delete data file (b9ec9e990cf69293f6bcd70c37d73f95879814e2)

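With the loading script removed, `datasets` reads the committed Parquet shards directly. A minimal sketch of the expected usage after this commit (config and split names are taken from the README metadata below; treat this as illustrative, not an official snippet):

```python
from datasets import load_dataset

# Load the Java config; splits now come straight from the Parquet shards
# rather than from the deleted loading script.
java_train = load_dataset("code_x_glue_cc_code_completion_token", "java", split="train")
print(java_train[0]["code"][:10])  # first ten code tokens of the first example
```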
README.md CHANGED
@@ -29,16 +29,16 @@ dataset_info:
     sequence: string
   splits:
   - name: train
-    num_bytes: 128312061
+    num_bytes: 128312045
     num_examples: 12934
   - name: validation
-    num_bytes: 30259174
+    num_bytes: 30259166
     num_examples: 7189
   - name: test
-    num_bytes: 43027956
+    num_bytes: 43027948
     num_examples: 8268
-  download_size: 126856519
-  dataset_size: 201599191
+  download_size: 31320339
+  dataset_size: 201599159
 - config_name: python
   features:
   - name: id
@@ -49,13 +49,28 @@ dataset_info:
     sequence: string
   splits:
   - name: train
-    num_bytes: 684319575
+    num_bytes: 684319455
     num_examples: 100000
   - name: test
-    num_bytes: 333978088
+    num_bytes: 333978028
     num_examples: 50000
-  download_size: 199067128
-  dataset_size: 1018297663
+  download_size: 210143525
+  dataset_size: 1018297483
+configs:
+- config_name: java
+  data_files:
+  - split: train
+    path: java/train-*
+  - split: validation
+    path: java/validation-*
+  - split: test
+    path: java/test-*
+- config_name: python
+  data_files:
+  - split: train
+    path: python/train-*
+  - split: test
+    path: python/test-*
 ---
 # Dataset Card for "code_x_glue_cc_code_completion_token"
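The new `configs` block above is what maps each config's splits to Parquet glob patterns. As a rough equivalent that bypasses the card metadata, the shards can be read as plain Parquet; the `hf://` paths below are an assumption based on this repository's layout and the URLs in the deleted script:

```python
from datasets import load_dataset

# Hypothetical direct read of the Java shards via the glob patterns
# declared in the README's `configs` section (repo id assumed).
data_files = {
    "train": "hf://datasets/code_x_glue_cc_code_completion_token/java/train-*",
    "validation": "hf://datasets/code_x_glue_cc_code_completion_token/java/validation-*",
    "test": "hf://datasets/code_x_glue_cc_code_completion_token/java/test-*",
}
java = load_dataset("parquet", data_files=data_files)
```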
code_x_glue_cc_code_completion_token.py DELETED
@@ -1,220 +0,0 @@
-import os
-import os.path
-from typing import List
-
-import datasets
-
-from .common import Child
-from .generated_definitions import DEFINITIONS
-
-
-_DESCRIPTION = """Predict next code token given context of previous tokens. Models are evaluated by token level accuracy.
-Code completion is one of the most widely used features in software development through IDEs. An effective code completion tool could improve software developers' productivity. We provide code completion evaluation tasks in two granularities -- token level and line level. Here we introduce token level code completion. The token level task is analogous to language modeling: models should be able to predict the next token of arbitrary types.
-"""
-
-_CITATION = """@article{raychev2016probabilistic,
-title={Probabilistic Model for Code with Decision Trees},
-author={Raychev, Veselin and Bielik, Pavol and Vechev, Martin},
-journal={ACM SIGPLAN Notices},
-pages={731--747},
-year={2016},
-publisher={ACM New York, NY, USA}
-}
-@inproceedings{allamanis2013mining,
-title={Mining Source Code Repositories at Massive Scale using Language Modeling},
-author={Allamanis, Miltiadis and Sutton, Charles},
-booktitle={2013 10th Working Conference on Mining Software Repositories (MSR)},
-pages={207--216},
-year={2013},
-organization={IEEE}
-}
-@dataset{rafael_michael_karampatsis_2020_3628665,
-  author    = {Rafael-Michael Karampatsis and
-               Hlib Babii and
-               Romain Robbes and
-               Charles Sutton and
-               Andrea Janes},
-  title     = {Preprocessed Java Code Corpus},
-  month     = jan,
-  year      = 2020,
-  publisher = {Zenodo},
-  version   = {1.0},
-  doi       = {10.5281/zenodo.3628665},
-  url       = {https://doi.org/10.5281/zenodo.3628665}
-}"""
-
-
-class CodeXGlueCcCodeCompletionTokenImpl(Child):
-    _DESCRIPTION = _DESCRIPTION
-    _CITATION = _CITATION
-
-
-class CodeXGlueCcCodeCompletionTokenJavaImpl(CodeXGlueCcCodeCompletionTokenImpl):
-    SPLITS = {
-        "training": datasets.Split.TRAIN,
-        "validation": datasets.Split.VALIDATION,
-        "test": datasets.Split.TEST,
-    }
-
-    _FEATURES = {
-        "id": datasets.Value("int32"),  # Index of the sample
-        "code": datasets.features.Sequence(datasets.Value("string")),  # Code Tokens
-    }
-
-    def generate_urls(self, split_name):
-        language = self.info["parameters"]["language"]
-        if language != "java":
-            raise RuntimeError(f"Unknown language {language}: should be java.")
-
-        yield "data", f"https://huggingface.co/datasets/code_x_glue_cc_code_completion_token/resolve/main/data/java/java_{split_name}_pre"
-
-    def _generate_examples(self, split_name, file_paths):
-        with open(file_paths["data"], encoding="utf-8") as f:
-            for idx, line in enumerate(f):
-                new_data = []
-                for token in line.strip().split():
-                    if len(token) > 100:
-                        continue
-                    new_data.append(token)
-                entry = dict(id=idx, code=new_data)
-                yield idx, entry
-
-
-class CodeXGlueCcCodeCompletionTokenPythonImpl(CodeXGlueCcCodeCompletionTokenImpl):
-    SPLITS = {"train": datasets.Split.TRAIN, "test": datasets.Split.TEST}
-
-    _FEATURES = {
-        "id": datasets.Value("int32"),  # Index of the sample
-        "path": datasets.Value("string"),  # Original path in the dataset
-        "code": datasets.features.Sequence(datasets.Value("string")),  # Code Tokens
-    }
-
-    PYTHON_FILE_MAPPING = dict(train="python100k_train.txt", test="python50k_eval.txt")
-
-    def generate_urls(self, split_name):
-        language = self.info["parameters"]["language"]
-        if language != "python":
-            raise RuntimeError(f"Unknown language {language}")
-
-        yield "data", "https://huggingface.co/datasets/code_x_glue_cc_code_completion_token/resolve/main/data/python/py150_files.zip"
-
-    def process_string(self, token):
-        # Copyright (c) Microsoft Corporation.
-        # Licensed under the MIT License.
-        import re
-
-        str_quote_options = ["'''", '"""', "'", '"']
-        start_quote = ""
-        end_quote = ""
-        qualifier_regex = r"^[a-z]+"
-        qualifier_match = re.search(qualifier_regex, token)
-        # string qualifiers like 'r' for regex, 'f' for formatted string, 'b' for bytes, 'u' for unicode, etc. (or a combination of them)
-        qualifier = "" if not qualifier_match else qualifier_match[0]
-        # token string without qualifiers
-        token_string = re.sub(qualifier_regex, "", token)
-        # string literal without quotes
-        str_lit = token_string
-        for q in str_quote_options:
-            if token_string.startswith(q):
-                start_quote = q
-                str_lit = str_lit[len(q) :]
-                if token_string.endswith(q):
-                    end_quote = q
-                    str_lit = str_lit[: -len(q)]
-                break
-        if start_quote in str_quote_options[:2]:
-            return ""
-        return (
-            f"{qualifier}{start_quote}{str_lit}{end_quote}"
-            if len(str_lit) < 15
-            and "\n" not in str_lit
-            and "</s>" not in str_lit
-            and "<s>" not in str_lit
-            and "<pad>" not in str_lit
-            and "<EOL>" not in str_lit
-            else f"{qualifier}{start_quote}{end_quote}"
-        )
-
-    def py_tokenize(self, base_dir, file_name):
-        # Copyright (c) Microsoft Corporation.
-        # Licensed under the MIT License.
-        from io import BytesIO
-        from tokenize import COMMENT, ENCODING, ENDMARKER, INDENT, NEWLINE, NL, NUMBER, STRING, tokenize
-
-        file_paths = open(os.path.join(base_dir, file_name), encoding="utf-8").readlines()
-        for ct, path in enumerate(file_paths):
-            try:
-                code = open(os.path.join(base_dir, path.strip()), encoding="utf-8").read()
-                token_gen = tokenize(BytesIO(bytes(code, "utf8")).readline)
-                out_tokens = []
-                prev_eol = False
-                for toknum, tokval, _, _, _ in token_gen:
-                    tokval = " ".join(tokval.split())
-                    if len(tokval) > 100:
-                        continue
-                    if toknum == STRING:
-                        add_token = self.process_string(tokval)
-                        if len(add_token) > 0:
-                            out_tokens.append(add_token)
-                            prev_eol = False
-                    elif toknum == NUMBER:
-                        if len(tokval) < 50:
-                            out_tokens.append(tokval)
-                            prev_eol = False
-                    elif toknum in [NEWLINE, NL]:
-                        if not prev_eol:
-                            out_tokens.append("<EOL>")
-                            prev_eol = True
-                    elif toknum in [COMMENT, INDENT, ENCODING, ENDMARKER] or len(tokval) == 0:
-                        continue
-                    else:
-                        out_tokens.append(tokval)
-                        prev_eol = False
-                if out_tokens[0] == "<EOL>":
-                    out_tokens = out_tokens[1:]
-                if out_tokens[-1] == "<EOL>":
-                    out_tokens = out_tokens[:-1]
-            except Exception:
-                out_tokens = []
-            out_tokens = ["<s>"] + out_tokens + ["</s>"]
-            yield path, out_tokens
-
-    def _generate_examples(self, split_name, file_paths):
-        base_dir = file_paths["data"]
-        filename = self.PYTHON_FILE_MAPPING[split_name]
-
-        idx = 0
-        for entry in self.py_tokenize(base_dir=base_dir, file_name=filename):
-            path, out_tokens = entry
-            path = path[len("data/") :]
-            yield idx, dict(id=idx, path=path, code=out_tokens)
-            idx += 1
-
-
-CLASS_MAPPING = {
-    "CodeXGlueCcCodeCompletionTokenJava": CodeXGlueCcCodeCompletionTokenJavaImpl,
-    "CodeXGlueCcCodeCompletionTokenPython": CodeXGlueCcCodeCompletionTokenPythonImpl,
-}
-
-
-class CodeXGlueCcCodeCompletionToken(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIG_CLASS = datasets.BuilderConfig
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
-    ]
-
-    def _info(self):
-        name = self.config.name
-        info = DEFINITIONS[name]
-        if info["class_name"] in CLASS_MAPPING:
-            self.child = CLASS_MAPPING[info["class_name"]](info)
-        else:
-            raise RuntimeError(f"Unknown python class for dataset configuration {name}")
-        ret = self.child._info()
-        return ret
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        return self.child._split_generators(dl_manager=dl_manager)
-
-    def _generate_examples(self, split_name, file_paths):
-        return self.child._generate_examples(split_name, file_paths)
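For readers skimming the deleted script: the core of `py_tokenize` is a pass over Python's own `tokenize` stream that collapses newlines into `<EOL>` markers and drops layout tokens. A condensed, self-contained sketch of that loop (string and number filtering omitted; the input snippet is invented):

```python
# Standalone sketch of the deleted py_tokenize loop on an in-memory snippet.
from io import BytesIO
from tokenize import COMMENT, ENCODING, ENDMARKER, INDENT, NEWLINE, NL, tokenize

code = 'x = 1\nif x:\n    print("hi")\n'
out_tokens, prev_eol = [], False
for toknum, tokval, _, _, _ in tokenize(BytesIO(code.encode("utf8")).readline):
    tokval = " ".join(tokval.split())
    if toknum in (NEWLINE, NL):
        if not prev_eol:  # collapse consecutive newlines into one marker
            out_tokens.append("<EOL>")
            prev_eol = True
    elif toknum in (COMMENT, INDENT, ENCODING, ENDMARKER) or not tokval:
        continue  # drop layout/comment tokens (DEDENT has an empty tokval)
    else:
        out_tokens.append(tokval)
        prev_eol = False
print(["<s>"] + out_tokens + ["</s>"])
# ['<s>', 'x', '=', '1', '<EOL>', 'if', 'x', ':', '<EOL>', 'print', '(', '"hi"', ')', '<EOL>', '</s>']
```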
common.py DELETED
@@ -1,75 +0,0 @@
-from typing import List
-
-import datasets
-
-
-# Citation, taken from https://github.com/microsoft/CodeXGLUE
-_DEFAULT_CITATION = """@article{CodeXGLUE,
-title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
-year={2020},}"""
-
-
-class Child:
-    _DESCRIPTION = None
-    _FEATURES = None
-    _CITATION = None
-    SPLITS = {"train": datasets.Split.TRAIN}
-    _SUPERVISED_KEYS = None
-
-    def __init__(self, info):
-        self.info = info
-
-    def homepage(self):
-        return self.info["project_url"]
-
-    def _info(self):
-        # This is the description that will appear on the datasets page.
-        return datasets.DatasetInfo(
-            description=self.info["description"] + "\n\n" + self._DESCRIPTION,
-            features=datasets.Features(self._FEATURES),
-            homepage=self.homepage(),
-            citation=self._CITATION or _DEFAULT_CITATION,
-            supervised_keys=self._SUPERVISED_KEYS,
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        SPLITS = self.SPLITS
-        _URL = self.info["raw_url"]
-        urls_to_download = {}
-        for split in SPLITS:
-            if split not in urls_to_download:
-                urls_to_download[split] = {}
-
-            for key, url in self.generate_urls(split):
-                if not url.startswith("http"):
-                    url = _URL + "/" + url
-                urls_to_download[split][key] = url
-
-        downloaded_files = {}
-        for k, v in urls_to_download.items():
-            downloaded_files[k] = dl_manager.download_and_extract(v)
-
-        return [
-            datasets.SplitGenerator(
-                name=SPLITS[k],
-                gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
-            )
-            for k in SPLITS
-        ]
-
-    def check_empty(self, entries):
-        all_empty = all([v == "" for v in entries.values()])
-        all_non_empty = all([v != "" for v in entries.values()])
-
-        if not all_non_empty and not all_empty:
-            raise RuntimeError("Parallel data files should have the same number of lines.")
-
-        return all_empty
-
-
-class TrainValidTestChild(Child):
-    SPLITS = {
-        "train": datasets.Split.TRAIN,
-        "valid": datasets.Split.VALIDATION,
-        "test": datasets.Split.TEST,
-    }
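`check_empty` encodes a parallel-file invariant: within any row, the entries read from parallel files must be all empty or all non-empty, otherwise the files have drifted out of line alignment. A standalone restatement with invented example rows:

```python
def check_empty(entries: dict) -> bool:
    """Return True when every value is empty (skip the row); raise when the
    parallel files disagree, since they must stay line-aligned."""
    all_empty = all(v == "" for v in entries.values())
    all_non_empty = all(v != "" for v in entries.values())
    if not all_empty and not all_non_empty:
        raise RuntimeError("Parallel data files should have the same number of lines.")
    return all_empty

assert check_empty({"src": "", "tgt": ""}) is True     # blank row in both files
assert check_empty({"src": "a", "tgt": "b"}) is False  # normal aligned row
```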
generated_definitions.py DELETED
@@ -1,24 +0,0 @@
-DEFINITIONS = {
-    "java": {
-        "class_name": "CodeXGlueCcCodeCompletionTokenJava",
-        "dataset_type": "Code-Code",
-        "description": "CodeXGLUE CodeCompletion-token dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/CodeCompletion-token",
-        "dir_name": "CodeCompletion-token",
-        "name": "java",
-        "parameters": {"language": "java", "original_language_name": "javaCorpus"},
-        "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/CodeCompletion-token",
-        "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/CodeCompletion-token/dataset/javaCorpus",
-        "sizes": {"test": 8268, "train": 12934, "validation": 7189},
-    },
-    "python": {
-        "class_name": "CodeXGlueCcCodeCompletionTokenPython",
-        "dataset_type": "Code-Code",
-        "description": "CodeXGLUE CodeCompletion-token dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/CodeCompletion-token",
-        "dir_name": "CodeCompletion-token",
-        "name": "python",
-        "parameters": {"language": "python", "original_language_name": "py150"},
-        "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/CodeCompletion-token",
-        "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/CodeCompletion-token/dataset/py150",
-        "sizes": {"test": 50000, "train": 100000},
-    },
-}
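The `sizes` entries double as a sanity check on the conversion: the Parquet splits should report exactly these row counts. A sketch of that cross-check (assumes the `DEFINITIONS` dict above is in scope and the Hub is reachable):

```python
from datasets import load_dataset

# Verify the converted splits against the counts declared in DEFINITIONS.
for config, info in DEFINITIONS.items():
    ds = load_dataset("code_x_glue_cc_code_completion_token", config)
    for split, expected in info["sizes"].items():
        assert ds[split].num_rows == expected, (config, split, expected)
```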
data/java/java_validation_pre → java/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c58a97d96aa7435396581ee5efbb93c0e74a545ba5f795f878098a6e59ab8b3
-size 18835141
+oid sha256:97514e34366dc824ff71e472fbdfdf992019b036d849cf11fc458618db219d5b
+size 6543103
data/java/java_test_pre → java/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a88cd5c91c2ed23a928528bef3535f4fc8db1359975447211f2b13926cc38d9d
-size 26969670
+oid sha256:0ed2b347f7fef51b39e454873acf4ebed067df6c7fb2b3d0a446212c3dd02b06
+size 19617199
data/java/java_training_pre → java/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:676295d2756adcac22e213fbc3ea0f50669a0d152e9497e23a2929e2e2124905
-size 81051708
+oid sha256:e031afbd9871fe73dccaf3967012feac00baf1d3e5794d0319034a42a9456d17
+size 5160037
data/python/py150_files.zip → python/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f679c6beaabd70d15cd49e6e47cd01dc1ec4226e60ec83a2bb210ff8acf6dd6
-size 296440502
+oid sha256:cca24d90204d32b16c05dd4272bd400a38984e70b262408ed4847406263831c2
+size 69199821
python/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:974f6153acd0660045612a037529cfbcf2a2b60e67967e981f90fe558477edc6
+size 70071686
python/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ecf0d0bada24b45e7dff93e8b083acbc17e9e96c6c99c6870e3aad989eb8c3f
+size 70872018
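These pointer files record only the LFS object hash and byte size; the Parquet shards themselves live in LFS storage. One way to inspect a shard once downloaded (local path assumed for illustration):

```python
import pyarrow.parquet as pq

# Inspect a locally downloaded shard without loading it fully into memory.
meta = pq.read_metadata("python/train-00000-of-00002.parquet")
print(meta.num_rows, meta.num_columns)
print(pq.read_schema("python/train-00000-of-00002.parquet"))
```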