albertvillanova (HF staff) committed
Commit 4681785
1 Parent(s): 7f61525

Delete loading script

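With the loading script gone, the dataset is presumably served straight from the Hub's auto-converted Parquet files, so load_dataset no longer needs to execute repository code. A minimal sketch of loading after this change, assuming the "java" and "python" config names from the deleted script's DEFINITIONS are preserved:

    # Hedged sketch: assumes the Hub's Parquet auto-conversion now backs this
    # dataset and that the "java"/"python" configs keep their names.
    from datasets import load_dataset

    ds = load_dataset("code_x_glue_cc_code_completion_token", "java")
    print(ds["train"][0]["code"][:10])  # first ten tokens of the first sample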
code_x_glue_cc_code_completion_token.py DELETED
@@ -1,220 +0,0 @@
- import os
- import os.path
- from typing import List
-
- import datasets
-
- from .common import Child
- from .generated_definitions import DEFINITIONS
-
-
- _DESCRIPTION = """Predict the next code token given a context of previous tokens. Models are evaluated by token-level accuracy.
- Code completion is one of the most widely used features in software development through IDEs. An effective code completion tool could improve software developers' productivity. We provide code completion evaluation tasks in two granularities -- token level and line level. Here we introduce token-level code completion. The token-level task is analogous to language modeling: models should be able to predict the next token of arbitrary type.
- """
-
- _CITATION = """@article{raychev2016probabilistic,
- title={Probabilistic Model for Code with Decision Trees},
- author={Raychev, Veselin and Bielik, Pavol and Vechev, Martin},
- journal={ACM SIGPLAN Notices},
- pages={731--747},
- year={2016},
- publisher={ACM New York, NY, USA}
- }
- @inproceedings{allamanis2013mining,
- title={Mining Source Code Repositories at Massive Scale using Language Modeling},
- author={Allamanis, Miltiadis and Sutton, Charles},
- booktitle={2013 10th Working Conference on Mining Software Repositories (MSR)},
- pages={207--216},
- year={2013},
- organization={IEEE}
- }
- @dataset{rafael_michael_karampatsis_2020_3628665,
- author    = {Rafael-Michael Karampatsis and
-              Hlib Babii and
-              Romain Robbes and
-              Charles Sutton and
-              Andrea Janes},
- title     = {Preprocessed Java Code Corpus},
- month     = jan,
- year      = 2020,
- publisher = {Zenodo},
- version   = {1.0},
- doi       = {10.5281/zenodo.3628665},
- url       = {https://doi.org/10.5281/zenodo.3628665}
- }"""
-
-
- class CodeXGlueCcCodeCompletionTokenImpl(Child):
-     _DESCRIPTION = _DESCRIPTION
-     _CITATION = _CITATION
-
-
- class CodeXGlueCcCodeCompletionTokenJavaImpl(CodeXGlueCcCodeCompletionTokenImpl):
-     SPLITS = {
-         "training": datasets.Split.TRAIN,
-         "validation": datasets.Split.VALIDATION,
-         "test": datasets.Split.TEST,
-     }
-
-     _FEATURES = {
-         "id": datasets.Value("int32"),  # Index of the sample
-         "code": datasets.features.Sequence(datasets.Value("string")),  # Code Tokens
-     }
-
-     def generate_urls(self, split_name):
-         language = self.info["parameters"]["language"]
-         if language != "java":
-             raise RuntimeError(f"Unknown language {language}: should be java.")
-
-         yield "data", f"https://huggingface.co/datasets/code_x_glue_cc_code_completion_token/resolve/main/data/java/java_{split_name}_pre"
-
-     def _generate_examples(self, split_name, file_paths):
-         with open(file_paths["data"], encoding="utf-8") as f:
-             for idx, line in enumerate(f):
-                 new_data = []
-                 for token in line.strip().split():
-                     if len(token) > 100:
-                         continue
-                     new_data.append(token)
-                 entry = dict(id=idx, code=new_data)
-                 yield idx, entry
-
-
- class CodeXGlueCcCodeCompletionTokenPythonImpl(CodeXGlueCcCodeCompletionTokenImpl):
-     SPLITS = {"train": datasets.Split.TRAIN, "test": datasets.Split.TEST}
-
-     _FEATURES = {
-         "id": datasets.Value("int32"),  # Index of the sample
-         "path": datasets.Value("string"),  # Original path in the dataset
-         "code": datasets.features.Sequence(datasets.Value("string")),  # Code Tokens
-     }
-
-     PYTHON_FILE_MAPPING = dict(train="python100k_train.txt", test="python50k_eval.txt")
-
-     def generate_urls(self, split_name):
-         language = self.info["parameters"]["language"]
-         if language != "python":
-             raise RuntimeError(f"Unknown language {language}")
-
-         yield "data", "https://huggingface.co/datasets/code_x_glue_cc_code_completion_token/resolve/main/data/python/py150_files.zip"
-
-     def process_string(self, token):
-         # Copyright (c) Microsoft Corporation.
-         # Licensed under the MIT License.
-         import re
-
-         str_quote_options = ["'''", '"""', "'", '"']
-         start_quote = ""
-         end_quote = ""
-         qualifier_regex = r"^[a-z]+"
-         qualifier_match = re.search(qualifier_regex, token)
-         # string qualifiers like 'r' for regex, 'f' for formatted string, 'b' for bytes, 'u' for unicode, etc. (or a combination of them)
-         qualifier = "" if not qualifier_match else qualifier_match[0]
-         # token string without qualifiers
-         token_string = re.sub(qualifier_regex, "", token)
-         # string literal without quotes
-         str_lit = token_string
-         for q in str_quote_options:
-             if token_string.startswith(q):
-                 start_quote = q
-                 str_lit = str_lit[len(q) :]
-                 if token_string.endswith(q):
-                     end_quote = q
-                     str_lit = str_lit[: -len(q)]
-                 break
-         if start_quote in str_quote_options[:2]:
-             return ""
-         return (
-             f"{qualifier}{start_quote}{str_lit}{end_quote}"
-             if len(str_lit) < 15
-             and "\n" not in str_lit
-             and "</s>" not in str_lit
-             and "<s>" not in str_lit
-             and "<pad>" not in str_lit
-             and "<EOL>" not in str_lit
-             else f"{qualifier}{start_quote}{end_quote}"
-         )
-
-     def py_tokenize(self, base_dir, file_name):
-         # Copyright (c) Microsoft Corporation.
-         # Licensed under the MIT License.
-         from io import BytesIO
-         from tokenize import COMMENT, ENCODING, ENDMARKER, INDENT, NEWLINE, NL, NUMBER, STRING, tokenize
-
-         file_paths = open(os.path.join(base_dir, file_name), encoding="utf-8").readlines()
-         for ct, path in enumerate(file_paths):
-             try:
-                 code = open(os.path.join(base_dir, path.strip()), encoding="utf-8").read()
-                 token_gen = tokenize(BytesIO(bytes(code, "utf8")).readline)
-                 out_tokens = []
-                 prev_eol = False
-                 for toknum, tokval, _, _, _ in token_gen:
-                     tokval = " ".join(tokval.split())
-                     if len(tokval) > 100:
-                         continue
-                     if toknum == STRING:
-                         add_token = self.process_string(tokval)
-                         if len(add_token) > 0:
-                             out_tokens.append(add_token)
-                             prev_eol = False
-                     elif toknum == NUMBER:
-                         if len(tokval) < 50:
-                             out_tokens.append(tokval)
-                             prev_eol = False
-                     elif toknum in [NEWLINE, NL]:
-                         if not prev_eol:
-                             out_tokens.append("<EOL>")
-                             prev_eol = True
-                     elif toknum in [COMMENT, INDENT, ENCODING, ENDMARKER] or len(tokval) == 0:
-                         continue
-                     else:
-                         out_tokens.append(tokval)
-                         prev_eol = False
-                 if out_tokens[0] == "<EOL>":
-                     out_tokens = out_tokens[1:]
-                 if out_tokens[-1] == "<EOL>":
-                     out_tokens = out_tokens[:-1]
-             except Exception:
-                 out_tokens = []
-             out_tokens = ["<s>"] + out_tokens + ["</s>"]
-             yield path, out_tokens
-
-     def _generate_examples(self, split_name, file_paths):
-         base_dir = file_paths["data"]
-         filename = self.PYTHON_FILE_MAPPING[split_name]
-
-         idx = 0
-         for entry in self.py_tokenize(base_dir=base_dir, file_name=filename):
-             path, out_tokens = entry
-             path = path[len("data/") :]
-             yield idx, dict(id=idx, path=path, code=out_tokens)
-             idx += 1
-
-
- CLASS_MAPPING = {
-     "CodeXGlueCcCodeCompletionTokenJava": CodeXGlueCcCodeCompletionTokenJavaImpl,
-     "CodeXGlueCcCodeCompletionTokenPython": CodeXGlueCcCodeCompletionTokenPythonImpl,
- }
-
-
- class CodeXGlueCcCodeCompletionToken(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIG_CLASS = datasets.BuilderConfig
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
-     ]
-
-     def _info(self):
-         name = self.config.name
-         info = DEFINITIONS[name]
-         if info["class_name"] in CLASS_MAPPING:
-             self.child = CLASS_MAPPING[info["class_name"]](info)
-         else:
-             raise RuntimeError(f"Unknown python class for dataset configuration {name}")
-         ret = self.child._info()
-         return ret
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         return self.child._split_generators(dl_manager=dl_manager)
-
-     def _generate_examples(self, split_name, file_paths):
-         return self.child._generate_examples(split_name, file_paths)
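
For reference, the deleted docstring says models on this task are evaluated by token-level accuracy. A minimal sketch of that metric, assuming aligned lists of predicted and gold tokens (the official CodeXGLUE evaluator is a separate script and may handle special tokens differently):

    # Hedged sketch of token-level accuracy; `preds`/`golds` are hypothetical
    # aligned token lists, not an API from this repository.
    from typing import List

    def token_accuracy(preds: List[str], golds: List[str]) -> float:
        if len(preds) != len(golds):
            raise ValueError("prediction/gold length mismatch")
        if not golds:
            return 0.0
        return sum(p == g for p, g in zip(preds, golds)) / len(golds)

    print(token_accuracy(["<s>", "import", "os"], ["<s>", "import", "sys"]))  # ~0.667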