albertvillanova (HF staff) committed
Commit 2d22038
Parent: bad29ae

Delete loading script
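With the loading script removed, the Hub serves this dataset from its data files, so `load_dataset` no longer executes repository code. A minimal sketch of loading it after this change (the dataset id is assumed from the deleted script's file name; the split and column names follow the features declared in the diff below):

    from datasets import load_dataset

    # Dataset id assumed to match the deleted script's file name.
    ds = load_dataset("code_x_glue_tc_nl_code_search_adv")
    print(ds["train"][0]["func_name"])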

Files changed (1)
  1. code_x_glue_tc_nl_code_search_adv.py +0 -206
code_x_glue_tc_nl_code_search_adv.py DELETED
@@ -1,206 +0,0 @@
- import json
- import os
- import os.path
- from typing import List
-
- import datasets
-
- from .common import TrainValidTestChild
- from .generated_definitions import DEFINITIONS
-
-
- _DESCRIPTION = """The dataset we use comes from CodeSearchNet, and we filter it as follows:
- - Remove examples whose code cannot be parsed into an abstract syntax tree.
- - Remove examples whose documents have fewer than 3 or more than 256 tokens.
- - Remove examples whose documents contain special tokens (e.g. <img ...> or https:...).
- - Remove examples whose documents are not written in English.
- """
- _CITATION = """@article{husain2019codesearchnet,
-     title={Codesearchnet challenge: Evaluating the state of semantic code search},
-     author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
-     journal={arXiv preprint arXiv:1909.09436},
-     year={2019}
- }"""
-
-
- class CodeXGlueCtCodeToTextBaseImpl(TrainValidTestChild):
-     _DESCRIPTION = _DESCRIPTION
-     _CITATION = _CITATION
-
-     # For each file, each line in the uncompressed file represents one function.
-     _FEATURES = {
-         "id": datasets.Value("int32"),  # Index of the sample
-         "repo": datasets.Value("string"),  # repo: the owner/repo
-         "path": datasets.Value("string"),  # path: the full path to the original file
-         "func_name": datasets.Value("string"),  # func_name: the function or method name
-         "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
-         "language": datasets.Value("string"),  # language: the programming language name
-         "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
-         "code_tokens": datasets.features.Sequence(
-             datasets.Value("string")
-         ),  # code_tokens/function_tokens: tokenized version of code
-         "docstring": datasets.Value(
-             "string"
-         ),  # docstring: the top-level comment or docstring, if it exists in the original string
-         "docstring_tokens": datasets.features.Sequence(
-             datasets.Value("string")
-         ),  # docstring_tokens: tokenized version of docstring
-         "sha": datasets.Value("string"),  # sha of the file
-         "url": datasets.Value("string"),  # url of the file
-     }
-
-     _SUPERVISED_KEYS = ["docstring", "docstring_tokens"]
-
-     def generate_urls(self, split_name, language):
-         yield "language", f"https://huggingface.co/datasets/code_search_net/resolve/main/data/{language}.zip"
-         yield "dataset", "dataset.zip"
-
-     def get_data_files(self, split_name, file_paths, language):
-         language_specific_path = file_paths["language"]
-         final_path = os.path.join(language_specific_path, language, "final")
-         # Clean up the pickled files to save space. os.listdir returns bare
-         # file names, so join them with final_path before unlinking.
-         for path in os.listdir(final_path):
-             if path.endswith(".pkl"):
-                 os.unlink(os.path.join(final_path, path))
-
-         data_files = []
-         for root, dirs, files in os.walk(final_path):
-             for file in files:
-                 temp = os.path.join(root, file)
-                 if ".jsonl" in temp and split_name in temp:
-                     data_files.append(temp)
-         return data_files
-
-     def post_process(self, split_name, language, js):
-         return js
-
-     def _generate_examples(self, split_name, file_paths, language):
-         import gzip
-
-         data_set_path = file_paths["dataset"]
-
-         data_files = self.get_data_files(split_name, file_paths, language)
-
-         # The dataset archive lists one URL per line for each split; a function
-         # belongs to this split only if its "url" field appears in that list.
-         urls = {}
-         f1_path_parts = [data_set_path, "dataset", language, f"{split_name}.txt"]
-         if self.SINGLE_LANGUAGE:
-             del f1_path_parts[2]
-
-         f1_path = os.path.join(*f1_path_parts)
-         with open(f1_path, encoding="utf-8") as f1:
-             for line in f1:
-                 line = line.strip()
-                 urls[line] = True
-
-         idx = 0
-         for file in data_files:
-             # Open gzipped files in text mode so both branches yield str lines.
-             if ".gz" in file:
-                 f = gzip.open(file, "rt", encoding="utf-8")
-             else:
-                 f = open(file, encoding="utf-8")
-
-             for line in f:
-                 line = line.strip()
-                 js = json.loads(line)
-                 if js["url"] in urls:
-                     js["id"] = idx
-                     js = self.post_process(split_name, language, js)
-                     if "partition" in js:
-                         del js["partition"]
-                     yield idx, js
-                     idx += 1
-             f.close()
-
-
- class CodeXGlueTcNLCodeSearchAdvImpl(CodeXGlueCtCodeToTextBaseImpl):
-     LANGUAGE = "python"
-     SINGLE_LANGUAGE = True
-
-     _FEATURES = {
-         "id": datasets.Value("int32"),  # Index of the sample
-         "repo": datasets.Value("string"),  # repo: the owner/repo
-         "path": datasets.Value("string"),  # path: the full path to the original file
-         "func_name": datasets.Value("string"),  # func_name: the function or method name
-         "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
-         "language": datasets.Value("string"),  # language: the programming language
-         "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
-         "code_tokens": datasets.features.Sequence(
-             datasets.Value("string")
-         ),  # code_tokens/function_tokens: tokenized version of code
-         "docstring": datasets.Value(
-             "string"
-         ),  # docstring: the top-level comment or docstring, if it exists in the original string
-         "docstring_tokens": datasets.features.Sequence(
-             datasets.Value("string")
-         ),  # docstring_tokens: tokenized version of docstring
-         "sha": datasets.Value("string"),  # sha of the file
-         "url": datasets.Value("string"),  # url of the file
-         "docstring_summary": datasets.Value("string"),  # Summary of the docstring
-         "parameters": datasets.Value("string"),  # parameters of the function
-         "return_statement": datasets.Value("string"),  # return statement
-         "argument_list": datasets.Value("string"),  # list of arguments of the function
-         "identifier": datasets.Value("string"),  # identifier
-         "nwo": datasets.Value("string"),  # nwo: "name with owner", i.e. owner/repo
-         "score": datasets.Value("float"),  # score for this search
-     }
-
-     def post_process(self, split_name, language, js):
-         # Rename CodeSearchNet's "function"/"function_tokens" fields to
-         # "code"/"code_tokens".
-         for suffix in "_tokens", "":
-             key = "function" + suffix
-             if key in js:
-                 js["code" + suffix] = js[key]
-                 del js[key]
-
-         # Fill in any declared feature the record lacks.
-         for key in self._FEATURES:
-             if key not in js:
-                 if key == "score":
-                     js[key] = -1
-                 else:
-                     js[key] = ""
-
-         return js
-
-     def generate_urls(self, split_name):
-         for e in super().generate_urls(split_name, self.LANGUAGE):
-             yield e
-
-     def get_data_files(self, split_name, file_paths, language):
-         if split_name == "train":
-             return super().get_data_files(split_name, file_paths, language)
-         else:
-             # Validation/test examples come from the curated test_code.jsonl
-             # file shipped inside the dataset archive.
-             data_set_path = file_paths["dataset"]
-             data_file = os.path.join(data_set_path, "dataset", "test_code.jsonl")
-             return [data_file]
-
-     def _generate_examples(self, split_name, file_paths):
-         for e in super()._generate_examples(split_name, file_paths, self.LANGUAGE):
-             yield e
-
-
- CLASS_MAPPING = {
-     "CodeXGlueTcNLCodeSearchAdv": CodeXGlueTcNLCodeSearchAdvImpl,
- }
-
-
- class CodeXGlueTcNlCodeSearchAdv(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIG_CLASS = datasets.BuilderConfig
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
-     ]
-
-     def _info(self):
-         name = self.config.name
-         info = DEFINITIONS[name]
-         if info["class_name"] in CLASS_MAPPING:
-             self.child = CLASS_MAPPING[info["class_name"]](info)
-         else:
-             raise RuntimeError(f"Unknown python class for dataset configuration {name}")
-         return self.child._info()
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         return self.child._split_generators(dl_manager=dl_manager)
-
-     def _generate_examples(self, split_name, file_paths):
-         return self.child._generate_examples(split_name, file_paths)
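
For reference, a minimal, self-contained sketch of the key normalization that CodeXGlueTcNLCodeSearchAdvImpl.post_process applied to each raw CodeSearchNet record; the example record and the trimmed feature list are invented for illustration:

    # Hypothetical standalone re-implementation of the post_process logic above.
    FEATURE_KEYS = ["id", "repo", "code", "code_tokens", "docstring", "docstring_tokens", "score"]

    def post_process(js: dict) -> dict:
        # Rename CodeSearchNet's "function"/"function_tokens" to "code"/"code_tokens".
        for suffix in ("_tokens", ""):
            key = "function" + suffix
            if key in js:
                js["code" + suffix] = js.pop(key)
        # Fill any missing feature: -1 for the numeric "score", "" otherwise.
        for key in FEATURE_KEYS:
            js.setdefault(key, -1 if key == "score" else "")
        return js

    record = {"function": "def add(a, b): return a + b", "function_tokens": ["def", "add"]}
    print(post_process(record)["code"])  # -> def add(a, b): return a + b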