albertvillanova committed
Commit 87eae99 (parent: e805461)

Delete loading script

Files changed (1):
  1. code_x_glue_ct_code_to_text.py +0 -155
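
With the loading script gone, the dataset is presumably served straight from data files on the Hub, so downstream code should not change. A minimal usage sketch, assuming the per-language configuration names (e.g. "python") are kept:

import datasets

# Loads the "python" configuration directly from the Hub data files;
# after this commit no Python loading script is executed.
ds = datasets.load_dataset("code_x_glue_ct_code_to_text", "python", split="train")
print(ds[0]["func_name"])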
code_x_glue_ct_code_to_text.py DELETED
@@ -1,155 +0,0 @@
import gzip
import json
import os
import os.path
from typing import List

import datasets

from .common import TrainValidTestChild
from .generated_definitions import DEFINITIONS


_DESCRIPTION = """The dataset we use comes from CodeSearchNet, filtered as follows:

- Remove examples whose code cannot be parsed into an abstract syntax tree.
- Remove examples whose documents contain fewer than 3 or more than 256 tokens.
- Remove examples whose documents contain special tokens (e.g. <img ...> or https:...).
- Remove examples whose documents are not in English.
"""

_CITATION = """@article{husain2019codesearchnet,
    title={CodeSearchNet challenge: Evaluating the state of semantic code search},
    author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
    journal={arXiv preprint arXiv:1909.09436},
    year={2019}
}"""
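
# Note: the filters described in _DESCRIPTION were presumably applied upstream
# when CodeXGLUE post-processed CodeSearchNet; this script does not re-filter,
# it only reads the already-filtered jsonl shards.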


class CodeXGlueCtCodeToTextBaseImpl(TrainValidTestChild):
    _DESCRIPTION = _DESCRIPTION
    _CITATION = _CITATION

    # For each file, each line in the uncompressed file represents one function.
    _FEATURES = {
        "id": datasets.Value("int32"),  # Index of the sample
        "repo": datasets.Value("string"),  # repo: the owner/repo
        "path": datasets.Value("string"),  # path: the full path to the original file
        "func_name": datasets.Value("string"),  # func_name: the function or method name
        "original_string": datasets.Value("string"),  # original_string: the raw string before tokenization or parsing
        "language": datasets.Value("string"),  # language: the programming language name
        "code": datasets.Value("string"),  # code/function: the part of the original_string that is code
        "code_tokens": datasets.features.Sequence(
            datasets.Value("string")
        ),  # code_tokens/function_tokens: tokenized version of code
        "docstring": datasets.Value(
            "string"
        ),  # docstring: the top-level comment or docstring, if it exists in the original string
        "docstring_tokens": datasets.features.Sequence(
            datasets.Value("string")
        ),  # docstring_tokens: tokenized version of docstring
        "sha": datasets.Value("string"),  # sha of the file
        "url": datasets.Value("string"),  # url of the file
    }

    _SUPERVISED_KEYS = ["docstring", "docstring_tokens"]

    def generate_urls(self, split_name, language):
        yield "language", f"https://huggingface.co/datasets/code_search_net/resolve/main/data/{language}.zip"
        yield "dataset", "dataset.zip"
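
    # file_paths maps the keys yielded by generate_urls to local extraction
    # paths: "language" is the per-language CodeSearchNet archive (e.g.
    # python.zip), "dataset" is the CodeXGLUE archive whose {split}.txt url
    # lists are consumed in _generate_examples below.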
    def get_data_files(self, split_name, file_paths, language):
        language_specific_path = file_paths["language"]
        final_path = os.path.join(language_specific_path, language, "final")
        # Remove the pickled files to save space.
        for path in os.listdir(final_path):
            if path.endswith(".pkl"):
                os.unlink(os.path.join(final_path, path))

        # Collect every jsonl shard that belongs to the requested split.
        data_files = []
        for root, dirs, files in os.walk(final_path):
            for file in files:
                temp = os.path.join(root, file)
                if ".jsonl" in temp and split_name in temp:
                    data_files.append(temp)
        return data_files

    def post_process(self, split_name, language, js):
        # Hook for language-specific tweaks; the base implementation is a no-op.
        return js
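
    # Split membership: CodeSearchNet ships one url list per split
    # ({split}.txt); a function belongs to a split iff its "url" field
    # appears in that list. Matching records are re-indexed sequentially.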
    def _generate_examples(self, split_name, file_paths, language):
        data_set_path = file_paths["dataset"]

        data_files = self.get_data_files(split_name, file_paths, language)

        # Build the set of urls that belong to the requested split.
        urls = set()
        f1_path_parts = [data_set_path, "dataset", language, f"{split_name}.txt"]
        if self.SINGLE_LANGUAGE:
            # Single-language datasets have no per-language subdirectory.
            del f1_path_parts[2]

        f1_path = os.path.join(*f1_path_parts)
        with open(f1_path, encoding="utf-8") as f1:
            for line in f1:
                urls.add(line.strip())

        idx = 0
        for file in data_files:
            # Shards may be gzipped; open both kinds as utf-8 text.
            if ".gz" in file:
                f = gzip.open(file, mode="rt", encoding="utf-8")
            else:
                f = open(file, encoding="utf-8")

            for line in f:
                js = json.loads(line.strip())
                if js["url"] in urls:
                    js["id"] = idx
                    js = self.post_process(split_name, language, js)
                    # Drop the raw "partition" field: it is not part of the
                    # declared _FEATURES.
                    if "partition" in js:
                        del js["partition"]
                    yield idx, js
                    idx += 1
            f.close()


class CodeXGlueCtCodeToTextImpl(CodeXGlueCtCodeToTextBaseImpl):
    SINGLE_LANGUAGE = False

    def generate_urls(self, split_name):
        language = self.info["parameters"]["language"]
        yield from super().generate_urls(split_name, language)

    def _generate_examples(self, split_name, file_paths):
        language = self.info["parameters"]["language"]
        yield from super()._generate_examples(split_name, file_paths, language)


CLASS_MAPPING = {
    "CodeXGlueCtCodeToText": CodeXGlueCtCodeToTextImpl,
}


class CodeXGlueCtCodeToText(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = datasets.BuilderConfig
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
    ]

    def _info(self):
        name = self.config.name
        info = DEFINITIONS[name]
        if info["class_name"] in CLASS_MAPPING:
            self.child = CLASS_MAPPING[info["class_name"]](info)
        else:
            raise RuntimeError(f"Unknown python class for dataset configuration {name}")
        return self.child._info()

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        return self.child._split_generators(dl_manager=dl_manager)

    def _generate_examples(self, split_name, file_paths):
        return self.child._generate_examples(split_name, file_paths)
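
For context, the builder above is driven entirely by DEFINITIONS from generated_definitions.py. A hypothetical entry, sketched only from the fields this script actually reads (class_name, description, and parameters["language"]); the real generated file may carry additional keys:

# Hypothetical DEFINITIONS entry (illustrative; field set inferred from the
# reads in BUILDER_CONFIGS, _info() and generate_urls()):
DEFINITIONS = {
    "python": {
        "class_name": "CodeXGlueCtCodeToText",
        "description": "CodeXGLUE code-to-text, Python subset",
        "parameters": {"language": "python"},
    },
}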