# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CodeSearchNet corpus Files: proxy dataset for semantic code search"""
# TODO: add licensing info in the examples
# TODO: log richer information (especially while extracting the jsonl.gz files)
# TODO: enable custom configs, such as "java+python"
# TODO: enable fetching examples with a given license, eg: "java_MIT"
import json
import os
import datasets
_CITATION = """\
@article{husain2019codesearchnet,
title={{CodeSearchNet} challenge: Evaluating the state of semantic code search},
author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
journal={arXiv preprint arXiv:1909.09436},
year={2019}
}
"""
_DESCRIPTION = """\
CodeSearchNet corpus contains about 6 million functions from open-source code \
spanning six programming languages (Go, Java, JavaScript, PHP, Python, and Ruby). \
The CodeSearchNet Corpus also contains automatically generated query-like \
natural language for 2 million functions, obtained from mechanically scraping \
and preprocessing associated function documentation.
"""
_HOMEPAGE = "https://github.com/github/CodeSearchNet"
_LICENSE = "Various"
_DATA_DIR_URL = "data"
_AVAILABLE_LANGUAGES = ["python", "java", "javascript", "go", "ruby", "php"]
_URLs = {language: f"{language}.jsonl" for language in _AVAILABLE_LANGUAGES}
# URLs for "all" are just the combined per-language URL mapping
_URLs["all"] = _URLs.copy()
class CodeSearchNet(datasets.GeneratorBasedBuilder):
""" "Extended CodeSearchNet corpus: proxy dataset for semantic code search."""
VERSION = datasets.Version("1.0.0", "Add Extended CodeSearchNet corpus dataset")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="all",
version=VERSION,
description="All available languages: Java, Go, Javascript, Python, PHP, Ruby",
),
datasets.BuilderConfig(
name="java",
version=VERSION,
description="Java language",
),
datasets.BuilderConfig(
name="go",
version=VERSION,
description="Go language",
),
datasets.BuilderConfig(
name="python",
version=VERSION,
description="Pyhton language",
),
datasets.BuilderConfig(
name="javascript",
version=VERSION,
description="Javascript language",
),
datasets.BuilderConfig(
name="ruby",
version=VERSION,
description="Ruby language",
),
datasets.BuilderConfig(
name="php",
version=VERSION,
description="PHP language",
),
]
DEFAULT_CONFIG_NAME = "all"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"repository_name": datasets.Value("string"),
"file_path": datasets.Value("string"),
"language": datasets.Value("string"),
"url": datasets.Value("string"),
"contents": datasets.Value("string")
# TODO - add licensing info in the examples
}
),
# No default supervised keys
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators.
Note: The original data is stored in S3, and follows this unusual directory structure:
```
.
β”œβ”€β”€ <language_name> # e.g. python
β”‚ └── final
β”‚ └── jsonl
β”‚ β”œβ”€β”€ test
β”‚ β”‚ └── <language_name>_test_0.jsonl.gz
β”‚ β”œβ”€β”€ train
β”‚ β”‚ β”œβ”€β”€ <language_name>_train_0.jsonl.gz
β”‚ β”‚ β”œβ”€β”€ <language_name>_train_1.jsonl.gz
β”‚ β”‚ β”œβ”€β”€ ...
β”‚ β”‚ └── <language_name>_train_n.jsonl.gz
β”‚ └── valid
β”‚ └── <language_name>_valid_0.jsonl.gz
β”œβ”€β”€ <language_name>_dedupe_definitions_v2.pkl
└── <language_name>_licenses.pkl
```
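
        This script, however, reads the jsonl files from the local `data`
        directory, laid out as `data/<split>/<language>.jsonl` (see
        `split2dirs` below).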
"""
data_urls = _URLs[self.config.name]
if isinstance(data_urls, str):
data_urls = {self.config.name: data_urls}
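        # e.g. for the "java" config this yields {"java": "java.jsonl"}, so the
        # .values() call below works the same for single-language and "all" configs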
split2dirs = {
split_name: [os.path.join(_DATA_DIR_URL, split_name, path) for path in data_urls.values()]
for split_name in ["train", "test", "valid"]
}
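        # For illustration, the "python" config (with the default _DATA_DIR_URL
        # of "data") produces:
        #   {
        #       "train": ["data/train/python.jsonl"],
        #       "test": ["data/test/python.jsonl"],
        #       "valid": ["data/valid/python.jsonl"],
        #   }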
downloaded_files = dl_manager.download_and_extract(split2dirs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": downloaded_files["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepaths": downloaded_files["test"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepaths": downloaded_files["valid"],
},
),
]
def _generate_examples(self, filepaths):
"""Yields the examples by iterating through the available jsonl files."""
for file_id_, filepath in enumerate(filepaths):
with open(filepath, encoding="utf-8") as f:
for row_id_, row in enumerate(f):
# Key of the example = file_id + row_id,
# to ensure all examples have a distinct key
id_ = f"{file_id_}_{row_id_}"
data = json.loads(row)
yield id_, {
"repository_name": data["repository_name"],
"file_path": data["file_path"],
"language": data["language"],
"url": data["url"],
"contents": data["contents"],
}
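

# Minimal usage sketch (hypothetical: the Hub dataset id
# "andstor/code_search_net_files" is inferred from this script's filename and
# may differ):
#
#   from datasets import load_dataset
#   ds = load_dataset("andstor/code_search_net_files", "python", split="train")
#   print(ds[0]["repository_name"], ds[0]["file_path"])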