# coding=utf-8
# Copyright 2022 CodeQueries Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The CodeQueries benchmark."""
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CODEQUERIES_CITATION = """\
@article{codequeries2022,
  title={Learning to Answer Semantic Queries over Code},
  author={A, B, C, D, E, F},
  journal={arXiv preprint arXiv:<.>},
  year={2022}
}
"""
_IDEAL_DESCRIPTION = """\
CodeQueries Ideal setup.
"""
_PREFIX_DESCRIPTION = """\
CodeQueries Prefix setup."""
_FILE_IDEAL_DESCRIPTION = """\
CodeQueries File level Ideal setup."""
_TWOSTEP_DESCRIPTION = """\
CodeQueries Twostep setup."""


class CodequeriesConfig(datasets.BuilderConfig):
    """BuilderConfig for Codequeries."""

    def __init__(self, features, citation, data_url, url, **kwargs):
        """BuilderConfig for Codequeries.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          citation: `string`, citation for the dataset.
          data_url: `dict[string, string or list[string]]`, mapping of split
            names to the data file path(s) for that split.
          url: `string`, homepage URL for the dataset.
          **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.0: Initial version.
        super(CodequeriesConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.citation = citation
self.data_url = data_url
        self.url = url


class Codequeries(datasets.GeneratorBasedBuilder):
    """The Codequeries benchmark."""

    BUILDER_CONFIGS = [
CodequeriesConfig(
name="ideal",
description=_IDEAL_DESCRIPTION,
features=["query_name", "context_blocks", "answer_spans",
"supporting_fact_spans", "code_file_path", "example_type",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"train": "ideal_train.json",
"dev": "ideal_val.json",
"test": "ideal_test.json"
},
url="",
),
CodequeriesConfig(
name="prefix",
description=_PREFIX_DESCRIPTION,
features=["query_name", "answer_spans",
"supporting_fact_spans", "code_file_path", "example_type",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"test": "prefix_test.json"
},
url="",
),
CodequeriesConfig(
name="file_ideal",
description=_FILE_IDEAL_DESCRIPTION,
features=["query_name", "context_blocks", "answer_spans",
"supporting_fact_spans", "code_file_path", "example_type",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"test": "file_ideal_test.json"
},
url="",
),
CodequeriesConfig(
name="twostep",
description=_TWOSTEP_DESCRIPTION,
features=["query_name", "context_blocks", "answer_spans",
"supporting_fact_spans", "code_file_path", "example_type",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"test": ["twostep_relevance/" + "twostep_relevance_test_" + str(i) + ".json" for i in range(0,10)]
},
url="",
),
]
DEFAULT_CONFIG_NAME = "ideal"

    def _info(self):
        # Full feature schema. `label_sequence` is populated by the "ideal",
        # "prefix" and "file_ideal" setups, while `relevance_label` is
        # populated only by the "twostep" setup; a field that a configuration
        # does not emit is left as None.
        features = {}
        features["query_name"] = datasets.Value("string")
        features["context_blocks"] = [
            {
                "content": datasets.Value("string"),
                "metadata": datasets.Value("string"),
                "header": datasets.Value("string")
            }
        ]
        # Answer spans and supporting-fact spans share one structure: the span
        # text plus its (line, column) start and end positions in the file.
        features["answer_spans"] = [
            {
                "span": datasets.Value("string"),
                "start_line": datasets.Value("int32"),
                "start_column": datasets.Value("int32"),
                "end_line": datasets.Value("int32"),
                "end_column": datasets.Value("int32")
            }
        ]
        features["supporting_fact_spans"] = [
            {
                "span": datasets.Value("string"),
                "start_line": datasets.Value("int32"),
                "start_column": datasets.Value("int32"),
                "end_line": datasets.Value("int32"),
                "end_column": datasets.Value("int32")
            }
        ]
        features["code_file_path"] = datasets.Value("string")
        features["example_type"] = datasets.Value("int32")
        features["subtokenized_input_sequence"] = datasets.features.Sequence(datasets.Value("string"))
        features["label_sequence"] = datasets.features.Sequence(datasets.Value("int32"))
        features["relevance_label"] = datasets.Value("int32")
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=_CODEQUERIES_CITATION,
)

    def _split_generators(self, dl_manager):
        # `data_url` holds paths relative to this dataset repository; the
        # download manager resolves and caches them.
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        # Only the "ideal" setup ships train/validation/test splits; the other
        # setups are evaluation-only.
        if self.config.name in ["prefix", "file_ideal", "twostep"]:
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": dl_dir["test"],
"split": datasets.Split.TEST,
},
),
]
else:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": dl_dir["train"],
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": dl_dir["dev"],
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": dl_dir["test"],
"split": datasets.Split.TEST,
},
),
]

    def _generate_examples(self, filepath, split):
        # All setups other than "ideal" provide only a test split.
        if self.config.name in ["prefix", "file_ideal", "twostep"]:
            assert split == datasets.Split.TEST
        logger.info("generating examples from = %s", filepath)

        if self.config.name == "twostep":
            # The twostep test set is sharded, so `filepath` is a list of
            # JSON-lines files rather than a single path.
            key = 0
for fp in filepath:
with open(fp, encoding="utf-8") as f:
for line in f:
row = json.loads(line)
                        instance_key = f'{key}_{row["query_name"]}_{row["code_file_path"]}'
                        # Twostep examples carry a block-level relevance label
                        # in place of a token-level label sequence.
                        yield instance_key, {
"query_name": row["query_name"],
"context_blocks": row["context_blocks"],
"answer_spans": row["answer_spans"],
"supporting_fact_spans": row["supporting_fact_spans"],
"code_file_path": row["code_file_path"],
"example_type": row["example_type"],
"subtokenized_input_sequence": row["subtokenized_input_sequence"],
"relevance_label": row["relevance_label"],
}
key += 1
else:
with open(filepath, encoding="utf-8") as f:
key = 0
for line in f:
row = json.loads(line)
                    instance_key = f'{key}_{row["query_name"]}_{row["code_file_path"]}'
yield instance_key, {
"query_name": row["query_name"],
"context_blocks": row["context_blocks"],
"answer_spans": row["answer_spans"],
"supporting_fact_spans": row["supporting_fact_spans"],
"code_file_path": row["code_file_path"],
"example_type": row["example_type"],
"subtokenized_input_sequence": row["subtokenized_input_sequence"],
"label_sequence": row["label_sequence"],
}
key += 1
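

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loading logic): load the
    # default "ideal" configuration through the Hub and print one field. The
    # repo id "thepurpleowl/codequeries" is an assumption inferred from
    # context.
    ds = datasets.load_dataset("thepurpleowl/codequeries", "ideal", split="test")
    print(ds[0]["query_name"])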