# coding=utf-8
# Copyright 2022 CodeQueries Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The CodeQueries benchmark."""

import json
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CODEQUERIES_CITATION = """\
@article{codequeries2022,
title={Learning to Answer Semantic Queries over Code},
author={A, B, C, D, E, F},
journal={arXiv preprint arXiv:<.>},
year={2022}
}
"""
_IDEAL_DESCRIPTION = """\
CodeQueries Ideal setup.
"""
_PREFIX_DESCRIPTION = """\
CodeQueries Prefix setup."""
_SLIDING_WINDOW_DESCRIPTION = """\
CodeQueries Sliding window setup."""
_FILE_IDEAL_DESCRIPTION = """\
CodeQueries File level Ideal setup."""
_TWOSTEP_DESCRIPTION = """\
CodeQueries Twostep setup."""


class CodequeriesConfig(datasets.BuilderConfig):
    """BuilderConfig for Codequeries."""

    def __init__(self, features, citation, url, **kwargs):
        """BuilderConfig for Codequeries.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          **kwargs: keyword arguments forwarded to super.
        """
# Version history:
# 1.0.0: Initial version.
super(CodequeriesConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.citation = citation
        self.url = url


class Codequeries(datasets.GeneratorBasedBuilder):
    """The Codequeries benchmark."""

    BUILDER_CONFIGS = [
CodequeriesConfig(
name="ideal",
description=_IDEAL_DESCRIPTION,
features=["query_name", "context_blocks", "answer_spans",
"supporting_fact_spans", "code_file_path", "example_type",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
# homepage="",
# data_url="",
url="",
),
# CodequeriesConfig(
# name="prefix",
# description=_PREFIX_DESCRIPTION,
# features=["query_name", "context_blocks", "answer_spans",
# "supporting_fact_spans", "code_file_path", "example_type",
# "subtokenized_input_sequence", "label_sequence"],
# citation=_CODEQUERIES_CITATION,
# url="",
# ),
# CodequeriesConfig(
# name="sliding_window",
# description=_SLIDING_WINDOW_DESCRIPTION,
# features=["query_name", "context_blocks", "answer_spans",
# "supporting_fact_spans", "code_file_path", "example_type",
# "subtokenized_input_sequence", "label_sequence"],
# citation=_CODEQUERIES_CITATION,
# url="",
# ),
CodequeriesConfig(
name="file_ideal",
description=_FILE_IDEAL_DESCRIPTION,
features=["query_name", "context_blocks", "answer_spans",
"supporting_fact_spans", "code_file_path", "example_type",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
url="",
),
# CodequeriesConfig(
# name="twostep",
# description=_TWOSTEP_DESCRIPTION,
# features=["query_name", "context_blocks", "answer_spans",
# "supporting_fact_spans", "code_file_path", "example_type",
# "subtokenized_input_sequence", "label_sequence"],
# citation=_CODEQUERIES_CITATION,
# url="",
# ),
]
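
    # Only the "ideal" and "file_ideal" setups are currently enabled; the
    # "prefix", "sliding_window", and "twostep" configs above are kept
    # commented out, so requesting them by name will fail until re-enabled.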
    # DEFAULT_CONFIG_NAME = "ideal"

    def _info(self):
# features = {feature: datasets.Value("string") for feature in self.config.features}
features = {}
features["query_name"] = datasets.Value("string")
features["context_blocks"] = datasets.features.Sequence(
{
"content": datasets.Value("string"),
"metadata": datasets.Value("string"),
"header": datasets.Value("string")
}
)
features["answer_spans"] = datasets.features.Sequence(
{
'span': datasets.Value("string"),
'start_line': datasets.Value("int32"),
'start_column': datasets.Value("int32"),
'end_line': datasets.Value("int32"),
'end_column': datasets.Value("int32")
}
)
features["supporting_fact_spans"] = datasets.features.Sequence(
{
'span': datasets.Value("string"),
'start_line': datasets.Value("int32"),
'start_column': datasets.Value("int32"),
'end_line': datasets.Value("int32"),
'end_column': datasets.Value("int32")
}
)
features["code_file_path"] = datasets.Value("string")
features["example_type"] = datasets.Value("int32")
features["subtokenized_input_sequence"] = datasets.features.Sequence(datasets.Value("string"))
features["label_sequence"] = datasets.features.Sequence(datasets.Value("int32"))
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=_CODEQUERIES_CITATION,
        )

    def _split_generators(self, dl_manager):
dl_dir = ""
if self.config.name in ["prefix", "sliding_window", "file_ideal", "twostep"]:
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(dl_dir, self.config.name + "_test.json"),
"split": datasets.Split.TEST,
},
),
]
else:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(dl_dir, self.config.name + "_train.json"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(dl_dir, self.config.name + "_val.json"),
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(dl_dir, self.config.name + "_test.json"),
"split": datasets.Split.TEST,
},
),
            ]

    def _generate_examples(self, filepath, split):
if self.config.name in ["prefix", "sliding_window", "file_ideal", "twostep"]:
assert split == datasets.Split.TEST
logger.info("generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
key = 0
for line in f:
row = json.loads(line)
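                # A running counter is prepended (cast to string) so repeated
                # query/file pairs still produce unique example keys.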
instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
yield instance_key, {
"query_name": row["query_name"],
"context_blocks": row["context_blocks"],
"answer_spans": row["answer_spans"],
"supporting_fact_spans": row["supporting_fact_spans"],
"code_file_path": row["code_file_path"],
"example_type": row["example_type"],
"subtokenized_input_sequence ": row["subtokenized_input_sequence "],
"label_sequence": row["label_sequence"],
}
key += 1
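

# A hedged local usage sketch (assumptions: this script is saved locally and
# the per-config JSON files, e.g. ideal_test.json, are reachable from the
# working directory, matching the empty dl_dir above):
#
#   from datasets import load_dataset
#   ds = load_dataset("./codequeries.py", "ideal", split="test")
#   print(ds[0]["query_name"])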