import json
import datasets
_DESCRIPTION = """\
HumanEvalPack extends OpenAI's HumanEval benchmark to 6 languages
(Python, JavaScript, Java, Go, C++, Rust) and 3 tasks
(code synthesis, code repair, code explanation). See the homepage for details.
"""
_HOMEPAGE = "https://github.com/bigcode-project/octopack"
def get_url(name):
    """Return the relative path to the JSONL data file for a language config."""
    url = f"data/{name}/data/humanevalpack.jsonl"
    return url
def split_generator(dl_manager, name):
    """Download the language's JSONL file and expose it as a single test split."""
    downloaded_files = dl_manager.download(get_url(name))
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "filepath": downloaded_files,
            },
        )
    ]
class HumanEvalPackConfig(datasets.BuilderConfig):
    """BuilderConfig for a single HumanEvalPack language."""

    def __init__(self, name, description, features, **kwargs):
        super(HumanEvalPackConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.name = name
        self.description = description
        self.features = features
class HumanEvalPack(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
    # Every language configuration exposes the same set of columns.
    FEATURES = [
        "task_id", "prompt", "import", "declaration", "buggy_solution",
        "canonical_solution", "test", "test_setup", "example_test", "bug_type",
        "failure_symptoms", "entry_point", "signature", "docstring", "instruction",
    ]
    BUILDER_CONFIGS = [
        HumanEvalPackConfig(
            name="python",
            description="Python HumanEvalPack",
            features=FEATURES,
        ),
        HumanEvalPackConfig(
            name="js",
            description="JavaScript HumanEvalPack",
            features=FEATURES,
        ),
        HumanEvalPackConfig(
            name="java",
            description="Java HumanEvalPack",
            features=FEATURES,
        ),
        HumanEvalPackConfig(
            name="go",
            description="Go HumanEvalPack",
            features=FEATURES,
        ),
        HumanEvalPackConfig(
            name="cpp",
            description="C++ HumanEvalPack",
            features=FEATURES,
        ),
        HumanEvalPackConfig(
            name="rust",
            description="Rust HumanEvalPack",
            features=FEATURES,
        ),
    ]
DEFAULT_CONFIG_NAME = "python"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"task_id": datasets.Value("string"),
"prompt": datasets.Value("string"),
"import": datasets.Value("string"),
"declaration": datasets.Value("string"),
"canonical_solution": datasets.Value("string"),
"buggy_solution": datasets.Value("string"),
"bug_type": datasets.Value("string"),
"failure_symptoms": datasets.Value("string"),
"entry_point": datasets.Value("string"),
"test": datasets.Value("string"),
"test_setup": datasets.Value("string"),
"example_test": datasets.Value("string"),
"signature": datasets.Value("string"),
"docstring": datasets.Value("string"),
"instruction": datasets.Value("string"),
}
),
homepage=_HOMEPAGE,
)
def _split_generators(self, dl_manager):
        # Every language config resolves to the same download-and-split logic.
        return split_generator(dl_manager, self.config.name)
def _generate_examples(self, filepath):
key = 0
with open(filepath) as f:
for line in f:
row = json.loads(line)
key += 1
yield key, {
"task_id": row["task_id"],
"prompt": row["prompt"],
"import": row.get("import", ""), # Only for Go
"declaration": row["declaration"],
"buggy_solution": row["buggy_solution"],
"canonical_solution": row["canonical_solution"],
"bug_type": row["bug_type"],
"failure_symptoms": row["failure_symptoms"],
"entry_point": row["entry_point"],
"test": row["test"],
"test_setup": row.get("test_setup", ""), # Only for Go
"example_test": row["example_test"],
"signature": row["signature"],
"docstring": row["docstring"],
"instruction": row["instruction"],
}
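

# --- Usage sketch -------------------------------------------------------------
# A minimal sketch of loading one language configuration with this builder.
# The Hub repository id "bigcode/humanevalpack" is an assumption here; point
# load_dataset at wherever this script and its data files actually live.
# Recent versions of `datasets` may also require trust_remote_code=True for
# script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("bigcode/humanevalpack", "python", split="test")
    print(ds[0]["task_id"], ds[0]["entry_point"])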