from __future__ import annotations
import json
from pathlib import Path
from typing import Generator

import datasets

from .base import BaseDatasetProcessor
from .chabsa import ChabsaDatasetProcessor
from .coreference import WikipediaCoreferenceDatasetProcessor
from .dependency import WikipediaDependencyDatasetProcessor
from .jamp import JAMPDatasetProcessor
from .janli import JaNLIDatasetProcessor
from .jcommonsenseqa import JCommonsenseQADatasetProcessor
from .jemhopqa import JEMHopQADatasetProcessor
from .jnli import JNLIDatasetProcessor
from .jsem import JSeMDatasetProcessor
from .jsick import JSICKDatasetProcessor
from .jsquad import JSQuADDatasetProcessor
from .jsts import JSTSDatasetProcessor
from .mawps import MawpsDatasetProcessor
from .ner import WikipediaNERDatasetProcessor
from .niilc import NIILCDatasetProcessor
from .pas import WikipediaPASDatasetProcessor
from .reading import WikipediaReadingDatasetProcessor
from .wiki_base import WikipediaBaseDatasetProcessor


_CITATION = ""
_DESCRIPTION = "Datasets used in the llm-jp-eval benchmark."
_HOMEPAGE = "https://github.com/llm-jp/llm-jp-eval/tree/v1.3.0"
_LICENSE = "This work is licensed under the Apache-2.0 license."
_VERSION = datasets.Version("1.3.0")


class LlmJpEvalConfig(datasets.BuilderConfig):
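    """BuilderConfig for llm-jp-eval tasks.

    Extends the stock config with ``output_dir``, the directory where the
    dataset processors write their preprocessed JSON splits.
    """
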
def __init__(
self,
name: str = "default",
version: datasets.Version | str | None = _VERSION,
data_dir: str | None = None,
data_files: datasets.data_files.DataFilesDict | None = None,
description: str | None = _DESCRIPTION,
output_dir: str = "./data_llm-jp-eval",
) -> None:
super().__init__(
name=name,
version=version,
data_dir=data_dir,
data_files=data_files,
description=description,
)
self.output_dir = output_dir


class LlmJpEval(datasets.GeneratorBasedBuilder):
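    """Builds each llm-jp-eval task by running its upstream preprocessing pipeline."""
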
BUILDER_CONFIGS = [
LlmJpEvalConfig(name="jamp"),
LlmJpEvalConfig(name="janli"),
LlmJpEvalConfig(name="jnli"),
LlmJpEvalConfig(name="jsem"),
LlmJpEvalConfig(name="jsick"),
LlmJpEvalConfig(name="jemhopqa"),
LlmJpEvalConfig(name="niilc"),
LlmJpEvalConfig(name="jsquad"),
LlmJpEvalConfig(name="jcommonsenseqa"),
LlmJpEvalConfig(name="chabsa"),
LlmJpEvalConfig(name="wiki_reading"),
LlmJpEvalConfig(name="wiki_ner"),
LlmJpEvalConfig(name="wiki_dependency"),
LlmJpEvalConfig(name="wiki_pas"),
LlmJpEvalConfig(name="wiki_coreference"),
LlmJpEvalConfig(name="mawps"),
LlmJpEvalConfig(name="jsts"),
]

    def _info(self) -> datasets.DatasetInfo:
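        # Every task shares the same flat schema: a prompt string ("input")
        # and a gold answer string ("output").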
return datasets.DatasetInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage=_HOMEPAGE,
license=_LICENSE,
features=datasets.Features(
{
"input": datasets.Value("string"),
"output": datasets.Value("string"),
}
),
)

    def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> list[datasets.SplitGenerator]:
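        # The upstream llm-jp-eval processors handle download and
        # preprocessing; this builder then simply re-reads the JSON files
        # they produce.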
output_dir = Path(self.config.output_dir)
version_name = self.config.version.version_str
dataset_name = self.config.name
        # Map each config name to its dataset processor so the dispatch stays
        # declarative; an unknown name fails loudly instead of surfacing as a
        # confusing NameError further down.
        processors: dict[str, type[BaseDatasetProcessor]] = {
            "jamp": JAMPDatasetProcessor,
            "janli": JaNLIDatasetProcessor,
            "jnli": JNLIDatasetProcessor,
            "jsem": JSeMDatasetProcessor,
            "jsick": JSICKDatasetProcessor,
            "jemhopqa": JEMHopQADatasetProcessor,
            "niilc": NIILCDatasetProcessor,
            "jsquad": JSQuADDatasetProcessor,
            "jcommonsenseqa": JCommonsenseQADatasetProcessor,
            "chabsa": ChabsaDatasetProcessor,
            "wiki_reading": WikipediaReadingDatasetProcessor,
            "wiki_ner": WikipediaNERDatasetProcessor,
            "wiki_dependency": WikipediaDependencyDatasetProcessor,
            "wiki_pas": WikipediaPASDatasetProcessor,
            "wiki_coreference": WikipediaCoreferenceDatasetProcessor,
            "mawps": MawpsDatasetProcessor,
            "jsts": JSTSDatasetProcessor,
        }
        if dataset_name not in processors:
            raise ValueError(f"Unknown dataset name: {dataset_name}")
        # Download and preprocess the raw data; this writes one JSON file per
        # split under <output_dir>/<version>/evaluation/{train,dev,test}/.
        processors[dataset_name](output_dir, version_name).pipeline()
eval_dir = output_dir / version_name / "evaluation"
with (eval_dir / "train" / f"{dataset_name}.json").open(encoding="utf-8") as f:
train_data = json.load(f)
with (eval_dir / "dev" / f"{dataset_name}.json").open(encoding="utf-8") as f:
validation_data = json.load(f)
with (eval_dir / "test" / f"{dataset_name}.json").open(encoding="utf-8") as f:
test_data = json.load(f)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"data": train_data["samples"]}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data": validation_data["samples"]},
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"data": test_data["samples"]}
),
]

    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
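        # Samples are already {"input": ..., "output": ...} dicts; the list
        # index doubles as the example key.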
for i, d in enumerate(data):
yield i, d
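

# A minimal usage sketch (not part of the original script; the file path below
# is a placeholder): saved as e.g. llm_jp_eval.py, the builder can be invoked
# through the standard `datasets` loading API, with `name` selecting the task.
#
#     import datasets
#
#     ds = datasets.load_dataset("path/to/llm_jp_eval.py", name="jnli")
#     print(ds["train"][0])  # -> {"input": "...", "output": "..."}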