
Modalities: Text
Formats: parquet
Languages: Japanese
Size: < 1K
Libraries: Datasets, pandas
License: apache-2.0
Kosuke-Yamada committed
Commit: 1fe2cf6
Parent: cb2ac75

Delete loading script

Files changed (1):
  1. ja-vicuna-qa-benchmark.py +0 -67
ja-vicuna-qa-benchmark.py DELETED
@@ -1,67 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import Any, Generator
-
-import datasets
-
-_CITATION = ""
-_DESCRIPTION = "These are datasets including the benchmark 'ja-vicuna-qa-benchmark.'"
-_HOMEPAGE = "https://raw.githubusercontent.com/ku-nlp/ja-vicuna-qa-benchmark/main/data/jp_bench/question.jsonl"
-_LICENSE = "This work is license under Apache-2.0 license"
-_URL = "https://raw.githubusercontent.com/ku-nlp/ja-vicuna-qa-benchmark/main/data/jp_bench/question.jsonl"
-_VERSION = "1.1.0"
-
-
-class JaVicunaQaBenchmarkConfig(datasets.BuilderConfig):
-    def __init__(
-        self,
-        name: str = "default",
-        version: datasets.Version | str | None = datasets.Version(_VERSION),
-        data_dir: str | None = None,
-        data_files: datasets.data_files.DataFilesDict | None = None,
-        description: str | None = _DESCRIPTION,
-    ) -> None:
-        super().__init__(
-            name=name,
-            version=version,
-            data_dir=data_dir,
-            data_files=data_files,
-            description=description,
-        )
-
-
-class JaVicunaQaBenchmark(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIG_CLASS = JaVicunaQaBenchmarkConfig
-
-    def _info(self) -> datasets.DatasetInfo:
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            citation=_CITATION,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            features=datasets.Features(
-                {
-                    "question_id": datasets.Value("int64"),
-                    "category": datasets.Value("string"),
-                    "turns": [datasets.Value("string")],
-                }
-            ),
-        )
-
-    def _split_generators(
-        self, dl_manager: datasets.DownloadManager
-    ) -> list[datasets.SplitGenerator]:
-        dataset_file = dl_manager.download_and_extract(_URL)
-        with open(dataset_file, "r", encoding="utf-8") as f:
-            data = [json.loads(line) for line in f]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"data": data}
-            ),
-        ]
-
-    def _generate_examples(self, data: list[dict[str, Any]]) -> Generator:
-        for i, d in enumerate(data):
-            yield i, d
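
With the loading script removed, the dataset is served from the Parquet files listed in the metadata above, so it can be loaded with a plain datasets.load_dataset call and no custom code. A minimal sketch, assuming the Hub repository id is Kosuke-Yamada/ja-vicuna-qa-benchmark (a hypothetical name based on the commit author) and that the converted data keeps the test split and the question_id / category / turns schema defined in the removed script:

from datasets import load_dataset

# Assumed repository id; replace with the actual repo this commit belongs to.
ds = load_dataset("Kosuke-Yamada/ja-vicuna-qa-benchmark", split="test")

# The schema mirrors the Features block of the deleted script:
# question_id (int64), category (string), turns (list of strings).
for example in ds.select(range(3)):
    print(example["question_id"], example["category"], example["turns"][0])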