yentinglin committed
Commit
7c9f7d3
1 Parent(s): c2d6eb8

Delete tmlu.py

Files changed (1)
  1. tmlu.py +0 -121
tmlu.py DELETED
@@ -1,121 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- import json
-
- import datasets
-
-
- _CITATION = """\
- @misc{taiwanllama,
-     author={Lin, Yen-Ting and Chen, Yun-Nung},
-     title={Language Models for Taiwanese Culture},
-     year={2023},
-     url={https://github.com/MiuLab/Taiwan-LLaMa},
-     note={Code and models available at https://github.com/MiuLab/Taiwan-LLaMa},
- }
- """
-
- _DESCRIPTION = """\
- This is a Traditional Mandarin multitask test consisting of multiple-choice questions from various branches of knowledge in Taiwan.
- """
-
- _HOMEPAGE = "https://github.com/MiuLab/Taiwan-LLaMa"
-
- _URL = "https://huggingface.co/datasets/miulab/TMLU/raw/main/{subject}.jsonl"
-
- # TODO: add "all" subject
- _SUBJECTS = [
-     "AST_biology",
-     "AST_chemistry",
-     "AST_chinese",
-     "AST_physics",
-     "GSAT_chinese",
- ]
-
-
- class Tmlu(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name=sub, version=datasets.Version("0.1.0"), description=f"TMLU Subject {sub}"
-         )
-         for sub in _SUBJECTS
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "question": datasets.Value("string"),
-                 "correct_choices": datasets.features.Sequence(datasets.Value("string")),
-                 "incorrect_choices": datasets.features.Sequence(datasets.Value("string")),
-                 "metadata": {
-                     "timestamp": datasets.Value("string"),
-                     "source": datasets.Value("string"),
-                 },
-                 "human_evaluation": {
-                     "quality": datasets.Value("string"),
-                     "comments": datasets.Value("string"),
-                 },
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         archive = dl_manager.download(
-             {
-                 self.config.name: _URL.format(subject=self.config.name),
-             }
-         )
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"path": archive[self.config.name], "split": "test"},
-             ),
-         ]
-
-     def _generate_examples(self, path, split):
-         with open(path, encoding="utf-8") as f:
-             for id_, row in enumerate(f):
-                 data = json.loads(row)
-                 yield id_, {
-                     "id": data["id"],
-                     "question": data["question"],
-                     "correct_choices": data["correct_choices"],
-                     "incorrect_choices": data["incorrect_choices"],
-                     "metadata": data["metadata"],
-                     "human_evaluation": data["human_evaluation"],
-                 }
-     # def _generate_examples(self, iter_archive, split):
-     #     """Yields examples as (key, example) tuples."""
-     #     n_yielded_files = 0
-     #     for id_file, (path, file) in enumerate(iter_archive):
-     #         if f"data/{split}/" in path:
-     #             if split == "auxiliary_train" or f"{self.config.name}_{split}.csv" in path or self.config.name == "all":
-     #                 subset = path.split("/")[-1].rsplit("_",1)[0] if split != "auxiliary_train" else ""
-     #                 n_yielded_files += 1
-     #                 lines = (line.decode("utf-8") for line in file)
-     #                 reader = csv.reader(lines)
-     #                 for id_line, data in enumerate(reader):
-     #                     yield f"{id_file}_{id_line}", {"question": data[0], "choices": data[1:5], "answer": data[5], "subject": subset}
-     #                 if (n_yielded_files == 8 or split != "auxiliary_train") and self.config.name != "all":
-     #                     break
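
With the loading script removed, the per-subject JSONL files it pointed at can be read directly. Below is a minimal sketch, not part of the commit: it assumes the files (e.g. AST_biology.jsonl) stay at the repository root of miulab/TMLU, as the deleted script's _URL suggests, and keep the schema the script declared (id, question, correct_choices, incorrect_choices, metadata, human_evaluation).

# Sketch only: load one subject straight from the hosted JSONL file using
# the generic "json" builder instead of the deleted custom script.
# The exact post-deletion file layout is an assumption.
from datasets import load_dataset

ds = load_dataset(
    "json",
    data_files="https://huggingface.co/datasets/miulab/TMLU/resolve/main/AST_biology.jsonl",
    split="train",  # the generic json builder exposes a single "train" split
)
print(ds[0]["question"])
print(ds[0]["correct_choices"], ds[0]["incorrect_choices"])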