shunk031 committed on
Commit
d201081
2 Parent(s): 5089718 b4253c2

Merge pull request #1 from shunk031/initialize

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,45 @@
+ name: CI
+
+ on:
+   push:
+     branches: [main]
+   pull_request:
+     branches: [main]
+     paths-ignore:
+       - 'README.md'
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: ['3.8', '3.9', '3.10']
+
+     steps:
+       - uses: actions/checkout@v2
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v2
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       - name: Install dependencies
+         run: |
+           pip install -U pip setuptools wheel poetry
+           poetry install
+       - name: Format
+         run: |
+           poetry run black --check .
+       - name: Lint
+         run: |
+           poetry run flake8 . --ignore=E501,W503,E203
+       - name: Type check
+         run: |
+           poetry run mypy . \
+             --ignore-missing-imports \
+             --no-strict-optional \
+             --no-site-packages \
+             --cache-dir=/dev/null
+
+       - name: Run tests
+         run: |
+           poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
+ name: Sync to Hugging Face Hub
+
+ on:
+   workflow_run:
+     workflows:
+       - CI
+     branches:
+       - main
+     types:
+       - completed
+
+ jobs:
+   push_to_hub:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v2
+
+       - name: Push to Huggingface hub
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+           HF_USERNAME: ${{ secrets.HF_USERNAME }}
+         run: |
+           git fetch --unshallow
+           git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/JGLUE main
.gitignore ADDED
@@ -0,0 +1,173 @@
+ # Created by https://www.toptal.com/developers/gitignore/api/python
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+ ### Python ###
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ ### Python Patch ###
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+ poetry.toml
+
+ # ruff
+ .ruff_cache/
+
+ # End of https://www.toptal.com/developers/gitignore/api/python
JGLUE.py ADDED
@@ -0,0 +1,494 @@
+ import json
+ import random
+ import string
+ from typing import Dict, List, Optional, Union
+
+ import datasets as ds
+ import pandas as pd
+
+ _CITATION = """\
+ @inproceedings{kurihara-etal-2022-jglue,
+     title = "{JGLUE}: {J}apanese General Language Understanding Evaluation",
+     author = "Kurihara, Kentaro and
+       Kawahara, Daisuke and
+       Shibata, Tomohide",
+     booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
+     month = jun,
+     year = "2022",
+     address = "Marseille, France",
+     publisher = "European Language Resources Association",
+     url = "https://aclanthology.org/2022.lrec-1.317",
+     pages = "2957--2966",
+     abstract = "To develop high-performance natural language understanding (NLU) models, it is necessary to have a benchmark to evaluate and analyze NLU ability from various perspectives. While the English NLU benchmark, GLUE, has been the forerunner, benchmarks are now being released for languages other than English, such as CLUE for Chinese and FLUE for French; but there is no such benchmark for Japanese. We build a Japanese NLU benchmark, JGLUE, from scratch without translation to measure the general NLU ability in Japanese. We hope that JGLUE will facilitate NLU research in Japanese.",
+ }
+
+ @InProceedings{Kurihara_nlp2022,
+     author = "栗原健太郎 and 河原大輔 and 柴田知秀",
+     title = "JGLUE: 日本語言語理解ベンチマーク",
+     booktitle = "言語処理学会第28回年次大会",
+     year = "2022",
+     url = "https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E8-4.pdf"
+     note= "in Japanese"
+ }
+ """
+
+ _DESCRIPTION = """\
+ JGLUE, Japanese General Language Understanding Evaluation, is built to measure the general NLU ability in Japanese. JGLUE has been constructed from scratch without translation. We hope that JGLUE will facilitate NLU research in Japanese.
+ """
+
+ _HOMEPAGE = "https://github.com/yahoojapan/JGLUE"
+
+ _LICENSE = """\
+ This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
+ """
+
+ _DESCRIPTION_CONFIGS = {
+     "MARC-ja": "MARC-ja is a dataset of the text classification task. This dataset is based on the Japanese portion of Multilingual Amazon Reviews Corpus (MARC) (Keung+, 2020).",
+     "JSTS": "JSTS is a Japanese version of the STS (Semantic Textual Similarity) dataset. STS is a task to estimate the semantic similarity of a sentence pair.",
+     "JNLI": "JNLI is a Japanese version of the NLI (Natural Language Inference) dataset. NLI is a task to recognize the inference relation that a premise sentence has to a hypothesis sentence.",
+     "JSQuAD": "JSQuAD is a Japanese version of SQuAD (Rajpurkar+, 2016), one of the datasets of reading comprehension.",
+     "JCommonsenseQA": "JCommonsenseQA is a Japanese version of CommonsenseQA (Talmor+, 2019), which is a multiple-choice question answering dataset that requires commonsense reasoning ability.",
+ }
+
+ _URLS = {
+     "MARC-ja": {
+         "data": "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_multilingual_JP_v1_00.tsv.gz",
+         "filter_review_id_list": {
+             "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/filter_review_id_list/valid.txt"
+         },
+         "label_conv_review_id_list": {
+             "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/label_conv_review_id_list/valid.txt"
+         },
+     },
+     "JSTS": {
+         "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/train-v1.1.json",
+         "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/valid-v1.1.json",
+     },
+     "JNLI": {
+         "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jnli-v1.1/train-v1.1.json",
+         "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jnli-v1.1/valid-v1.1.json",
+     },
+     "JSQuAD": {
+         "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsquad-v1.1/train-v1.1.json",
+         "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsquad-v1.1/valid-v1.1.json",
+     },
+     "JCommonsenseQA": {
+         "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jcommonsenseqa-v1.1/train-v1.1.json",
+         "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jcommonsenseqa-v1.1/valid-v1.1.json",
+     },
+ }
+
+
+ def features_jsts() -> ds.Features:
+     features = ds.Features(
+         {
+             "sentence_pair_id": ds.Value("string"),
+             "yjcaptions_id": ds.Value("string"),
+             "sentence1": ds.Value("string"),
+             "sentence2": ds.Value("string"),
+             "label": ds.Value("float"),
+         }
+     )
+     return features
+
+
+ def features_jnli() -> ds.Features:
+     features = ds.Features(
+         {
+             "sentence_pair_id": ds.Value("string"),
+             "yjcaptions_id": ds.Value("string"),
+             "sentence1": ds.Value("string"),
+             "sentence2": ds.Value("string"),
+             "label": ds.ClassLabel(
+                 num_classes=3, names=["entailment", "contradiction", "neutral"]
+             ),
+         }
+     )
+     return features
+
+
+ def features_jsquad() -> ds.Features:
+     title = ds.Value("string")
+     answers = ds.Sequence(
+         {"text": ds.Value("string"), "answer_start": ds.Value("int64")}
+     )
+     qas = ds.Sequence(
+         {
+             "question": ds.Value("string"),
+             "id": ds.Value("string"),
+             "answers": answers,
+             "is_impossible": ds.Value("bool"),
+         }
+     )
+     paragraphs = ds.Sequence({"qas": qas, "context": ds.Value("string")})
+     features = ds.Features(
+         {"data": ds.Sequence({"title": title, "paragraphs": paragraphs})}
+     )
+     return features
+
+
+ def features_jcommonsenseqa() -> ds.Features:
+     features = ds.Features(
+         {
+             "q_id": ds.Value("int64"),
+             "question": ds.Value("string"),
+             "choice0": ds.Value("string"),
+             "choice1": ds.Value("string"),
+             "choice2": ds.Value("string"),
+             "choice3": ds.Value("string"),
+             "choice4": ds.Value("string"),
+             "label": ds.Value("int8"),
+         }
+     )
+     return features
+
+
+ def features_marc_ja() -> ds.Features:
+     features = ds.Features(
+         {
+             "sentence": ds.Value("string"),
+             "label": ds.ClassLabel(
+                 num_classes=3, names=["positive", "negative", "neutral"]
+             ),
+             "review_id": ds.Value("string"),
+         }
+     )
+     return features
+
+
+ class MarcJaConfig(ds.BuilderConfig):
+     def __init__(
+         self,
+         name: str = "MARC-ja",
+         is_han_to_zen: bool = False,
+         max_instance_num: Optional[int] = None,
+         max_char_length: int = 500,
+         is_pos_neg: bool = True,
+         train_ratio: float = 0.94,
+         val_ratio: float = 0.03,
+         test_ratio: float = 0.03,
+         output_testset: bool = False,
+         filter_review_id_list_valid: bool = True,
+         label_conv_review_id_list_valid: bool = True,
+         version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
+         data_dir: Optional[str] = None,
+         data_files: Optional[ds.data_files.DataFilesDict] = None,
+         description: Optional[str] = None,
+     ) -> None:
+         super().__init__(
+             name=name,
+             version=version,
+             data_dir=data_dir,
+             data_files=data_files,
+             description=description,
+         )
+         assert train_ratio + val_ratio + test_ratio == 1.0
+
+         self.train_ratio = train_ratio
+         self.val_ratio = val_ratio
+         self.test_ratio = test_ratio
+
+         self.is_han_to_zen = is_han_to_zen
+         self.max_instance_num = max_instance_num
+         self.max_char_length = max_char_length
+         self.is_pos_neg = is_pos_neg
+         self.output_testset = output_testset
+
+         self.filter_review_id_list_valid = filter_review_id_list_valid
+         self.label_conv_review_id_list_valid = label_conv_review_id_list_valid
+
+
+ def get_label(rating: int, is_pos_neg: bool = False) -> Optional[str]:
+     if rating >= 4:
+         return "positive"
+     elif rating <= 2:
+         return "negative"
+     else:
+         if is_pos_neg:
+             return None
+         else:
+             return "neutral"
+
+
+ def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
+     ascii_letters = set(string.printable)
+     rate = sum(c in ascii_letters for c in text) / len(text)
+     return rate >= threshold
+
+
+ def shuffle_dataframe(df: pd.DataFrame) -> pd.DataFrame:
+     instances = df.to_dict(orient="records")
+     random.seed(1)
+     random.shuffle(instances)
+     return pd.DataFrame(instances)
+
+
+ def get_filter_review_id_list(
+     filter_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, List[str]]:
+     filter_review_id_list_valid = filter_review_id_list_paths.get("valid")
+     filter_review_id_list_test = filter_review_id_list_paths.get("test")
+
+     filter_review_id_list = {}
+
+     if filter_review_id_list_valid is not None:
+         with open(filter_review_id_list_valid, "r") as rf:
+             filter_review_id_list["valid"] = [line.rstrip() for line in rf]
+
+     if filter_review_id_list_test is not None:
+         with open(filter_review_id_list_test, "r") as rf:
+             filter_review_id_list["test"] = [line.rstrip() for line in rf]
+
+     return filter_review_id_list
+
+
+ def get_label_conv_review_id_list(
+     label_conv_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, Dict[str, str]]:
+     import csv
+
+     label_conv_review_id_list_valid = label_conv_review_id_list_paths.get("valid")
+     label_conv_review_id_list_test = label_conv_review_id_list_paths.get("test")
+
+     label_conv_review_id_list: Dict[str, Dict[str, str]] = {}
+
+     if label_conv_review_id_list_valid is not None:
+         with open(label_conv_review_id_list_valid, "r") as rf:
+             label_conv_review_id_list["valid"] = {
+                 row[0]: row[1] for row in csv.reader(rf)
+             }
+
+     if label_conv_review_id_list_test is not None:
+         with open(label_conv_review_id_list_test, "r") as rf:
+             label_conv_review_id_list["test"] = {
+                 row[0]: row[1] for row in csv.reader(rf)
+             }
+
+     return label_conv_review_id_list
+
+
+ def output_data(
+     df: pd.DataFrame,
+     train_ratio: float,
+     val_ratio: float,
+     test_ratio: float,
+     output_testset: bool,
+     filter_review_id_list_paths: Dict[str, str],
+     label_conv_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, pd.DataFrame]:
+     instance_num = len(df)
+     split_dfs: Dict[str, pd.DataFrame] = {}
+     # split the shuffled reviews into train/valid/test by the configured ratios
+     length1 = int(instance_num * train_ratio)
+     split_dfs["train"] = df.iloc[:length1]
+
+     length2 = int(instance_num * (train_ratio + val_ratio))
+     split_dfs["valid"] = df.iloc[length1:length2]
+     split_dfs["test"] = df.iloc[length2:]
+
+     filter_review_id_list = get_filter_review_id_list(
+         filter_review_id_list_paths=filter_review_id_list_paths,
+     )
+     label_conv_review_id_list = get_label_conv_review_id_list(
+         label_conv_review_id_list_paths=label_conv_review_id_list_paths,
+     )
+
+     # drop reviews that were filtered out for the evaluation splits
+     for eval_type in ("valid", "test"):
+         if filter_review_id_list.get(eval_type):
+             df = split_dfs[eval_type]
+             df = df[~df["review_id"].isin(filter_review_id_list[eval_type])]
+             split_dfs[eval_type] = df
+
+     # overwrite labels with the crowdsourced labels for the evaluation splits
+     for eval_type in ("valid", "test"):
+         if label_conv_review_id_list.get(eval_type):
+             df = split_dfs[eval_type]
+             df = df.assign(
+                 converted_label=df["review_id"].map(label_conv_review_id_list[eval_type])
+             )
+             df = df.assign(
+                 label=df[["label", "converted_label"]].apply(
+                     lambda xs: xs["label"]
+                     if pd.isnull(xs["converted_label"])
+                     else xs["converted_label"],
+                     axis=1,
+                 )
+             )
+             df = df.drop(columns=["converted_label"])
+             split_dfs[eval_type] = df
+
+     return {
+         "train": split_dfs["train"],
+         "valid": split_dfs["valid"],
+     }
+
+
+ def preprocess_for_marc_ja(
+     config: MarcJaConfig,
+     data_file_path: str,
+     filter_review_id_list_paths: Dict[str, str],
+     label_conv_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, pd.DataFrame]:
+     import mojimoji
+     from bs4 import BeautifulSoup
+     from tqdm import tqdm
+
+     df = pd.read_csv(data_file_path, delimiter="\t")
+     df = df[["review_body", "star_rating", "review_id"]]
+
+     # rename columns
+     df = df.rename(columns={"review_body": "text", "star_rating": "rating"})
+
+     # convert the rating to label
+     tqdm.pandas(dynamic_ncols=True, desc="Convert the rating to the label")
+     df = df.assign(
+         label=df["rating"].progress_apply(
+             lambda rating: get_label(rating, config.is_pos_neg)
+         )
+     )
+
+     # remove rows where the label is None
+     df = df[~df["label"].isnull()]
+
+     # remove html tags from the text
+     tqdm.pandas(dynamic_ncols=True, desc="Remove html tags from the text")
+     df = df.assign(
+         text=df["text"].progress_apply(
+             lambda text: BeautifulSoup(text, "html.parser").get_text()
+         )
+     )
+
+     # filter by ascii rate
+     tqdm.pandas(dynamic_ncols=True, desc="Filter by ascii rate")
+     df = df[~df["text"].progress_apply(is_filtered_by_ascii_rate)]
+
+     if config.max_char_length is not None:
+         df = df[df["text"].str.len() <= config.max_char_length]
+
+     if config.is_han_to_zen:
+         df = df.assign(text=df["text"].apply(mojimoji.han_to_zen))
+
+     df = df[["text", "label", "review_id"]]
+     df = df.rename(columns={"text": "sentence"})
+
+     # shuffle dataset
+     df = shuffle_dataframe(df)
+
+     split_dfs = output_data(
+         df=df,
+         train_ratio=config.train_ratio,
+         val_ratio=config.val_ratio,
+         test_ratio=config.test_ratio,
+         output_testset=config.output_testset,
+         filter_review_id_list_paths=filter_review_id_list_paths,
+         label_conv_review_id_list_paths=label_conv_review_id_list_paths,
+     )
+     return split_dfs
+
+
+ class JGLUE(ds.GeneratorBasedBuilder):
+     VERSION = ds.Version("1.1.0")
+     BUILDER_CONFIGS = [
+         MarcJaConfig(
+             name="MARC-ja",
+             version=VERSION,
+             description=_DESCRIPTION_CONFIGS["MARC-ja"],
+         ),
+         ds.BuilderConfig(
+             name="JSTS",
+             version=VERSION,
+             description=_DESCRIPTION_CONFIGS["JSTS"],
+         ),
+         ds.BuilderConfig(
+             name="JNLI",
+             version=VERSION,
+             description=_DESCRIPTION_CONFIGS["JNLI"],
+         ),
+         ds.BuilderConfig(
+             name="JSQuAD",
+             version=VERSION,
+             description=_DESCRIPTION_CONFIGS["JSQuAD"],
+         ),
+         ds.BuilderConfig(
+             name="JCommonsenseQA",
+             version=VERSION,
+             description=_DESCRIPTION_CONFIGS["JCommonsenseQA"],
+         ),
+     ]
+
+     def _info(self) -> ds.DatasetInfo:
+         if self.config.name == "JSTS":
+             features = features_jsts()
+         elif self.config.name == "JNLI":
+             features = features_jnli()
+         elif self.config.name == "JSQuAD":
+             features = features_jsquad()
+         elif self.config.name == "JCommonsenseQA":
+             features = features_jcommonsenseqa()
+         elif self.config.name == "MARC-ja":
+             features = features_marc_ja()
+         else:
+             raise ValueError(f"Invalid config name: {self.config.name}")
+
+         return ds.DatasetInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             features=features,
+         )
+
+     def _split_generators(self, dl_manager: ds.DownloadManager):
+         file_paths = dl_manager.download_and_extract(_URLS[self.config.name])
+
+         if self.config.name == "MARC-ja":
+             filter_review_id_list = file_paths["filter_review_id_list"]
+             label_conv_review_id_list = file_paths["label_conv_review_id_list"]
+
+             split_dfs = preprocess_for_marc_ja(
+                 config=self.config,
+                 data_file_path=file_paths["data"],
+                 filter_review_id_list_paths=filter_review_id_list,
+                 label_conv_review_id_list_paths=label_conv_review_id_list,
+             )
+             return [
+                 ds.SplitGenerator(
+                     name=ds.Split.TRAIN,
+                     gen_kwargs={"split_df": split_dfs["train"]},
+                 ),
+                 ds.SplitGenerator(
+                     name=ds.Split.VALIDATION,
+                     gen_kwargs={"split_df": split_dfs["valid"]},
+                 ),
+             ]
+         else:
+             return [
+                 ds.SplitGenerator(
+                     name=ds.Split.TRAIN,
+                     gen_kwargs={"file_path": file_paths["train"]},
+                 ),
+                 ds.SplitGenerator(
+                     name=ds.Split.VALIDATION,
+                     gen_kwargs={"file_path": file_paths["valid"]},
+                 ),
+             ]
+
+     def _generate_examples(
+         self,
+         file_path: Optional[str] = None,
+         split_df: Optional[pd.DataFrame] = None,
+     ):
+         if self.config.name == "MARC-ja":
+             if split_df is None:
+                 raise ValueError(f"Invalid preprocessing for {self.config.name}")
+
+             instances = split_df.to_dict(orient="records")
+             for i, data_dict in enumerate(instances):
+                 yield i, data_dict
+
+         else:
+             if file_path is None:
+                 raise ValueError(f"Invalid argument for {self.config.name}")
+             with open(file_path, "r") as rf:
+                 for i, line in enumerate(rf):
+                     json_dict = json.loads(line)
+                     yield i, json_dict
README.md ADDED
@@ -0,0 +1,541 @@
+ ---
+ annotations_creators:
+ - crowdsourced
+ language:
+ - ja
+ language_creators:
+ - crowdsourced
+ - found
+ license:
+ - cc-by-sa-4.0
+ multilinguality:
+ - monolingual
+ pretty_name: JGLUE
+ size_categories: []
+ source_datasets:
+ - original
+ tags:
+ - MARC
+ - STS
+ - NLI
+ - SQuAD
+ - CommonsenseQA
+ task_categories:
+ - multiple-choice
+ - question-answering
+ - sentence-similarity
+ - text-classification
+ task_ids:
+ - multiple-choice-qa
+ - open-domain-qa
+ - multi-class-classification
+ - sentiment-classification
+ ---
+
+ # Dataset Card for JGLUE
+
+ [![CI](https://github.com/shunk031/huggingface-datasets_JGLUE/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_JGLUE/actions/workflows/ci.yaml)
+ [![LREC2022 2022.lrec-1.317](https://img.shields.io/badge/LREC2022-2022.lrec--1.317-red)](https://aclanthology.org/2022.lrec-1.317)
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/yahoojapan/JGLUE
+ - **Repository:** https://github.com/shunk031/huggingface-datasets_JGLUE
+
+ ### Dataset Summary
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#jglue-japanese-general-language-understanding-evaluation):
+
+ > JGLUE, Japanese General Language Understanding Evaluation, is built to measure the general NLU ability in Japanese. JGLUE has been constructed from scratch without translation. We hope that JGLUE will facilitate NLU research in Japanese.
+
+ > JGLUE has been constructed by a joint research project of Yahoo Japan Corporation and Kawahara Lab at Waseda University.
+
+ ### Supported Tasks and Leaderboards
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#tasksdatasets):
+
+ > JGLUE consists of the tasks of text classification, sentence pair classification, and QA. Each task consists of multiple datasets.
+
+ #### Supported Tasks
+
+ ##### MARC-ja
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#marc-ja):
+
+ > MARC-ja is a dataset of the text classification task. This dataset is based on the Japanese portion of [Multilingual Amazon Reviews Corpus (MARC)](https://docs.opendata.aws/amazon-reviews-ml/readme.html) ([Keung+, 2020](https://aclanthology.org/2020.emnlp-main.369/)).
+
+ ##### JSTS
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#jsts):
+
+ > JSTS is a Japanese version of the STS (Semantic Textual Similarity) dataset. STS is a task to estimate the semantic similarity of a sentence pair. The sentences in JSTS and JNLI (described below) are extracted from the Japanese version of the MS COCO Caption Dataset, [the YJ Captions Dataset](https://github.com/yahoojapan/YJCaptions) ([Miyazaki and Shimizu, 2016](https://aclanthology.org/P16-1168/)).
+
+ ##### JNLI
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#jnli):
+
+ > JNLI is a Japanese version of the NLI (Natural Language Inference) dataset. NLI is a task to recognize the inference relation that a premise sentence has to a hypothesis sentence. The inference relations are entailment, contradiction, and neutral.
+
+ ##### JSQuAD
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#jsquad):
+
+ > JSQuAD is a Japanese version of [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) ([Rajpurkar+, 2018](https://aclanthology.org/P18-2124/)), one of the datasets of reading comprehension. Each instance in the dataset consists of a question regarding a given context (Wikipedia article) and its answer. JSQuAD is based on SQuAD 1.1 (there are no unanswerable questions). We used [the Japanese Wikipedia dump](https://dumps.wikimedia.org/jawiki/) as of 20211101.
+
+ ##### JCommonsenseQA
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#jcommonsenseqa):
+
+ > JCommonsenseQA is a Japanese version of [CommonsenseQA](https://www.tau-nlp.org/commonsenseqa) ([Talmor+, 2019](https://aclanthology.org/N19-1421/)), which is a multiple-choice question answering dataset that requires commonsense reasoning ability. It is built using crowdsourcing with seeds extracted from the knowledge base [ConceptNet](https://conceptnet.io/).
+
+ #### Leaderboard
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#leaderboard):
+
+ > A leaderboard will be made public soon. The test set will be released at that time.
+
+ ### Languages
+
+ The language data in JGLUE is in Japanese ([BCP-47 ja-JP](https://www.rfc-editor.org/info/bcp47)).
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ When loading a specific configuration, users have to specify its name via the `name` argument:
+
+ #### MARC-ja
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="MARC-ja")
+
+ print(dataset)
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['sentence', 'label', 'review_id'],
+ #         num_rows: 187528
+ #     })
+ #     validation: Dataset({
+ #         features: ['sentence', 'label', 'review_id'],
+ #         num_rows: 5654
+ #     })
+ # })
+ ```
+
+ #### JSTS
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JSTS")
+
+ print(dataset)
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['sentence_pair_id', 'yjcaptions_id', 'sentence1', 'sentence2', 'label'],
+ #         num_rows: 12451
+ #     })
+ #     validation: Dataset({
+ #         features: ['sentence_pair_id', 'yjcaptions_id', 'sentence1', 'sentence2', 'label'],
+ #         num_rows: 1457
+ #     })
+ # })
+ ```
+
+ An example of the JSTS dataset looks as follows:
+
+ ```json
+ {
+   "sentence_pair_id": "691",
+   "yjcaptions_id": "127202-129817-129818",
+   "sentence1": "街中の道路を大きなバスが走っています。 (A big bus is running on the road in the city.)",
+   "sentence2": "道路を大きなバスが走っています。 (There is a big bus running on the road.)",
+   "label": 4.4
+ }
+ ```
+
+ #### JNLI
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JNLI")
+
+ print(dataset)
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['sentence_pair_id', 'yjcaptions_id', 'sentence1', 'sentence2', 'label'],
+ #         num_rows: 20073
+ #     })
+ #     validation: Dataset({
+ #         features: ['sentence_pair_id', 'yjcaptions_id', 'sentence1', 'sentence2', 'label'],
+ #         num_rows: 2434
+ #     })
+ # })
+ ```
+
+ An example of the JNLI dataset looks as follows:
+
+ ```json
+ {
+   "sentence_pair_id": "1157",
+   "yjcaptions_id": "127202-129817-129818",
+   "sentence1": "街中の道路を大きなバスが走っています。 (A big bus is running on the road in the city.)",
+   "sentence2": "道路を大きなバスが走っています。 (There is a big bus running on the road.)",
+   "label": "entailment"
+ }
+ ```
+
+ #### JSQuAD
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JSQuAD")
+
+ print(dataset)
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['data'],
+ #         num_rows: 1
+ #     })
+ #     validation: Dataset({
+ #         features: ['data'],
+ #         num_rows: 1
+ #     })
+ # })
+ ```
+
+ An example of the JSQuAD dataset looks as follows:
+
+ ```json
+ {
+   "title": "東海道新幹線 (Tokaido Shinkansen)",
+   "paragraphs": [
+     {
+       "qas": [
+         {
+           "question": "2020 年(令和 2 年)3 月現在、東京駅 - 新大阪駅間の最高速度はどのくらいか。 (What is the maximum speed between Tokyo Station and Shin-Osaka Station as of March 2020?)",
+           "id": "a1531320p0q0",
+           "answers": [
+             {
+               "text": "285 km/h",
+               "answer_start": 182
+             }
+           ],
+           "is_impossible": false
+         },
+         {
+           ..
+         }
+       ],
+       "context": "東海道新幹線 [SEP] 1987 年(昭和 62 年)4 月 1 日の国鉄分割民営化により、JR 東海が運営を継承した。西日本旅客鉄道(JR 西日本)が継承した山陽新幹線とは相互乗り入れが行われており、東海道新幹線区間のみで運転される列車にも JR 西日本所有の車両が使用されることがある。2020 年(令和 2 年)3 月現在、東京駅 - 新大阪駅間の所要時間は最速 2 時間 21 分、最高速度 285 km/h で運行されている。"
+     }
+   ]
+ }
+ ```
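+
+ Note that in this configuration the whole split is stored as a single nested record under the `data` feature (hence `num_rows: 1` above). The following is a minimal sketch, not part of the loading script, that walks the nested structure the same way `tests/JGLUE_test.py` does and counts the question-answer entries:
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JSQuAD")
+
+ def count_qa_entries(split: str) -> int:
+     # each split holds exactly one nested record under the "data" feature
+     data = dataset[split]["data"]
+     assert len(data) == 1
+     data = data[0]
+
+     num_entries = 0
+     for paragraphs in data["paragraphs"]:
+         for qas in paragraphs["qas"]:
+             num_entries += len(qas["answers"])
+     return num_entries
+
+ print(count_qa_entries("train"))       # 62859, matching the Data Splits table
+ print(count_qa_entries("validation"))  # 4442
+ ```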
+
+ #### JCommonsenseQA
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JCommonsenseQA")
+
+ print(dataset)
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['q_id', 'question', 'choice0', 'choice1', 'choice2', 'choice3', 'choice4', 'label'],
+ #         num_rows: 8939
+ #     })
+ #     validation: Dataset({
+ #         features: ['q_id', 'question', 'choice0', 'choice1', 'choice2', 'choice3', 'choice4', 'label'],
+ #         num_rows: 1119
+ #     })
+ # })
+ ```
+
+ An example of the JCommonsenseQA dataset looks as follows:
+
+ ```json
+ {
+   "q_id": 3016,
+   "question": "会社の最高責任者を何というか? (What do you call the chief executive officer of a company?)",
+   "choice0": "社長 (president)",
+   "choice1": "教師 (teacher)",
+   "choice2": "部長 (manager)",
+   "choice3": "バイト (part-time worker)",
+   "choice4": "部下 (subordinate)",
+   "label": 0
+ }
+ ```
+
+ ### Data Fields
+
+ #### MARC-ja
+
+ - `sentence`: review text
+ - `label`: sentiment label
+ - `review_id`: ID of the review
+
+ #### JSTS
+
+ - `sentence_pair_id`: ID of the sentence pair
+ - `yjcaptions_id`: sentence ids in yjcaptions (explained below)
+ - `sentence1`: first sentence
+ - `sentence2`: second sentence
+ - `label`: sentence similarity: 5 (equivalent meaning) - 0 (completely different meaning)
+
+ ##### Explanation for `yjcaptions_id`
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE#explanation-for-yjcaptions_id), there are the following two cases:
+
+ 1. sentence pairs in one image: `(image id)-(sentence1 id)-(sentence2 id)`
+    - e.g., 723-844-847
+    - a sentence id starting with "g" means a sentence generated by a crowdworker (e.g., 69501-75698-g103): only for JNLI
+ 2. sentence pairs in two images: `(image id of sentence1)_(image id of sentence2)-(sentence1 id)-(sentence2 id)`
+    - e.g., 91337_217583-96105-91680
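+
+ A small illustrative sketch (ours, not part of JGLUE) for splitting these IDs into their parts; both formats share the same three dash-separated fields, and the first field may contain two image ids joined by `_`:
+
+ ```python
+ def parse_yjcaptions_id(yjcaptions_id: str) -> dict:
+     # e.g. "723-844-847", "69501-75698-g103", "91337_217583-96105-91680"
+     image_part, sentence1_id, sentence2_id = yjcaptions_id.split("-")
+     return {
+         "image_ids": image_part.split("_"),
+         "sentence1_id": sentence1_id,
+         "sentence2_id": sentence2_id,
+     }
+
+ print(parse_yjcaptions_id("127202-129817-129818"))
+ # {'image_ids': ['127202'], 'sentence1_id': '129817', 'sentence2_id': '129818'}
+ ```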
+
+ #### JNLI
+
+ - `sentence_pair_id`: ID of the sentence pair
+ - `yjcaptions_id`: sentence ids in the yjcaptions
+ - `sentence1`: premise sentence
+ - `sentence2`: hypothesis sentence
+ - `label`: inference relation
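+
+ The JNLI `label` is stored as a `datasets.ClassLabel`, so the integer value can be mapped back to the relation name; a short usage sketch:
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JNLI")
+
+ label_feature = dataset["train"].features["label"]
+ print(label_feature.names)  # ['entailment', 'contradiction', 'neutral']
+
+ example = dataset["train"][0]
+ print(example["label"], label_feature.int2str(example["label"]))
+ ```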
+
+ #### JSQuAD
+
+ - `title`: title of a Wikipedia article
+ - `paragraphs`: a set of paragraphs
+   - `qas`: a set of pairs of a question and its answer
+     - `question`: question
+     - `id`: id of a question
+     - `answers`: a set of answers
+       - `text`: answer text
+       - `answer_start`: start position (character index)
+     - `is_impossible`: all the values are false
+   - `context`: a concatenation of the title and paragraph
+
+ #### JCommonsenseQA
+
+ - `q_id`: ID of the question
+ - `question`: question
+ - `choice{0..4}`: choice
+ - `label`: correct choice id
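+
+ Since `label` is the index of the correct choice, the answer text can be looked up from the `choice{0..4}` columns; a minimal sketch:
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("shunk031/JGLUE", name="JCommonsenseQA")
+
+ example = dataset["train"][0]
+ choices = [example[f"choice{i}"] for i in range(5)]
+ answer = choices[example["label"]]  # pick the choice indexed by `label`
+ print(example["question"], answer)
+ ```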
+
+ ### Data Splits
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE/blob/main/README.md#tasksdatasets):
+
+ > Only train/dev sets are available now, and the test set will be available after the leaderboard is made public.
+
+ | Task                         | Dataset        |   Train |   Dev |  Test |
+ |------------------------------|----------------|--------:|------:|------:|
+ | Text Classification          | MARC-ja        | 187,528 | 5,654 | 5,639 |
+ |                              | JCoLA&dagger;  |       - |     - |     - |
+ | Sentence Pair Classification | JSTS           |  12,451 | 1,457 | 1,589 |
+ |                              | JNLI           |  20,073 | 2,434 | 2,508 |
+ | Question Answering           | JSQuAD         |  62,859 | 4,442 | 4,420 |
+ |                              | JCommonsenseQA |   8,939 | 1,119 | 1,118 |
+
+ > &dagger;JCoLA will be added soon.
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+
+ > JGLUE is designed to cover a wide range of GLUE and SuperGLUE tasks and consists of three kinds of tasks: text classification, sentence pair classification, and question answering.
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ - The source language producers are users of Amazon (MARC-ja), crowd-workers of Yahoo! Crowdsourcing (JSTS, JNLI and JCommonsenseQA), and writers of the Japanese Wikipedia (JSQuAD).
+
+ ### Annotations
+
+ #### Annotation process
+
+ ##### MARC-ja
+
+ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+
+ > As one of the text classification datasets, we build a dataset based on the Multilingual Amazon Reviews Corpus (MARC) (Keung et al., 2020). MARC is a multilingual corpus of product reviews with 5-level star ratings (1-5) on the Amazon shopping site. This corpus covers six languages, including English and Japanese. For JGLUE, we use the Japanese part of MARC and to make it easy for both humans and computers to judge a class label, we cast the text classification task as a binary classification task, where 1- and 2-star ratings are converted to “negative”, and 4 and 5 are converted to “positive”. We do not use reviews with a 3-star rating.
+
+ > One of the problems with MARC is that it sometimes contains data where the rating diverges from the review text. This happens, for example, when a review with positive content is given a rating of 1 or 2. These data degrade the quality of our dataset. To improve the quality of the dev/test instances used for evaluation, we crowdsource a positive/negative judgment task for approximately 12,000 reviews. We adopt only reviews with the same votes from 7 or more out of 10 workers and assign a label of the maximum votes to these reviews. We divide the resulting reviews into dev/test data.
+
+ > We obtained 5,654 and 5,639 instances for the dev and test data, respectively, through the above procedure. For the training data, we extracted 187,528 instances directly from MARC without performing the cleaning procedure because of the large number of training instances. The statistics of MARC-ja are listed in Table 2. For the evaluation metric for MARC-ja, we use accuracy because it is a binary classification task of texts.
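+
+ The loading script in this repository (`get_label` in `JGLUE.py`) applies the same rating-to-label rule when building the training split; in sketch form:
+
+ ```python
+ from typing import Optional
+
+ def get_label(rating: int, is_pos_neg: bool = False) -> Optional[str]:
+     # 4- and 5-star reviews -> "positive"; 1- and 2-star -> "negative";
+     # 3-star reviews are dropped (None) in the binary positive/negative setting.
+     if rating >= 4:
+         return "positive"
+     elif rating <= 2:
+         return "negative"
+     return None if is_pos_neg else "neutral"
+
+ assert get_label(5) == "positive"
+ assert get_label(2) == "negative"
+ assert get_label(3, is_pos_neg=True) is None
+ ```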
+
+ ##### JSTS and JNLI
+
+ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+
+ > For the sentence pair classification datasets, we construct a semantic textual similarity (STS) dataset, JSTS, and a natural language inference (NLI) dataset, JNLI.
+
+ > ### Overview
+ > STS is a task of estimating the semantic similarity of a sentence pair. Gold similarity is usually assigned as an average of the integer values 0 (completely different meaning) to 5 (equivalent meaning) assigned by multiple workers through crowdsourcing.
+
+ > NLI is a task of recognizing the inference relation that a premise sentence has to a hypothesis sentence. Inference relations are generally defined by three labels: “entailment”, “contradiction”, and “neutral”. Gold inference relations are often assigned by majority voting after collecting answers from multiple workers through crowdsourcing.
+
+ > For the STS and NLI tasks, STS-B (Cer et al., 2017) and MultiNLI (Williams et al., 2018) are included in GLUE, respectively. As Japanese datasets, JSNLI (Yoshikoshi et al., 2020) is a machine translated dataset of the NLI dataset SNLI (Stanford NLI), and JSICK (Yanaka and Mineshima, 2021) is a human translated dataset of the STS/NLI dataset SICK (Marelli et al., 2014). As mentioned in Section 1, these have problems originating from automatic/manual translations. To solve this problem, we construct STS/NLI datasets in Japanese from scratch. We basically extract sentence pairs in JSTS and JNLI from the Japanese version of the MS COCO Caption Dataset (Chen et al., 2015), the YJ Captions Dataset (Miyazaki and Shimizu, 2016). Most of the sentence pairs in JSTS and JNLI overlap, allowing us to analyze the relationship between similarities and inference relations for the same sentence pairs like SICK and JSICK.
+
+ > The similarity value in JSTS is assigned a real number from 0 to 5 as in STS-B. The inference relation in JNLI is assigned from the above three labels as in SNLI and MultiNLI. The definitions of the inference relations are also based on SNLI.
+
+ > ### Method of Construction
+ > Our construction flow for JSTS and JNLI is shown in Figure 1. Basically, two captions for the same image of YJ Captions are used as sentence pairs. For these sentence pairs, similarities and NLI relations of entailment and neutral are obtained by crowdsourcing. However, it is difficult to collect sentence pairs with low similarity and contradiction relations from captions for the same image. To solve this problem, we collect sentence pairs with low similarity from captions for different images. We collect contradiction relations by asking workers to write contradictory sentences for a given caption.
+
+ > The detailed construction procedure for JSTS and JNLI is described below.
+ > 1. We crowdsource an STS task using two captions for the same image from YJ Captions. We ask five workers to answer the similarity between two captions and take the mean value as the gold similarity. We delete sentence pairs with a large variance in the answers because such pairs have poor answer quality. We performed this task on 16,000 sentence pairs and deleted sentence pairs with a similarity variance of 1.0 or higher, resulting in the collection of 10,236 sentence pairs with gold similarity. We refer to this collected data as JSTS-A.
+ > 2. To collect sentence pairs with low similarity, we crowdsource the same STS task as Step 1 using sentence pairs of captions for different images. We conducted this task on 4,000 sentence pairs and collected 2,970 sentence pairs with gold similarity. We refer to this collected data as JSTS-B.
+ > 3. For JSTS-A, we crowdsource an NLI task. Since inference relations are directional, we obtain inference relations in both directions for sentence pairs. As mentioned earlier, it is difficult to collect instances of contradiction from JSTS-A, which was collected from the captions of the same images, and thus we collect instances of entailment and neutral in this step. We collect inference relation answers from 10 workers. If six or more people give the same answer, we adopt it as the gold label if it is entailment or neutral. To obtain inference relations in both directions for JSTS-A, we performed this task on 20,472 sentence pairs, twice as many as JSTS-A. As a result, we collected inference relations for 17,501 sentence pairs. We refer to this collected data as JNLI-A. We do not use JSTS-B for the NLI task because it is difficult to define and determine the inference relations between captions of different images.
+ > 4. To collect NLI instances of contradiction, we crowdsource a task of writing four contradictory sentences for each caption in YJCaptions. From the written sentences, we remove sentence pairs with an edit distance of 0.75 or higher to remove low-quality sentences, such as short sentences and sentences with low relevance to the original sentence. Furthermore, we perform a one-way NLI task with 10 workers to verify whether the created sentence pairs are contradictory. Only the sentence pairs answered as contradiction by at least six workers are adopted. Finally, since the contradiction relation has no direction, we automatically assign contradiction in the opposite direction of the adopted sentence pairs. Using 1,800 captions, we acquired 7,200 sentence pairs, from which we collected 3,779 sentence pairs to which we assigned the one-way contradiction relation. By automatically assigning the contradiction relation in the opposite direction, we doubled the number of instances to 7,558. We refer to this collected data as JNLI-C.
+ > 5. For the 3,779 sentence pairs collected in Step 4, we crowdsource an STS task, assigning similarity and filtering in the same way as in Steps 1 and 2. In this way, we collected 2,303 sentence pairs with gold similarity from 3,779 pairs. We refer to this collected data as JSTS-C.
+
+ ##### JSQuAD
+
+ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+
+ > As QA datasets, we build a Japanese version of SQuAD (Rajpurkar et al., 2016), one of the datasets of reading comprehension, and a Japanese version of CommonsenseQA, which is explained in the next section.
+
+ > Reading comprehension is the task of reading a document and answering questions about it. Many reading comprehension evaluation sets have been built in English, followed by those in other languages or multilingual ones.
+
+ > In Japanese, reading comprehension datasets for quizzes (Suzuki et al., 2018) and those in the driving domain (Takahashi et al., 2019) have been built, but none are in the general domain. We use Wikipedia to build a dataset for the general domain. The construction process is basically based on SQuAD 1.1 (Rajpurkar et al., 2016).
+
+ > First, to extract high-quality articles from Wikipedia, we use Nayuki, which estimates the quality of articles on the basis of hyperlinks in Wikipedia. We randomly chose 822 articles from the top-ranked 10,000 articles. For example, the articles include “熊本県 (Kumamoto Prefecture)” and “フランス料理 (French cuisine)”. Next, we divide an article into paragraphs, present each paragraph to crowdworkers, and ask them to write questions and answers that can be answered if one understands the paragraph. Figure 2 shows an example of JSQuAD. We ask workers to write two additional answers for the dev and test sets to make the system evaluation robust.
+
+ ##### JCommonsenseQA
+
+ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+
+ > ### Overview
+ > JCommonsenseQA is a Japanese version of CommonsenseQA (Talmor et al., 2019), which consists of five-choice QA to evaluate commonsense reasoning ability. Figure 3 shows examples of JCommonsenseQA. In the same way as CommonsenseQA, JCommonsenseQA is built using crowdsourcing with seeds extracted from the knowledge base ConceptNet (Speer et al., 2017). ConceptNet is a multilingual knowledge base that consists of triplets of two concepts and their relation. The triplets are directional and represented as (source concept, relation, target concept), for example (bullet train, AtLocation, station).
+
+ > ### Method of Construction
+ > The construction flow for JCommonsenseQA is shown in Figure 4. First, we collect question sets (QSs) from ConceptNet, each of which consists of a source concept and three target concepts that have the same relation to the source concept. Next, for each QS, we crowdsource a task of writing a question with only one target concept as the answer and a task of adding two distractors. We describe the detailed construction procedure for JCommonsenseQA below, showing how it differs from CommonsenseQA.
+
+ > 1. We collect Japanese QSs from ConceptNet. CommonsenseQA uses only forward relations (source concept, relation, target concept) excluding general ones such as “RelatedTo” and “IsA”. JCommonsenseQA similarly uses a set of 22 relations, excluding general ones, but the direction of the relations is bidirectional to make the questions more diverse. In other words, we also use relations in the opposite direction (source concept, relation−1, target concept). With this setup, we extracted 43,566 QSs with Japanese source/target concepts and randomly selected 7,500 from them.
+ > 2. Some low-quality questions in CommonsenseQA contain distractors that can be considered to be an answer. To improve the quality of distractors, we add the following two processes that are not adopted in CommonsenseQA. First, if three target concepts of a QS include a spelling variation or a synonym of one another, this QS is removed. To identify spelling variations, we use the word ID of the morphological dictionary Juman Dic. Second, we crowdsource a task of judging whether target concepts contain a synonym. As a result, we adopted 5,920 QSs from 7,500.
+ > 3. For each QS, we crowdsource a task of writing a question sentence in which only one from the three target concepts is an answer. In the example shown in Figure 4, “駅 (station)” is an answer, and the others are distractors. To remove low quality question sentences, we remove the following question sentences.
+ >     - Question sentences that contain a choice word (this is because such a question is easily solved).
+ >     - Question sentences that contain the expression “XX characters” (XX is a number).
+ >     - Improperly formatted question sentences that do not end with “?”.
+ >     - As a result, 5,920 × 3 = 17,760 question sentences were created, from which we adopted 15,310 by removing inappropriate question sentences.
+ > 4. In CommonsenseQA, when adding distractors, one is selected from ConceptNet, and the other is created by crowdsourcing. In JCommonsenseQA, to have a wider variety of distractors, two distractors are created by crowdsourcing instead of selecting from ConceptNet. To improve the quality of the questions, we remove questions whose added distractors fall into one of the following categories:
+ >     - Distractors are included in a question sentence.
+ >     - Distractors overlap with one of existing choices.
+ >     - As a result, distractors were added to the 15,310 questions, of which we adopted 13,906.
+ > 5. We asked three crowdworkers to answer each question and adopt only those answered correctly by at least two workers. As a result, we adopted 11,263 out of the 13,906 questions.
+
+ #### Who are the annotators?
+
+ From [the official README.md](https://github.com/yahoojapan/JGLUE/blob/main/README.md#tasksdatasets):
+
+ > We use Yahoo! Crowdsourcing for all crowdsourcing tasks in constructing the datasets.
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+
+ > We build a Japanese NLU benchmark, JGLUE, from scratch without translation to measure the general NLU ability in Japanese. We hope that JGLUE will facilitate NLU research in Japanese.
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ - 日本語言語理解ベンチマーク JGLUE の構築 〜 自然言語処理モデルの評価用データセットを公開しました (Building JGLUE, a Japanese language understanding benchmark: an evaluation dataset for NLP models is now public) - Yahoo! JAPAN Tech Blog https://techblog.yahoo.co.jp/entry/2022122030379907/
+
+ ### Dataset Curators
+
+ #### MARC-ja
+
+ - Keung, Phillip, et al. "The Multilingual Amazon Reviews Corpus." Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP). 2020.
+
+ #### JSTS and JNLI
+
+ - Miyazaki, Takashi, and Nobuyuki Shimizu. "Cross-lingual image caption generation." Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 2016.
+
+ #### JSQuAD
+
+ The authors curated the original data for JSQuAD from the Japanese Wikipedia dump.
+
+ #### JCommonsenseQA
+
+ In the same way as CommonsenseQA, JCommonsenseQA is built using crowdsourcing with seeds extracted from the knowledge base ConceptNet.
+
+ ### Licensing Information
+
+ > This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
+
+ ### Citation Information
+
+ ```bibtex
+ @inproceedings{kurihara-etal-2022-jglue,
+     title = "{JGLUE}: {J}apanese General Language Understanding Evaluation",
+     author = "Kurihara, Kentaro and
+       Kawahara, Daisuke and
+       Shibata, Tomohide",
+     booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
+     month = jun,
+     year = "2022",
+     address = "Marseille, France",
+     publisher = "European Language Resources Association",
+     url = "https://aclanthology.org/2022.lrec-1.317",
+     pages = "2957--2966",
+     abstract = "To develop high-performance natural language understanding (NLU) models, it is necessary to have a benchmark to evaluate and analyze NLU ability from various perspectives. While the English NLU benchmark, GLUE, has been the forerunner, benchmarks are now being released for languages other than English, such as CLUE for Chinese and FLUE for French; but there is no such benchmark for Japanese. We build a Japanese NLU benchmark, JGLUE, from scratch without translation to measure the general NLU ability in Japanese. We hope that JGLUE will facilitate NLU research in Japanese.",
+ }
+ ```
+
+ ```bibtex
+ @InProceedings{Kurihara_nlp2022,
+     author = "栗原健太郎 and 河原大輔 and 柴田知秀",
+     title = "JGLUE: 日本語言語理解ベンチマーク",
+     booktitle = "言語処理学会第 28 回年次大会",
+     year = "2022",
+     url = "https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E8-4.pdf"
+     note= "in Japanese"
+ }
+ ```
+
+ ### Contributions
+
+ Thanks to [Kentaro Kurihara](https://twitter.com/kkurihara_cs), [Daisuke Kawahara](https://twitter.com/daisukekawahar1), and [Tomohide Shibata](https://twitter.com/stomohide) for creating this dataset.
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,27 @@
+ [tool.poetry]
+ name = "huggingface-datasets-jglue"
+ version = "0.1.0"
+ description = ""
+ authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
+ readme = "README.md"
+ packages = []
+
+ [tool.poetry.dependencies]
+ python = "^3.9"
+ datasets = "^2.10.0"
+ beautifulsoup4 = "^4.11.2"
+ mecab-python3 = "^1.0.6"
+ pyknp = "^0.6.1"
+ mojimoji = "^0.0.12"
+
+
+ [tool.poetry.group.dev.dependencies]
+ black = "^23.1.0"
+ isort = "^5.12.0"
+ flake8 = "^6.0.0"
+ mypy = "^1.0.1"
+ pytest = "^7.2.1"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
tests/JGLUE_test.py ADDED
@@ -0,0 +1,69 @@
+ import datasets as ds
+ import pytest
+
+
+ @pytest.fixture
+ def dataset_path() -> str:
+     return "JGLUE.py"
+
+
+ @pytest.mark.parametrize(
+     argnames="dataset_name, expected_num_train, expected_num_valid,",
+     argvalues=(
+         ("JSTS", 12451, 1457),
+         ("JNLI", 20073, 2434),
+         ("JCommonsenseQA", 8939, 1119),
+     ),
+ )
+ def test_load_dataset(
+     dataset_path: str,
+     dataset_name: str,
+     expected_num_train: int,
+     expected_num_valid: int,
+ ):
+     dataset = ds.load_dataset(path=dataset_path, name=dataset_name)
+
+     assert dataset["train"].num_rows == expected_num_train
+     assert dataset["validation"].num_rows == expected_num_valid
+
+
+ def test_load_jsquad(
+     dataset_path: str,
+     dataset_name: str = "JSQuAD",
+     expected_num_train: int = 62859,
+     expected_num_valid: int = 4442,
+ ):
+     dataset = ds.load_dataset(path=dataset_path, name=dataset_name)
+
+     def count_num_data(split: str) -> int:
+         data = dataset[split]["data"]
+         assert len(data) == 1
+         data = data[0]
+
+         num_data = 0
+         for paragraphs in data["paragraphs"]:
+             for qas in paragraphs["qas"]:
+                 num_data += len(qas["answers"])
+         return num_data
+
+     assert count_num_data("train") == expected_num_train
+     assert count_num_data("validation") == expected_num_valid
+
+
+ def test_load_marc_ja(
+     dataset_path: str,
+     dataset_name: str = "MARC-ja",
+     expected_num_train: int = 187528,
+     expected_num_valid: int = 5654,
+ ):
+     dataset = ds.load_dataset(
+         path=dataset_path,
+         name=dataset_name,
+         is_pos_neg=True,
+         max_char_length=500,
+         filter_review_id_list_valid=True,
+         label_conv_review_id_list_valid=True,
+     )
+
+     assert dataset["train"].num_rows == expected_num_train
+     assert dataset["validation"].num_rows == expected_num_valid
tests/__init__.py ADDED
File without changes