Datasets:
Tasks:
Text Classification
Sub-tasks:
natural-language-inference
Languages:
Tagalog
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
machine-generated
Source Datasets:
original
ArXiv:
2010.11574
License:
Update files from the datasets library (from 1.6.2)
Browse files. Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.2
- dataset_infos.json +1 -1
- dummy/1.0.0/dummy_data.zip +1 -1
- newsph_nli.py +11 -9
dataset_infos.json
CHANGED
@@ -1 +1 @@
|
|
1 |
-
{"default": {"description": " First benchmark dataset for sentence entailment in the low-resource Filipino language. Constructed through exploting the structure of news articles. Contains 600,000 premise-hypothesis pairs, in 70-15-15 split for training, validation, and testing.\n", "citation": " @article{cruz2020investigating,\n title={Investigating the True Performance of Transformers in Low-Resource Languages: A Case Study in Automatic Corpus Creation},\n author={Jan Christian Blaise Cruz and Jose Kristian Resabal and James Lin and Dan John Velasco and Charibeth Cheng},\n journal={arXiv preprint arXiv:2010.11574},\n year={2020}\n }\n", "homepage": "https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "newsph_nli", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 154510599, "num_examples": 420000, "dataset_name": "newsph_nli"}, "test": {"name": "test", "num_bytes":
|
|
|
1 |
+
{"default": {"description": " First benchmark dataset for sentence entailment in the low-resource Filipino language. Constructed through exploiting the structure of news articles. Contains 600,000 premise-hypothesis pairs, in 70-15-15 split for training, validation, and testing.\n", "citation": " @article{cruz2020investigating,\n title={Investigating the True Performance of Transformers in Low-Resource Languages: A Case Study in Automatic Corpus Creation},\n author={Jan Christian Blaise Cruz and Jose Kristian Resabal and James Lin and Dan John Velasco and Charibeth Cheng},\n journal={arXiv preprint arXiv:2010.11574},\n year={2020}\n }\n", "homepage": "https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks", "license": "Filipino-Text-Benchmarks is licensed under the GNU General Public License v3.0", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "newsph_nli", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 154510599, "num_examples": 420000, "dataset_name": "newsph_nli"}, "test": {"name": "test", "num_bytes": 3283665, "num_examples": 9000, "dataset_name": "newsph_nli"}, "validation": {"name": "validation", "num_bytes": 33015530, "num_examples": 90000, "dataset_name": "newsph_nli"}}, "download_checksums": {"https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/newsph/newsph-nli.zip": {"num_bytes": 76565287, "checksum": "544823dffe5b253718746ecc66d34116d918deb9886a58077447aeafe9538374"}}, "download_size": 76565287, "post_processing_size": null, "dataset_size": 190809794, "size_in_bytes": 267375081}}
|
dummy/1.0.0/dummy_data.zip
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 3286
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c1d59c173a089f7954cb9592f3316e2bee6e86cb82cb379fd12e83f4c76f4242
|
3 |
size 3286
|
newsph_nli.py
CHANGED
@@ -21,22 +21,24 @@ import datasets
|
|
21 |
|
22 |
|
23 |
_DESCRIPTION = """\
|
24 |
-
|
|
|
|
|
25 |
"""
|
26 |
|
27 |
_CITATION = """\
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
"""
|
35 |
|
36 |
_HOMEPAGE = "https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks"
|
37 |
|
38 |
# TODO: Add the licence for the dataset here if you can find it
|
39 |
-
_LICENSE = ""
|
40 |
|
41 |
_URL = "https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/newsph/newsph-nli.zip"
|
42 |
|
@@ -68,7 +70,7 @@ class NewsphNli(datasets.GeneratorBasedBuilder):
|
|
68 |
data_dir = dl_manager.download_and_extract(_URL)
|
69 |
download_path = os.path.join(data_dir, "newsph-nli")
|
70 |
train_path = os.path.join(download_path, "train.csv")
|
71 |
-
test_path = os.path.join(download_path, "
|
72 |
validation_path = os.path.join(download_path, "valid.csv")
|
73 |
|
74 |
return [
|
|
|
21 |
|
22 |
|
23 |
_DESCRIPTION = """\
|
24 |
+
First benchmark dataset for sentence entailment in the low-resource Filipino language.
|
25 |
+
Constructed through exploiting the structure of news articles. Contains 600,000 premise-hypothesis pairs,
|
26 |
+
in 70-15-15 split for training, validation, and testing.
|
27 |
"""
|
28 |
|
29 |
_CITATION = """\
|
30 |
+
@article{cruz2020investigating,
|
31 |
+
title={Investigating the True Performance of Transformers in Low-Resource Languages: A Case Study in Automatic Corpus Creation},
|
32 |
+
author={Jan Christian Blaise Cruz and Jose Kristian Resabal and James Lin and Dan John Velasco and Charibeth Cheng},
|
33 |
+
journal={arXiv preprint arXiv:2010.11574},
|
34 |
+
year={2020}
|
35 |
+
}
|
36 |
"""
|
37 |
|
38 |
_HOMEPAGE = "https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks"
|
39 |
|
40 |
# TODO: Add the licence for the dataset here if you can find it
|
41 |
+
_LICENSE = "Filipino-Text-Benchmarks is licensed under the GNU General Public License v3.0"
|
42 |
|
43 |
_URL = "https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/newsph/newsph-nli.zip"
|
44 |
|
|
|
70 |
data_dir = dl_manager.download_and_extract(_URL)
|
71 |
download_path = os.path.join(data_dir, "newsph-nli")
|
72 |
train_path = os.path.join(download_path, "train.csv")
|
73 |
+
test_path = os.path.join(download_path, "test.csv")
|
74 |
validation_path = os.path.join(download_path, "valid.csv")
|
75 |
|
76 |
return [
|