albertvillanova (HF staff) committed on
Commit
bcc8ae1
1 Parent(s): b611a7b

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (9fa34722105606c1cd6f70916a733de60d0d6a5f)
- Delete loading script (8603fef4d12b1c37703a1149d858ae37d904d103)
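
With the dataset stored as Parquet, `datasets.load_dataset` reads the shards directly from the Hub and no longer executes a loading script. A minimal sketch (the bare repo id `ilist` is an assumption; substitute the full `<namespace>/ilist` id if needed):

```python
from datasets import load_dataset

# Assumption: repository id; use the full "<namespace>/ilist" path if required.
ds = load_dataset("ilist")

print(ds)              # DatasetDict with train, test, and validation splits
print(ds["train"][0])  # {"language_id": ..., "text": ...}
```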

README.md CHANGED
@@ -38,16 +38,25 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 14362998
+    num_bytes: 14362966
     num_examples: 70351
   - name: test
-    num_bytes: 2146857
+    num_bytes: 2146853
     num_examples: 9692
   - name: validation
-    num_bytes: 2407643
+    num_bytes: 2407635
     num_examples: 10329
-  download_size: 18284850
-  dataset_size: 18917498
+  download_size: 8697678
+  dataset_size: 18917454
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for ilist
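
The new `configs` block maps each split to a glob over the Parquet shards committed below, which is how the Hub's script-free loader finds the data. A quick sanity check that the recomputed `num_examples` values match what actually loads (a sketch, same repo-id assumption as above):

```python
from datasets import load_dataset

ds = load_dataset("ilist")  # assumption: repository id

# num_examples from the README YAML above
expected = {"train": 70351, "test": 9692, "validation": 10329}
for split, n in expected.items():
    assert ds[split].num_rows == n, f"{split}: got {ds[split].num_rows}, expected {n}"
```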
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d51eb14a4b5fcc20ebbc38655adff97f67c5a0a6fce24ea8a7e3ae1b79dd724e
+size 993642

data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:855901391da0e3bd1ef32b747d6c27aa12dd87d65974dd1276a19b9aa0c64e1e
+size 6611426

data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9687c428d3263f17877380837531242f3219ab95b7ccbf1d44975b731181ed73
+size 1092610
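
Each of the three files above is a Git LFS pointer: the repository itself stores only the payload's SHA-256 (`oid`) and byte `size`, while the actual Parquet bytes live in LFS storage. Once a shard is fetched locally (e.g. with `git lfs pull` or `huggingface_hub`), any Parquet reader can open it; a sketch with `pyarrow`:

```python
import pyarrow.parquet as pq

# Assumption: the train shard has already been downloaded to this relative path.
table = pq.read_table("data/train-00000-of-00001.parquet")

print(table.num_rows)  # should equal the train split's num_examples (70351)
print(table.schema)    # columns: language_id, text
```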
ilist.py DELETED
@@ -1,117 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Indo-Aryan Language Identification Shared Task Dataset"""
-
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = r"""\
-@inproceedings{zampieri-etal-2018-language,
-    title = "Language Identification and Morphosyntactic Tagging: The Second {V}ar{D}ial Evaluation Campaign",
-    author = {Zampieri, Marcos and
-      Malmasi, Shervin and
-      Nakov, Preslav and
-      Ali, Ahmed and
-      Shon, Suwon and
-      Glass, James and
-      Scherrer, Yves and
-      Samard{\v{z}}i{\'c}, Tanja and
-      Ljube{\v{s}}i{\'c}, Nikola and
-      Tiedemann, J{\"o}rg and
-      van der Lee, Chris and
-      Grondelaers, Stefan and
-      Oostdijk, Nelleke and
-      Speelman, Dirk and
-      van den Bosch, Antal and
-      Kumar, Ritesh and
-      Lahiri, Bornini and
-      Jain, Mayank},
-    booktitle = "Proceedings of the Fifth Workshop on {NLP} for Similar Languages, Varieties and Dialects ({V}ar{D}ial 2018)",
-    month = aug,
-    year = "2018",
-    address = "Santa Fe, New Mexico, USA",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/W18-3901",
-    pages = "1--17",
-}
-"""
-
-_DESCRIPTION = """\
-This dataset is introduced in a task which aimed at identifying 5 closely-related languages of Indo-Aryan language family –
-Hindi (also known as Khari Boli), Braj Bhasha, Awadhi, Bhojpuri, and Magahi.
-"""
-
-_URL = "https://raw.githubusercontent.com/kmi-linguistics/vardial2018/master/dataset/{}.txt"
-
-
-class Ilist(datasets.GeneratorBasedBuilder):
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "language_id": datasets.ClassLabel(names=["AWA", "BRA", "MAG", "BHO", "HIN"]),
-                    "text": datasets.Value("string"),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://github.com/kmi-linguistics/vardial2018",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="language_id")],
-        )
-
-    def _split_generators(self, dl_manager):
-        filepaths = dl_manager.download_and_extract(
-            {
-                "train": _URL.format("train"),
-                "test": _URL.format("gold"),
-                "dev": _URL.format("dev"),
-            }
-        )
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": filepaths["train"],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": filepaths["test"],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": filepaths["dev"],
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        with open(filepath, "r", encoding="utf-8") as file:
-            for idx, row in enumerate(file):
-                row = row.strip("\n").split("\t")
-                if len(row) == 1:
-                    continue
-                yield idx, {"language_id": row[1], "text": row[0]}
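
Deleting the script loses no schema information: the features it declared, including the `ClassLabel` over the five language codes, travel with the Parquet shards and the README metadata. A sketch checking that the label names survive the conversion (same repo-id assumption as above):

```python
from datasets import load_dataset

ds = load_dataset("ilist")  # assumption: repository id

label = ds["train"].features["language_id"]
print(label.names)  # expected: ['AWA', 'BRA', 'MAG', 'BHO', 'HIN']

# Decode an integer label back to its code, as the deleted ClassLabel did.
print(label.int2str(ds["train"][0]["language_id"]))
```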