Convert dataset to Parquet

#3
by albertvillanova - opened
README.md CHANGED
@@ -32,10 +32,15 @@ dataset_info:
           '3': Entertainment
   splits:
   - name: train
-    num_bytes: 3797368
+    num_bytes: 3797364
     num_examples: 8116
-  download_size: 610592
-  dataset_size: 3797368
+  download_size: 1185371
+  dataset_size: 3797364
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for Myanmar_News
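With the `configs`/`data_files` mapping added above, the Hub serves the train split directly from the Parquet shard instead of executing a loading script. A minimal usage sketch, assuming this dataset is reachable under the repo ID `myanmar_news` (substitute the actual namespace/ID):

```python
from datasets import load_dataset

# After the Parquet conversion, the data is read straight from
# data/train-*.parquet as declared in the README's `configs` section.
# "myanmar_news" is an assumed repo ID used only for illustration.
ds = load_dataset("myanmar_news", split="train")

print(ds.num_rows)              # 8116, per the updated dataset_info
print(ds.features["category"])  # ClassLabel(names=['Sport', 'Politic', 'Business', 'Entertainment'])
```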
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c20b838d8d318845eb047992cfa72c8a1476bd46f89805c168ff5311d8386018
+size 1185371
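The added file is a Git LFS pointer: only the `version`/`oid`/`size` lines live in the repo, while the actual 1,185,371-byte Parquet payload is stored in LFS. To inspect the shard directly, one option is to download it with `huggingface_hub` and read it with PyArrow; a sketch under the same repo-ID assumption as above:

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Resolve the real Parquet file behind the LFS pointer shown in the diff.
# "myanmar_news" is an assumed repo ID used only for illustration.
path = hf_hub_download(
    repo_id="myanmar_news",
    filename="data/train-00000-of-00001.parquet",
    repo_type="dataset",
)

table = pq.read_table(path)
print(table.num_rows)      # expected: 8116
print(table.column_names)  # expected: ['text', 'category']
```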
myanmar_news.py DELETED
@@ -1,83 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import csv
-import os
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-# no BibTeX citation
-_CITATION = ""
-
-_DESCRIPTION = """\
-The Myanmar news dataset contains article snippets in four categories:
-Business, Entertainment, Politics, and Sport.
-
-These were collected in October 2017 by Aye Hninn Khine
-"""
-
-_LICENSE = "GPL-3.0"
-
-_URLs = {"default": "https://github.com/Georeactor/MyanmarNewsClassificationSystem/archive/main.zip"}
-
-
-class MyanmarNews(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.1")
-
-    def _info(self):
-        class_names = ["Sport", "Politic", "Business", "Entertainment"]
-        features = datasets.Features(
-            {
-                "text": datasets.Value("string"),
-                "category": datasets.ClassLabel(names=class_names),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage="https://github.com/ayehninnkhine/MyanmarNewsClassificationSystem",
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="category")],
-        )
-
-    def _split_generators(self, dl_manager):
-        my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "MyanmarNewsClassificationSystem-main", "topics.csv"),
-                    "split": "train",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        with open(filepath, encoding="utf-8") as f:
-            rdr = csv.reader(f, delimiter="\t")
-            next(rdr)
-            rownum = 0
-            for row in rdr:
-                rownum += 1
-                yield rownum, {
-                    "text": row[0],
-                    "category": row[1],
-                }