albertvillanova (HF staff) committed
Commit 99d1ad9
1 parent: b7e9afd

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (cdbbed2c77211ef6915e4efb88ad8059093e2a83)
- Delete loading script (fcd1006765c26544ef16be65f6bbe5c64d8d3430)
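With the splits stored as Parquet, the dataset loads without executing any repository code. A minimal sketch, assuming the canonical `sogou_news` repo id and a recent `datasets` release:

```python
from datasets import load_dataset

# Loads the Parquet shards directly; no loading script is run.
ds = load_dataset("sogou_news")

print(ds)                     # DatasetDict with train (450,000 rows) and test (60,000 rows)
print(ds["train"][0].keys())  # fields: title, content, label
```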

README.md CHANGED
@@ -17,13 +17,20 @@ dataset_info:
           '4': technology
   splits:
   - name: test
-    num_bytes: 168645860
+    num_bytes: 168615812
     num_examples: 60000
   - name: train
-    num_bytes: 1257931136
+    num_bytes: 1257705776
     num_examples: 450000
-  download_size: 384269937
-  dataset_size: 1426576996
+  download_size: 632540318
+  dataset_size: 1426321588
+configs:
+- config_name: default
+  data_files:
+  - split: test
+    path: data/test-*
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for "sogou_news"
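The new `configs` block maps each split to its Parquet shards via glob patterns. Roughly the same mapping can also be passed explicitly at load time (a sketch; the `data_files` globs are resolved relative to the dataset repository):

```python
from datasets import load_dataset

# Mirror the YAML `configs` entry by naming the shards explicitly.
ds = load_dataset(
    "sogou_news",
    data_files={"test": "data/test-*", "train": "data/train-*"},
)
```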
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9302a604121f4f11542873c6706179ab76de4a0fdc88db14c67dc7a20169bbfa
+size 75513418
data/train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9d9bb25f4c8f02d08b4f69afe7cf67ed28943feae3e71110ee868753ca43693
+size 243100183
data/train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50cc4c05fdfbf260b316eff1922d488945968cf8bfa8901c67998d2adf8d38ba
+size 211674942
data/train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf93a43a429a24cc29d242c7660e3f867e5cf249fc032eff8f7b7e1b454f1b34
+size 102251775
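Each of the four entries above is a Git LFS pointer: the repository stores only the `version`/`oid`/`size` stanza, while the Parquet bytes live in LFS storage. To inspect a single shard, one option is to download it and read it with `pyarrow` (a sketch; the filename comes from this commit, the rest is standard `huggingface_hub` usage):

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and returns a local path to the real Parquet file.
path = hf_hub_download(
    repo_id="sogou_news",
    repo_type="dataset",
    filename="data/test-00000-of-00001.parquet",
)

table = pq.read_table(path)
print(table.num_rows)  # expected 60000, per the README split metadata
print(table.schema)    # title/content strings plus an integer class label
```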
sogou_news.py DELETED
@@ -1,94 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Sogou News"""
-
-
-import csv
-import ctypes
-
-import datasets
-
-
-csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
-
-
-_CITATION = """\
-@misc{zhang2015characterlevel,
-    title={Character-level Convolutional Networks for Text Classification},
-    author={Xiang Zhang and Junbo Zhao and Yann LeCun},
-    year={2015},
-    eprint={1509.01626},
-    archivePrefix={arXiv},
-    primaryClass={cs.LG}
-}
-"""
-
-_DESCRIPTION = """\
-The Sogou News dataset is a mixture of 2,909,551 news articles from the SogouCA and SogouCS news corpora, in 5 categories.
-The number of training samples selected for each class is 90,000 and testing 12,000. Note that the Chinese characters have been converted to Pinyin.
-classification labels of the news are determined by their domain names in the URL. For example, the news with
-URL http://sports.sohu.com is categorized as a sport class.
-"""
-
-_DATA_URL = "https://s3.amazonaws.com/fast-ai-nlp/sogou_news_csv.tgz"
-
-
-class Sogou_News(datasets.GeneratorBasedBuilder):
-    """Sogou News dataset"""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "title": datasets.Value("string"),
-                    "content": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(
-                        names=["sports", "finance", "entertainment", "automobile", "technology"]
-                    ),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both premise
-            # and hypothesis as input).
-            supervised_keys=None,
-            homepage="",  # didn't find a real homepage
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        archive = dl_manager.download(_DATA_URL)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": "sogou_news_csv/test.csv", "files": dl_manager.iter_archive(archive)},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": "sogou_news_csv/train.csv", "files": dl_manager.iter_archive(archive)},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, files):
-        """This function returns the examples in the raw (text) form."""
-        for path, f in files:
-            if path == filepath:
-                lines = (line.decode("utf-8") for line in f)
-                data = csv.reader(lines)
-                for id_, row in enumerate(data):
-                    yield id_, {"title": row[1], "content": row[2], "label": int(row[0]) - 1}
-                break
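For reference, the deleted builder streamed `sogou_news_csv.tgz`, parsed each CSV row, and shifted the 1-based label column to the 0-based `ClassLabel`. A hypothetical sketch of reproducing the conversion locally (the Hub's parquet-converter bot does this server-side; the shard layout and filenames here are illustrative, not the bot's exact output):

```python
from datasets import load_dataset

# Hypothetical local re-creation of the conversion in this commit.
ds = load_dataset("sogou_news")  # now already Parquet-backed on the Hub
for split, dset in ds.items():
    # Dataset.to_parquet writes one file per split; the bot shards large splits.
    dset.to_parquet(f"data/{split}.parquet")
```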