albertvillanova HF staff committed on
Commit
fcd1006
1 Parent(s): cdbbed2

Delete loading script

Browse files
Files changed (1) hide show
  1. sogou_news.py +0 -94
sogou_news.py DELETED
@@ -1,94 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Sogou News"""
18
-
19
-
20
- import csv
21
- import ctypes
22
-
23
- import datasets
24
-
25
-
26
# Lift csv's per-field size cap so very long article bodies parse.
# ctypes.c_ulong(-1).value is ULONG_MAX for the platform's C long width;
# halving it gives the largest limit the csv module can store, without
# the overflow that passing sys.maxsize can trigger where C long is 32-bit.
csv.field_size_limit(ctypes.c_ulong(-1).value >> 1)
27
-
28
-
29
# BibTeX citation for the dataset's source paper
# (Zhang, Zhao & LeCun, 2015, arXiv:1509.01626).
_CITATION = """\
@misc{zhang2015characterlevel,
title={Character-level Convolutional Networks for Text Classification},
author={Xiang Zhang and Junbo Zhao and Yann LeCun},
year={2015},
eprint={1509.01626},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
39
-
40
# Human-readable dataset summary (surfaced through the DatasetInfo below).
_DESCRIPTION = """\
The Sogou News dataset is a mixture of 2,909,551 news articles from the SogouCA and SogouCS news corpora, in 5 categories.
The number of training samples selected for each class is 90,000 and testing 12,000. Note that the Chinese characters have been converted to Pinyin.
classification labels of the news are determined by their domain names in the URL. For example, the news with
URL http://sports.sohu.com is categorized as a sport class.
"""

# Tarball containing sogou_news_csv/train.csv and sogou_news_csv/test.csv.
_DATA_URL = "https://s3.amazonaws.com/fast-ai-nlp/sogou_news_csv.tgz"
48
-
49
-
50
class Sogou_News(datasets.GeneratorBasedBuilder):
    """Builder for the Sogou News text-classification dataset."""

    def _info(self):
        """Return the DatasetInfo: features, description, and citation."""
        label_names = ["sports", "finance", "entertainment", "automobile", "technology"]
        features = datasets.Features(
            {
                "title": datasets.Value("string"),
                "content": datasets.Value("string"),
                "label": datasets.features.ClassLabel(names=label_names),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default (input, target) pairing is declared for this dataset.
            supervised_keys=None,
            homepage="",  # didn't find a real homepage
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and declare the test/train splits."""
        archive = dl_manager.download(_DATA_URL)
        # Both splits stream the same archive; each picks out its own CSV member.
        csv_members = {
            datasets.Split.TEST: "sogou_news_csv/test.csv",
            datasets.Split.TRAIN: "sogou_news_csv/train.csv",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": member, "files": dl_manager.iter_archive(archive)},
            )
            for split, member in csv_members.items()
        ]

    def _generate_examples(self, filepath, files):
        """Yield (id, example) pairs parsed from the named CSV in the archive.

        CSV columns are: 1-based class label, title, content. The label is
        shifted to a 0-based ClassLabel index.
        """
        for member_path, handle in files:
            if member_path != filepath:
                continue
            text_lines = (raw.decode("utf-8") for raw in handle)
            for idx, record in enumerate(csv.reader(text_lines)):
                yield idx, {"title": record[1], "content": record[2], "label": int(record[0]) - 1}
            # Only one archive member matches; stop scanning once it is done.
            break