albertvillanova HF Staff committed on
Commit 0a1a067 · verified · 1 Parent(s): 99c1098

Delete loading script

Files changed (1)
  1. labels.py +0 -93
labels.py DELETED
@@ -1,93 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """AG News topic classification dataset."""
-
-
- import csv
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- _DESCRIPTION = """\
- AG is a collection of more than 1 million news articles. News articles have been
- gathered from more than 2000 news sources by ComeToMyHead in more than 1 year of
- activity. ComeToMyHead is an academic news search engine which has been running
- since July, 2004. The dataset is provided by the academic comunity for research
- purposes in data mining (clustering, classification, etc), information retrieval
- (ranking, search, etc), xml, data compression, data streaming, and any other
- non-commercial activity. For more information, please refer to the link
- http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html .
- The AG's news topic classification dataset is constructed by Xiang Zhang
- (xiang.zhang@nyu.edu) from the dataset above. It is used as a text
- classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann
- LeCun. Character-level Convolutional Networks for Text Classification. Advances
- in Neural Information Processing Systems 28 (NIPS 2015).
- """
-
- _CITATION = """\
- @inproceedings{Zhang2015CharacterlevelCN,
-   title={Character-level Convolutional Networks for Text Classification},
-   author={Xiang Zhang and Junbo Jake Zhao and Yann LeCun},
-   booktitle={NIPS},
-   year={2015}
- }
- """
-
- _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv"
- _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv"
-
-
- class AGNews(datasets.GeneratorBasedBuilder):
-     """AG News topic classification dataset."""
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "text": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(names=["World", "Sports", "Business", "Sci/Tech"]),
-                 }
-             ),
-             homepage="http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html",
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="text", label_column="label")],
-         )
-
-     def _split_generators(self, dl_manager):
-         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Generate AG News examples."""
-         with open(filepath, encoding="utf-8") as csv_file:
-             csv_reader = csv.reader(
-                 csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
-             )
-             for id_, row in enumerate(csv_reader):
-                 label, title, description = row
-                 # Original labels are [1, 2, 3, 4] ->
-                 # ['World', 'Sports', 'Business', 'Sci/Tech']
-                 # Re-map to [0, 1, 2, 3].
-                 label = int(label) - 1
-                 text = " ".join((title, description))
-                 yield id_, {"text": text, "label": label}
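
With the loading script removed, datasets reads this dataset straight from the data files hosted on the Hub instead of executing custom Python. A minimal sketch of the script-free loading path, assuming the dataset remains published under the ag_news identifier with the same train/test splits and label names:

from datasets import load_dataset

# Reads the converted data files on the Hub directly; no loading script
# (and hence no trust_remote_code) is involved.
ds = load_dataset("ag_news")

print(ds)                                   # DatasetDict with "train" and "test" splits
print(ds["train"][0])                       # {"text": "...", "label": <0-3>}
print(ds["train"].features["label"].names)  # ["World", "Sports", "Business", "Sci/Tech"]

The [1, 2, 3, 4] -> [0, 1, 2, 3] label re-mapping that _generate_examples performed above is presumably baked into the hosted files, so the integer labels should line up with the ClassLabel names in the same order.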