system HF staff committed on
Commit
f14d494
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "The Sogou News dataset is a mixture of 2,909,551 news articles from the SogouCA and SogouCS news corpora, in 5 categories. \nThe number of training samples selected for each class is 90,000 and testing 12,000. Note that the Chinese characters have been converted to Pinyin.\nclassification labels of the news are determined by their domain names in the URL. For example, the news with\nURL http://sports.sohu.com is categorized as a sport class.\n", "citation": "@misc{zhang2015characterlevel,\n title={Character-level Convolutional Networks for Text Classification},\n author={Xiang Zhang and Junbo Zhao and Yann LeCun},\n year={2015},\n eprint={1509.01626},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n", "homepage": "", "license": "", "features": {"title": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 5, "names": ["sports", "finance", "entertainment", "automobile", "technology"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "sogou__news", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168645860, "num_examples": 60000, "dataset_name": "sogou__news"}, "train": {"name": "train", "num_bytes": 1257931136, "num_examples": 450000, "dataset_name": "sogou__news"}}, "download_checksums": {"https://s3.amazonaws.com/fast-ai-nlp/sogou_news_csv.tgz": {"num_bytes": 384269937, "checksum": "6b77fc935561d339b82aa552d7e31ea59eff492a494920579b3ce70604efb5c2"}}, "download_size": 384269937, "dataset_size": 1426576996, "size_in_bytes": 1810846933}}
dummy/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9738db98c7b7901ac227f213c53d0b47f37dec80d2bb697b0afe9d37372d6110
3
+ size 1636
sogou_news.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Sogou News"""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import csv
22
+ import os
23
+ import sys
24
+
25
+ import datasets
26
+
27
+
28
+ csv.field_size_limit(sys.maxsize)
29
+
30
+
31
+ _CITATION = """\
32
+ @misc{zhang2015characterlevel,
33
+ title={Character-level Convolutional Networks for Text Classification},
34
+ author={Xiang Zhang and Junbo Zhao and Yann LeCun},
35
+ year={2015},
36
+ eprint={1509.01626},
37
+ archivePrefix={arXiv},
38
+ primaryClass={cs.LG}
39
+ }
40
+ """
41
+
42
+ _DESCRIPTION = """\
43
+ The Sogou News dataset is a mixture of 2,909,551 news articles from the SogouCA and SogouCS news corpora, in 5 categories.
44
+ The number of training samples selected for each class is 90,000 and testing 12,000. Note that the Chinese characters have been converted to Pinyin.
45
+ classification labels of the news are determined by their domain names in the URL. For example, the news with
46
+ URL http://sports.sohu.com is categorized as a sport class.
47
+ """
48
+
49
+ _DATA_URL = "https://s3.amazonaws.com/fast-ai-nlp/sogou_news_csv.tgz"
50
+
51
+
52
class Sogou_News(datasets.GeneratorBasedBuilder):
    """Sogou News dataset builder: Chinese news articles (transliterated to
    Pinyin) labeled with one of 5 topic categories."""

    def _info(self):
        """Return the dataset metadata: two string features ("title",
        "content") and a 5-way ClassLabel ("label")."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=["sports", "finance", "entertainment", "automobile", "technology"]
                    ),
                }
            ),
            # No default supervised_keys: each example carries two text fields
            # ("title" and "content"), so there is no single (input, target)
            # pair to declare.
            supervised_keys=None,
            homepage="",  # didn't find a real homepage
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then declare the test and train
        splits, each pointing at its CSV file inside the extracted folder."""
        dl_dir = dl_manager.download_and_extract(_DATA_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "sogou_news_csv", "test.csv")}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "sogou_news_csv", "train.csv")}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs from a headerless CSV whose rows are
        (label, title, content).

        Labels in the CSV are 1-based; they are shifted down by one to match
        the 0-based ClassLabel indices declared in _info.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            data = csv.reader(csv_file)
            for id_, row in enumerate(data):
                yield id_, {"title": row[1], "content": row[2], "label": int(row[0]) - 1}