dd123 committed on
Commit
5f7033b
1 Parent(s): 5817359

Create test_data.py

Files changed (1)
  1. test_data.py +87 -0
test_data.py ADDED
@@ -0,0 +1,87 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """test_data dataset."""
+
+
+ import csv
+
+ import datasets
+ from datasets.tasks import TextClassification
+
+
+ _DESCRIPTION = """"""
+
+ _HOMEPAGE = "https://github.com/freeziyou/test_data"
+
+ _LICENSE = "Creative Commons Attribution 4.0 International"
+
+ _TRAIN_DOWNLOAD_URL = (
+     "https://raw.githubusercontent.com/freeziyou/test_data/main/train.csv"
+ )
+ _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/freeziyou/test_data/main/test.csv"
+
+
+ class test_data(datasets.GeneratorBasedBuilder):
+     """test_data dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "label": datasets.features.ClassLabel(
+                     names=[
+                         "none",
+                         "like",
+                         "unlike",
+                         "hope",
+                         "questioning",
+                         "express_surprise",
+                         "normal_interaction",
+                         "express_sad",
+                         "tease",
+                         "meme",
+                         "express_abashed",
+                     ]
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             task_templates=[TextClassification(text_column="text", label_column="label")],
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
+         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as f:
+             csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
+             # call next to skip header
+             next(csv_reader)
+             for id_, row in enumerate(csv_reader):
+                 text, label = row
+                 yield id_, {"text": text, "label": label}
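
For reference, a minimal usage sketch, not part of the commit itself: it assumes the script above is saved locally as test_data.py and that the installed datasets version still supports script-based loading and task templates; the printed example values are placeholders.

# Hypothetical usage of the dataset script added in this commit.
from datasets import load_dataset

# Pointing load_dataset at the local script downloads train.csv / test.csv
# from the URLs defined in the script and builds "train" and "test" splits.
dataset = load_dataset("test_data.py")

print(dataset["train"].features["label"].names)  # the 11 class names defined above
print(dataset["train"][0])  # e.g. {"text": "...", "label": 3}; label is a ClassLabel index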