# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_data dataset."""


import csv

import datasets
from datasets.tasks import TextClassification


_DESCRIPTION = """"""

_HOMEPAGE = "https://gitee.com/didi233/test_date_gitee"

_LICENSE = "Creative Commons Attribution 4.0 International"

# _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/freeziyou/live_stream_dataset/main/train.csv"
_TRAIN_DOWNLOAD_URL = "https://gitee.com/didi233/test_date_gitee/raw/master/train.csv"
# _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/freeziyou/live_stream_dataset/main/test.csv"
_TEST_DOWNLOAD_URL = "https://gitee.com/didi233/test_date_gitee/raw/master/test.csv"


class test_data_huggingface(datasets.GeneratorBasedBuilder):
    """test_data dataset."""

    VERSION = datasets.Version("1.2.0")

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "label": datasets.features.ClassLabel(
                    names=[
                        "none",
                        "like",
                        "unlike",
                        "hope",
                        "questioning",
                        "express_surprise",
                        "normal_interaction",
                        "express_sad",
                        "tease",
                        "meme",
                        "express_abashed",
                    ]
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(
                f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            # Call next to skip the header row.
            next(csv_reader)
            for id_, row in enumerate(csv_reader):
                text, label = row
                yield id_, {"text": text, "label": label}
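

if __name__ == "__main__":
    # Usage sketch (not part of the loading script proper): a minimal check of the
    # builder, assuming this file is saved as a local loading script and the Gitee
    # URLs above are reachable. `load_dataset` accepts a path to a script file.
    from datasets import load_dataset

    dataset = load_dataset(__file__)
    print(dataset)                                   # DatasetDict with "train" and "test" splits
    print(dataset["train"].features["label"].names)  # the 11 class labels defined in _info()
    print(dataset["train"][0])                       # first example: {"text": ..., "label": ...}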