# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NewsMTSC Dataset: (Multi-)Target-dependent Sentiment Classification in News Articles"""

import json

import datasets


_CITATION = """\
@InProceedings{Hamborg2021b,
  author    = {Hamborg, Felix and Donnay, Karsten},
  title     = {NewsMTSC: (Multi-)Target-dependent Sentiment Classification in News Articles},
  booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics (EACL 2021)},
  year      = {2021},
  month     = {Apr.},
  location  = {Virtual Event},
}
"""

_DESCRIPTION = """\
NewsMTSC: A large, manually annotated dataset for target-dependent sentiment classification in English news articles.
"""

_HOMEPAGE = "https://github.com/fhamborg/NewsMTSC/"

_LICENSE = "MIT"

_URL = "https://raw.githubusercontent.com/fhamborg/NewsMTSC/a96f785fd3110c202e05e63579ddb30043eef128/NewsSentiment/experiments/default/datasets/"
_URLS = {
    "mt": {
        datasets.Split.TRAIN: _URL + "newsmtsc-mt/train.jsonl",
        datasets.Split.VALIDATION: _URL + "newsmtsc-mt/dev.jsonl",
        datasets.Split.TEST: _URL + "newsmtsc-mt/test.jsonl",
    },
    "rw": {
        datasets.Split.TRAIN: _URL + "newsmtsc-rw/train.jsonl",
        datasets.Split.VALIDATION: _URL + "newsmtsc-rw/dev.jsonl",
        datasets.Split.TEST: _URL + "newsmtsc-rw/test.jsonl",
    },
}


class AllowNoFurtherMentionsFeatures(datasets.Features):
    """Features subclass that tolerates targets without the optional "further_mentions" field."""

    def encode_example(self, example):
        # Not every annotated target carries "further_mentions"; default it to an
        # empty list so the example matches the fixed schema before encoding.
        for target in example["targets"]:
            if "further_mentions" not in target:
                target["further_mentions"] = []
        return super().encode_example(example)


class NewsSentimentNewsmtsc(datasets.GeneratorBasedBuilder):
    """NewsMTSC Dataset: A large, manually annotated dataset for target-dependent sentiment classification in political news articles."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="mt",
            version=VERSION,
            description="Multi-target: sentences that contain at least two target mentions.",
        ),
        datasets.BuilderConfig(
            name="rw",
            version=VERSION,
            description="Real-world: sentences sampled so that their distribution of sentiment and other factors resembles real-world news coverage (see the paper).",
        ),
    ]

    DEFAULT_CONFIG_NAME = "rw"

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=AllowNoFurtherMentionsFeatures(
                {
                    "primary_gid": datasets.Value("string"),
                    "sentence_normalized": datasets.Value("string"),
                    "targets": datasets.features.Sequence(
                        {
                            "Input.gid": datasets.Value("string"),
                            "from": datasets.Value("uint32"),
                            "to": datasets.Value("uint32"),
                            "mention": datasets.Value("string"),
                            "polarity": datasets.Value("float"),
                            "further_mentions": datasets.features.Sequence(
                                {
                                    "from": datasets.Value("uint32"),
                                    "to": datasets.Value("uint32"),
                                    "mention": datasets.Value("string"),
                                },
                            ),
                        },
                    ),
                },
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[datasets.Split.TRAIN],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[datasets.Split.TEST],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[datasets.Split.VALIDATION],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)
                if split == "test":
                    # Withhold the gold polarity labels for the test split.
                    for target in data["targets"]:
                        target["polarity"] = None
                yield data["primary_gid"], data
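

# Minimal usage sketch (not part of the loading script itself): it illustrates how
# this builder is typically exercised via `datasets.load_dataset`. The file name
# `news_sentiment_newsmtsc.py` is an assumption for illustration, and the GitHub raw
# URLs above must be reachable. Recent versions of `datasets` may additionally
# require `trust_remote_code=True`, or may no longer support script-based datasets.
if __name__ == "__main__":
    # Load the default "rw" configuration; pass name="mt" for the multi-target variant.
    dataset = datasets.load_dataset("news_sentiment_newsmtsc.py", name="rw")
    example = dataset["train"][0]
    print(example["sentence_normalized"])
    # "targets" is a Sequence of dicts, so it is returned as a dict of lists.
    print(example["targets"]["mention"], example["targets"]["polarity"])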