# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NewsMTSC Dataset: (Multi-)Target-dependent Sentiment Classification in News Articles Dataset"""


import json

import datasets

_CITATION = """\
@InProceedings{Hamborg2021b,
  author    = {Hamborg, Felix and Donnay, Karsten},
  title     = {NewsMTSC: (Multi-)Target-dependent Sentiment Classification in News Articles},
  booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics (EACL 2021)},
  year      = {2021},
  month     = {Apr.},
  location  = {Virtual Event},
}
"""
_DESCRIPTION = """\
NewsMTSC: A large, manually annotated dataset for target-dependent sentiment classification in English news articles.
"""
_HOMEPAGE = "https://github.com/fhamborg/NewsMTSC/"
_LICENSE = "MIT"
_URL = "https://raw.githubusercontent.com/fhamborg/NewsMTSC/6b838e00f54423c253806327a0ae24dbffa24c9e/NewsSentiment/experiments/default/datasets/"
_URLS = {
    "rw": {
        datasets.Split.TRAIN: _URL + "newsmtsc-rw-hf/train.jsonl",
        datasets.Split.VALIDATION: _URL + "newsmtsc-rw-hf/dev.jsonl",
        datasets.Split.TEST: _URL + "newsmtsc-rw-hf/test.jsonl",
    },
    "mt": {
        datasets.Split.TRAIN: _URL + "newsmtsc-mt-hf/train.jsonl",
        datasets.Split.VALIDATION: _URL + "newsmtsc-mt-hf/dev.jsonl",
        datasets.Split.TEST: _URL + "newsmtsc-mt-hf/test.jsonl",
    },
}


class AllowNoFurtherMentionsFeatures(datasets.Features):
    """Features subclass, judging by its name intended to allow examples without further
    target mentions; currently it simply delegates to the parent implementation."""

    def encode_example(self, example):
        return super().encode_example(example)


class NewsSentimentNewsmtsc(datasets.GeneratorBasedBuilder):
    """NewsMTSC Dataset: A large, manually annotated dataset for target-dependent sentiment classification in political
    news articles."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="mt", version=VERSION, description="Multiple targets: each sentence contains two or more targets with individually labeled sentiment (in validation and test splits)"),
        datasets.BuilderConfig(name="rw", version=VERSION, description="Real world: distribution of sentiment classes resembles real-world distribution (in validation and test splits)"),
    ]

    DEFAULT_CONFIG_NAME = "rw"
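    # A minimal usage sketch for choosing between the two configurations (the Hub
    # repository name "fhamborg/news_sentiment_newsmtsc" is an assumption, not taken
    # from this file):
    #
    #     from datasets import load_dataset
    #     rw = load_dataset("fhamborg/news_sentiment_newsmtsc", "rw")  # real-world label distribution
    #     mt = load_dataset("fhamborg/news_sentiment_newsmtsc", "mt")  # several targets per sentence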

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=AllowNoFurtherMentionsFeatures(
                {
                    "mention": datasets.Value("string"),
                    "polarity": datasets.Value("int32"),
                    "from": datasets.Value("int32"),
                    "to": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                    "id": datasets.Value("string")
                },
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
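    # A hedged sketch of one record as yielded by _generate_examples (values are
    # illustrative, not taken from the dataset); "from"/"to" are presumably character
    # offsets of the target mention within "sentence":
    #
    #     {
    #         "mention": "the Justice Department",
    #         "polarity": 1,
    #         "from": 11,
    #         "to": 33,
    #         "sentence": "On Monday, the Justice Department announced the new policy.",
    #         "id": "example-0001",
    #     }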

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        # dl_manager.download returns the same nested mapping with each URL replaced by
        # the path of the locally cached file.
        data_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[datasets.Split.TRAIN],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[datasets.Split.TEST],
                    "split": "test"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[datasets.Split.VALIDATION],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Each line of the downloaded JSONL file is one target-sentence pair whose keys
        # match the feature schema defined in _info; the "id" field doubles as the example key.
        with open(filepath, encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)
                yield data["id"], data
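
# A minimal sketch for testing this script locally (assuming a datasets version that
# still supports loading dataset scripts, and that the file is saved e.g. as
# "newsmtsc.py"; both are assumptions):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/newsmtsc.py", "mt")
#     print(ds)             # DatasetDict with train/validation/test splits
#     print(ds["test"][0])  # one target-sentence pair with its polarity label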