Modalities: Text · Formats: parquet · Languages: English · Libraries: Datasets, pandas
albertvillanova (HF staff) committed
Commit a2eec3d · 1 Parent(s): fcc40c4

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (0501e18cf60f0eb30f2144e3627359e6c85ae4f3)
- Delete loading script (98175af0c6bcd9d697a07833f18ed082fa185f9e)
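With the splits stored as Parquet and a `configs` mapping in the card (see the README diff below), `datasets` can serve the data directly; no loading script is executed. A minimal sketch, assuming the dataset's repo id is `per_sent`:

```python
from datasets import load_dataset

# Load the Parquet-backed dataset; the repo id "per_sent" is an assumption
# based on the card title.
ds = load_dataset("per_sent")
print({name: len(split) for name, split in ds.items()})
# Expected per the README metadata: train 3355, test_random 579,
# test_fixed 827, validation 578.
```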

README.md CHANGED
@@ -152,19 +152,30 @@ dataset_info:
           '2': Positive
   splits:
   - name: train
-    num_bytes: 14595163
+    num_bytes: 14595147
     num_examples: 3355
   - name: test_random
-    num_bytes: 2629500
+    num_bytes: 2629484
     num_examples: 579
   - name: test_fixed
-    num_bytes: 3881800
+    num_bytes: 3881784
     num_examples: 827
   - name: validation
-    num_bytes: 2322922
+    num_bytes: 2322906
     num_examples: 578
-  download_size: 23117196
-  dataset_size: 23429385
+  download_size: 14120693
+  dataset_size: 23429321
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test_random
+    path: data/test_random-*
+  - split: test_fixed
+    path: data/test_fixed-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for PerSenT
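The added `configs` block is what maps each split name to its Parquet shard, so a shard can also be read outside `datasets`. A hedged sketch using `huggingface_hub` and pandas (the filename matches the file list below; the repo id `per_sent` is an assumption):

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch one Parquet shard from the dataset repo and read it with pandas.
path = hf_hub_download(
    repo_id="per_sent",  # assumed repo id
    filename="data/train-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(len(df))  # 3355 rows per the README metadata
```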
data/test_fixed-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa009d6d8f0c7919b247496fc2a6ff7287ec68398adc46d61cc6c2e04c3c8af1
+size 2265060
data/test_random-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac4a65f13ca3d875f112de30348f0bd20ca8ae86feb12ce4fd3fb4d6487fa080
+size 1600013
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aca39ef7b24fd861d5a0101458a600472c164a2451b493c8480a4a731cb7f776
+size 8843903
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6517edfdcaaa1fcafd5091315ee38b2dd02926c2c248daccd6b654f8e1ad9de5
+size 1411717
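Each ADDED file above is a Git LFS pointer, not the Parquet bytes themselves: three text lines recording the spec version, the SHA-256 of the real blob, and its size in bytes. A small sketch that parses this format:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse the three-line Git LFS pointer format shown above."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:fa009d6d8f0c7919b247496fc2a6ff7287ec68398adc46d61cc6c2e04c3c8af1
size 2265060"""
assert parse_lfs_pointer(pointer)["size"] == 2265060
```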
per_sent.py DELETED
@@ -1,149 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""**Person SenTiment, a challenge dataset for author sentiment prediction in the news domain**
-
-PerSenT is a crowd-sourced dataset that captures the sentiment of an author towards the main entity in a news article. This dataset contains annotations for 5.3k documents and 38k paragraphs covering 3.2k unique entities.
-
-"""
-
-
-import csv
-
-import datasets
-from datasets.splits import NamedSplit
-
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@inproceedings{bastan2020authors,
-    title={Author's Sentiment Prediction},
-    author={Mohaddeseh Bastan and Mahnaz Koupaee and Youngseo Son and Richard Sicoli and Niranjan Balasubramanian},
-    year={2020},
-    eprint={2011.06128},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-Person SenTiment (PerSenT) is a crowd-sourced dataset that captures the sentiment of an author towards the main entity in a news article. This dataset contains annotations for 5.3k documents and 38k paragraphs covering 3.2k unique entities.
-
-The dataset consists of sentiment annotations on news articles about people. For each article, annotators judge what the author’s sentiment is towards the main (target) entity of the article. The annotations also include similar judgments on paragraphs within the article.
-
-To split the dataset, entities were divided into 4 mutually exclusive sets. Due to the nature of news collections, some entities tend to dominate the collection: four entities were the main entity in nearly 800 articles. To prevent these entities from dominating the train or test splits, we moved them to a separate test collection. We split the remaining entities into training, dev, and test sets at random. Thus our collection includes one standard test set consisting of articles drawn at random (Test Standard -- `test_random`), and a second test set which contains multiple articles about a small number of popular entities (Test Frequent -- `test_fixed`).
-"""
-
-_LICENSE = "Creative Commons Attribution 4.0 International License"
-
-_URLs = {
-    "train": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/train.csv",
-    "dev": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/dev.csv",
-    "test_random": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/random_test.csv",
-    "test_fixed": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/fixed_test.csv",
-}
-
-
-class PerSent(datasets.GeneratorBasedBuilder):
-    """Person SenTiment (PerSenT) is a crowd-sourced dataset that captures the sentiment of an author towards the main entity in a news article. This dataset contains annotations for 5.3k documents and 38k paragraphs covering 3.2k unique entities."""
-
-    VERSION = datasets.Version("1.1.0")
-    LABELS = ["Negative", "Neutral", "Positive"]
-    LABEL_COLS = ["TRUE_SENTIMENT"] + ["Paragraph" + str(i) for i in range(16)]
-
-    def _info(self):
-        label = datasets.features.ClassLabel(names=self.LABELS)
-        feature_dict = {
-            "DOCUMENT_INDEX": datasets.Value("int64"),
-            "TITLE": datasets.Value("string"),
-            "TARGET_ENTITY": datasets.Value("string"),
-            "DOCUMENT": datasets.Value("string"),
-            "MASKED_DOCUMENT": datasets.Value("string"),
-        }
-        feature_dict.update({k: label for k in self.LABEL_COLS})
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(feature_dict),
-            supervised_keys=None,
-            homepage="https://stonybrooknlp.github.io/PerSenT",
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        train_path = dl_manager.download(_URLs["train"])
-        dev_path = dl_manager.download(_URLs["dev"])
-        test_fixed_path = dl_manager.download(_URLs["test_fixed"])
-        test_random_path = dl_manager.download(_URLs["test_random"])
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": train_path,
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=NamedSplit("test_random"),
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": test_random_path, "split": "test_random"},
-            ),
-            datasets.SplitGenerator(
-                name=NamedSplit("test_fixed"),
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": test_fixed_path, "split": "test_fixed"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": dev_path,
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples.
-
-        For examples with missing labels (empty strings in the original files), we replace them with -1.
-        """
-
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.reader(f)
-
-            # Skip the header row
-            _ = next(reader)
-
-            for id_, row in enumerate(reader):
-                doc_idx, title, target, doc, masked_doc, *labels = row
-
-                # Replace missing labels with -1
-                labels = [label if label in self.LABELS else -1 for label in labels]
-
-                example = {
-                    "DOCUMENT_INDEX": doc_idx,
-                    "TITLE": title,
-                    "TARGET_ENTITY": target,
-                    "DOCUMENT": doc,
-                    "MASKED_DOCUMENT": masked_doc,
-                }
-                example.update(dict(zip(self.LABEL_COLS, labels)))
-
-                yield id_, example
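For anyone who still needs the original CSV path, the deleted script's core transformation is easy to reproduce: read the CSV and coerce any label outside the three classes (including empty strings) to -1, the `ClassLabel` convention for missing values. A hedged pandas equivalent (the local file path is illustrative):

```python
import pandas as pd

LABELS = ["Negative", "Neutral", "Positive"]
LABEL_COLS = ["TRUE_SENTIMENT"] + [f"Paragraph{i}" for i in range(16)]

# Mirror of the deleted _generate_examples label handling:
# values outside the three classes become -1 (missing).
df = pd.read_csv("train.csv")  # illustrative local path
df[LABEL_COLS] = df[LABEL_COLS].where(df[LABEL_COLS].isin(LABELS), -1)
```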