# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""DanFEVER: A FEVER dataset for Danish"""

import csv

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{norregaard-derczynski-2021-danfever,
    title = "{D}an{FEVER}: claim verification dataset for {D}anish",
    author = "N{\o}rregaard, Jeppe  and
      Derczynski, Leon",
    booktitle = "Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa)",
    month = may # " 31--2 " # jun,
    year = "2021",
    address = "Reykjavik, Iceland (Online)",
    publisher = {Link{\"o}ping University Electronic Press, Sweden},
    url = "https://aclanthology.org/2021.nodalida-main.47",
    pages = "422--428",
    abstract = "We present a dataset, DanFEVER, intended for multilingual misinformation research. The dataset is in Danish and has the same format as the well-known English FEVER dataset. It can be used for testing methods in multilingual settings, as well as for creating models in production for the Danish language.",
}
"""

_DESCRIPTION = """\

"""

_URL = "https://media.githubusercontent.com/media/StrombergNLP/danfever/main/tsv/da_fever.tsv"


class DanFeverConfig(datasets.BuilderConfig):
    """BuilderConfig for DanFever"""

    def __init__(self, **kwargs):
        """BuilderConfig DanFever.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(DanFeverConfig, self).__init__(**kwargs)


class DanFever(datasets.GeneratorBasedBuilder):
    """DanFever dataset."""

    BUILDER_CONFIGS = [
        DanFeverConfig(name="DanFever", version=datasets.Version("1.0.0"), description="FEVER dataset for Danish"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=[
                            "Refuted",
                            "Supported",
                            "NotEnoughInfo",
                        ]
                    ),
                    "evidence_extract": datasets.Value("string"),
                    "verifiable": datasets.features.ClassLabel(
                        names=[
                            "NotVerifiable",
                            "Verifiable",
                        ]
                    ),
                    "evidence": datasets.Value("string"),
                    "original_id": datasets.Value("string"),

                }
            ),
            supervised_keys=None,
            homepage="https://stromberg.ai/publication/danfever/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators. The data ships as a single TSV file, exposed here as one train split."""
        downloaded_file = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) tuples read from the downloaded TSV file."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
            guid = 0
            for instance in data_reader:
                # Drop the running row number, keep the source id as
                # `original_id`, and assign a fresh sequential `id`.
                instance.pop("nr.")
                instance["original_id"] = instance.pop("id")
                instance["id"] = str(guid)
                yield guid, instance
                guid += 1
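
# Usage sketch (illustrative only, not part of the loading script). It assumes
# the Hub id "strombergnlp/danfever" matches this repository; passing a local
# path to this script file works the same way.
#
#     from datasets import load_dataset
#
#     dan_fever = load_dataset("strombergnlp/danfever", split="train")
#     print(dan_fever[0]["claim"])        # the Danish claim text
#     print(dan_fever.features["label"])  # ClassLabel: Refuted / Supported / NotEnoughInfo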