import json

from datasets import DatasetInfo, SplitGenerator, ClassLabel, Sequence, Split, GeneratorBasedBuilder
from datasets.features import Features, Value


class WikiHowNFQADataset(GeneratorBasedBuilder):
    """Builder for the WikiHowNFQA multi-document non-factoid question answering dataset."""

    VERSION = "1.0.0"

    def _info(self):
        """Returns the dataset metadata and feature schema."""
        features = Features({
            'article_id': Value('int32'),
            'question': Value('string'),
            'answer': Value('string'),
            'related_document_urls_wayback_snapshots': Sequence(Value('string')),
            'split': ClassLabel(names=['train', 'valid', 'test']),
            'cluster': Value('int32'),
        })

        return DatasetInfo(
            description="WikiHowNFQA dataset",
            features=features,
            homepage="https://huggingface.co/datasets/Lurunchik/WikiHowNFQA",
            citation="""@inproceedings{bolotova2023wikihowqa,
      title={WikiHowQA: A Comprehensive Benchmark for Multi-Document Non-Factoid Question Answering}, 
      author={Bolotova, Valeriia and Blinov, Vladislav and Filippova, Sofya and Scholer, Falk and Sanderson, Mark},
      booktitle="Proceedings of the 61th Conference of the Association for Computational Linguistics",
      year={2023}
}"""
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract('https://huggingface.co/datasets/Lurunchik/WikiHowNFQA/resolve/main/WikiHowNFQA.jsonl')

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_file,
                    "split": "train",
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_file,
                    "split": "valid",
                },
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_file,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (key, example) pairs for the requested split from the shared JSONL file."""
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                data = json.loads(line)
                # Each line is one JSON record; keep only the ones belonging to this split.
                if data['split'] == split:
                    yield id_, {
                        'article_id': data['article_id'],
                        'question': data['question'],
                        'answer': data['answer'],
                        'related_document_urls_wayback_snapshots': data['related_document_urls_wayback_snapshots'],
                        'split': data['split'],
                        'cluster': data['cluster'],
                    }


if __name__ == "__main__":
    # Build the dataset into the local cache only when the script is run directly;
    # `load_dataset` instantiates the builder itself, so this must not run on import.
    builder = WikiHowNFQADataset()
    builder.download_and_prepare()
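
    # Usage sketch (an illustrative addition, not part of the original script):
    # after `download_and_prepare()`, `as_dataset()` returns a DatasetDict keyed by
    # split name. Consumers of the published dataset would normally just call
    # `datasets.load_dataset("Lurunchik/WikiHowNFQA")` instead of running this file.
    dataset = builder.as_dataset()
    print(dataset)                          # DatasetDict with train/validation/test splits
    print(dataset["train"][0]["question"])  # inspect a single training example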