Languages:
English
Multilinguality:
monolingual
Size Categories:
unknown
Language Creators:
found
Annotations Creators:
machine-generated
Source Datasets:
extended|qa_srl
ArXiv:
arxiv:2101.11177
License:
File size: 5,868 Bytes
# -*- coding: utf-8 -*-
"""LSOIE: A Large-Scale Dataset for Supervised Open Information Extraction."""
from zipfile import ZipFile

import datasets
from datasets.info import SupervisedKeysData

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{lsoie-2021,
  title={{LSOIE}: A Large-Scale Dataset for Supervised Open Information Extraction},
  author={Solawetz, Jacob and Larson, Stefan},
  journal={arXiv preprint arXiv:2101.11177},
  year={2021},
  url={https://arxiv.org/pdf/2101.11177.pdf}
}
"""

_DESCRIPTION = """
The Large-Scale Open Information Extraction Dataset (LSOIE) is 20 times
larger than the next largest human-annotated Open Information Extraction
(OIE) dataset. LSOIE is built upon the QA-SRL 2.0 dataset.
"""

_URL = "https://github.com/Jacobsolawetz/large-scale-oie/"
_URLS = {
    "zip": _URL + "raw/master/dataset_creation/lsoie_data/lsoie_data.zip"
}
_ARCHIVE_FILES = [
    "lsoie_science_train.conll",
    "lsoie_science_dev.conll",
    "lsoie_science_test.conll",
    "lsoie_wiki_train.conll",
    "lsoie_wiki_dev.conll",
    "lsoie_wiki_test.conll",
]
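
# Note: the zip archive stores these files under a top-level "lsoie_data/"
# directory; _generate_examples() below opens members with that prefix.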


class LsoieConfig(datasets.BuilderConfig):
    """BuilderConfig for LSOIE."""

    def __init__(self, subset="wiki", **kwargs):
        """BuilderConfig for LSOIE.

        Args:
          subset: str - either "wiki" or "science".
          **kwargs: keyword arguments forwarded to super.
        """
        super(LsoieConfig, self).__init__(**kwargs)
        self.subset = subset


class Lsoie(datasets.GeneratorBasedBuilder):
    """LSOIE: A Large-Scale Dataset for Supervised Open Information Extraction"""

    BUILDER_CONFIGS = [
        LsoieConfig(
            name="wiki",
            description="LSOIE dataset built over Wikipedia and Wikinews",
            subset="wiki",
        ),
        LsoieConfig(
            name="sci",
            description="LSOIE dataset built over the science domain",
            subset="science",
        ),
    ]
    
    DEFAULT_CONFIG_NAME = "wiki"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "word_ids": datasets.Sequence(datasets.Value("int16")),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "pred": datasets.Value("string"),
                    "pred_ids": datasets.Sequence(datasets.Value("int16")),
                    "head_pred_id": datasets.Value("int16"),
                    "sent_id": datasets.Value("int16"),
                    "run_id": datasets.Value("int16"),
                    "label": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=SupervisedKeysData(input="word_ids", output="label"),
            homepage=_URL,
            citation=_CITATION,
            # There is no default task template for open information extraction yet.
            # task_templates=[
            #     OpenInformationExtraction(
            #         question_column="question", context_column="context", answers_column="answers"
            #     )
            # ],
        )
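
    # A hedged sketch of one yielded record (the values are invented for
    # illustration, not drawn from the corpus). Each record covers a single
    # (sentence, predicate) extraction, with one argument label per token:
    #
    #   {
    #       "word_ids": [0, 1, 2, 3],
    #       "words": ["John", "founded", "the", "company"],
    #       "pred": "founded",
    #       "pred_ids": [1],
    #       "head_pred_id": 1,
    #       "sent_id": 0,
    #       "run_id": 0,
    #       "label": ["B-A0", "B-P", "B-A1", "I-A1"],
    #   }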

    def _split_generators(self, dl_manager):
        downloaded_archive = dl_manager.download(_URLS)["zip"]
        name_pre = "lsoie_" + self.config.subset + "_"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archive_path": downloaded_archive,
                    "file_name": name_pre + "train.conll",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archive_path": downloaded_archive,
                    "file_name": name_pre + "dev.conll",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "archive_path": downloaded_archive,
                    "file_name": name_pre + "test.conll",
                },
            ),
        ]
                        
    def _generate_examples(self, archive_path, file_name):
        """Yields the examples parsed from one tab-separated CoNLL file."""
        logger.info("generating examples from archive: %s", archive_path)
        # Parsers for each CoNLL column, in the order they appear in the file.
        columns = {
            "word_ids": int,
            "words": str,
            "pred": str,
            # pred_ids is serialized like "[3,4]"; parse it into a list of
            # ints so it matches the Sequence(int16) feature from _info().
            "pred_ids": lambda x: [int(num) for num in x.strip("[]").split(",")],
            "head_pred_id": int,
            "sent_id": int,
            "run_id": int,
            "label": str,
        }
        # These columns hold one value per token and accumulate into lists.
        list_columns = ["word_ids", "words", "label"]
        sep = "\t"
        key = 0
        sentence = dict()
        for column in list_columns:
            sentence[column] = []
        with ZipFile(archive_path) as zipfile:
            with zipfile.open("lsoie_data/" + file_name, mode="r") as file:
                for line in file:
                    line = line.decode("utf-8").rstrip("\r\n").split(sep=sep)
                    # A blank line marks the end of an extraction.
                    if line[0] == "":
                        yield key, sentence
                        key += 1
                        # Start a fresh dict so the example already yielded is
                        # not mutated in place.
                        sentence = dict(sentence)
                        for column in list_columns:
                            sentence[column] = []
                        continue
                    for column, val in zip(columns.keys(), line):
                        val = columns[column](val)
                        if column in list_columns:
                            sentence[column].append(val)
                        else:
                            sentence[column] = val
                # Flush the last example if the file does not end with a
                # blank line.
                if sentence["words"]:
                    yield key, sentence
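

# A minimal usage sketch, not part of the loading script itself. It assumes
# this file is saved locally as "lsoie.py" (a hypothetical path); the name of
# the dataset on the Hub may differ.
#
#     import datasets
#
#     wiki = datasets.load_dataset("lsoie.py", name="wiki")
#     sample = wiki["train"][0]
#     print(sample["words"], sample["label"])
#
# Passing name="sci" selects the science-domain configuration instead.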