"""FaQUAD-NLI dataset"""

import json

import datasets
import pandas as pd

_CITATION = """
"""

_DESCRIPTION = """
"""

_URLS = {
    "data": "https://raw.githubusercontent.com/liafacom/faquad/6ad978f20672bb41625b3b71fbe4a88b893d0a86/data/dataset.json",
    "spans": "https://huggingface.co/datasets/ruanchaves/faquad-nli/raw/main/spans.csv"
}
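
# spans.csv maps each candidate sentence to a split. Its columns, as consumed
# by _generate_examples below: split, document_index, paragraph_index,
# sentence_start_char, sentence_end_char.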

def check_overlap(interval1, interval2):
    """Return True if the closed integer intervals [start, end] overlap."""
    return not (interval1[1] < interval2[0] or interval2[1] < interval1[0])
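
# Endpoints are treated as inclusive, so touching intervals count as
# overlapping:
#   check_overlap((0, 10), (10, 20))  -> True
#   check_overlap((0, 9), (10, 20))   -> False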


class Faquad(datasets.GeneratorBasedBuilder):
    """Builder that converts FaQuAD QA annotations into NLI-style sentence pairs."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "document_index": datasets.Value("int32"),
                    "document_title": datasets.Value("string"),
                    "paragraph_index": datasets.Value("int32"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "label": datasets.Value("int32")
                }),
            supervised_keys=None,
            homepage="https://github.com/franciellevargas/HateBR",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data": downloaded_files["data"],
                    "spans": downloaded_files["spans"],
                    "split": "train"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data": downloaded_files["data"],
                    "spans": downloaded_files["spans"],
                    "split": "validation"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data": downloaded_files["data"],
                    "spans": downloaded_files["spans"],
                    "split": "test"
                }
            )
        ]

    def _generate_examples(self, data, spans, split):
        with open(data, "r", encoding="utf-8") as f:
            json_data = json.load(f)

        # Each row of spans.csv selects one candidate sentence from a FaQuAD
        # paragraph and assigns it to a split.
        spans = pd.read_csv(spans).to_dict("records")
        counter = 0
        for span_row in spans:
            if span_row["split"] != split:
                continue

            document = json_data["data"][span_row["document_index"]]
            document_title = document["title"]
            paragraph = document["paragraphs"][span_row["paragraph_index"]]

            # Slice the candidate sentence out of the paragraph context using
            # the character offsets recorded in spans.csv.
            sentence = paragraph["context"][
                span_row["sentence_start_char"]:span_row["sentence_end_char"]
            ]
            sentence_interval = (span_row["sentence_start_char"], span_row["sentence_end_char"])

            for qas_row in paragraph["qas"]:
                question = qas_row["question"]
                # Character intervals of every annotated answer to this question.
                question_spans = []
                for qas_answer in qas_row["answers"]:
                    qas_answer_start_span = qas_answer["answer_start"]
                    qas_answer_end_span = qas_answer["answer_start"] + len(qas_answer["text"])
                    question_spans.append((qas_answer_start_span, qas_answer_end_span))
                # Label 1 if the sentence overlaps any answer span; the for/else
                # reaches the else branch (label 0) only when no overlap is found.
                for question_interval in question_spans:
                    if check_overlap(sentence_interval, question_interval):
                        yield counter, {
                            "document_index": span_row["document_index"],
                            "document_title": document_title,
                            "paragraph_index": span_row["paragraph_index"],
                            "question": question,
                            "answer": sentence,
                            "label": 1,
                        }
                        counter += 1
                        break
                else:
                    yield counter, {
                        "document_index": span_row["document_index"],
                        "document_title": document_title,
                        "paragraph_index": span_row["paragraph_index"],
                        "question": question,
                        "answer": sentence,
                        "label": 0,
                    }
                    counter += 1
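
# Shape of each yielded example (index, features); the values here are
# illustrative placeholders, not real data:
#   (0, {"document_index": 3, "document_title": "...", "paragraph_index": 1,
#        "question": "...", "answer": "...", "label": 1})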