Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: Japanese
Size: 10K - 100K
Tags: question-generation

Commit: init

Files changed:
- data/processed/test00.jsonl +0 -0
- data/processed/test01.jsonl +0 -0
- data/processed/test02.jsonl +0 -0
- data/processed/test03.jsonl +0 -0
- data/processed/train00.jsonl +0 -0
- data/processed/train01.jsonl +0 -0
- data/processed/train02.jsonl +0 -0
- data/processed/train03.jsonl +0 -0
- data/processed/train04.jsonl +0 -0
- data/processed/train05.jsonl +0 -0
- data/processed/train06.jsonl +0 -0
- data/processed/train07.jsonl +0 -0
- data/processed/train08.jsonl +0 -0
- data/processed/train09.jsonl +0 -0
- data/processed/train10.jsonl +0 -0
- data/processed/train11.jsonl +0 -0
- data/processed/train12.jsonl +0 -0
- data/processed/train13.jsonl +0 -0
- data/processed/train14.jsonl +0 -0
- data/processed/train15.jsonl +0 -0
- data/processed/train16.jsonl +0 -0
- data/processed/train17.jsonl +0 -0
- data/processed/train18.jsonl +0 -0
- data/processed/train19.jsonl +0 -0
- data/processed/train20.jsonl +0 -0
- data/processed/train21.jsonl +0 -0
- data/processed/train22.jsonl +0 -0
- data/processed/train23.jsonl +0 -0
- data/processed/train24.jsonl +0 -0
- data/processed/train25.jsonl +0 -0
- data/processed/train26.jsonl +0 -0
- data/processed/train27.jsonl +0 -0
- data/processed/validation00.jsonl +0 -0
- data/processed/validation01.jsonl +0 -0
- data/processed/validation02.jsonl +0 -0
- data/processed/validation03.jsonl +0 -0
- generate_reference_files.py +7 -0
- process.py +15 -17
- qg_jaquad.py +0 -3
data/processed/test00.jsonl – test03.jsonl, train00.jsonl – train27.jsonl, validation00.jsonl – validation03.jsonl
CHANGED
The diff for each of these data files is too large to render (see raw diff).
generate_reference_files.py
ADDED
@@ -0,0 +1,7 @@
+import os
+from datasets import load_dataset
+
+os.makedirs('./reference_files', exist_ok=True)
+dataset = load_dataset('asahi417/qg_jaquad')
+
+for _type in ['validation', 'test']:
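The rendered diff cuts off after the for-loop header, so the loop body of generate_reference_files.py is not shown here. As a rough sketch only, assuming the script dumps one plain-text reference file per split and field (the output paths and the 'question'/'answer' field choices are assumptions, not the committed code):

import os

from datasets import load_dataset

# Rough sketch: the committed script's loop body is not visible in the rendered
# diff, so the file layout and field names below are assumptions.
os.makedirs('./reference_files', exist_ok=True)
dataset = load_dataset('asahi417/qg_jaquad')

for _type in ['validation', 'test']:
    for field in ['question', 'answer']:
        # Hypothetical output path, e.g. ./reference_files/question-validation.txt
        path = os.path.join('./reference_files', '{}-{}.txt'.format(field, _type))
        with open(path, 'w') as f:
            f.write('\n'.join(example[field] for example in dataset[_type]))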
process.py
CHANGED
@@ -21,19 +21,19 @@ def get_sentence(document: str):
 
 def process_single_data(data: Dict):
     """ Convert single raw json data into QG format """
-    example = {'question': data["question"], '…
+    example = {'question': data["question"], 'paragraph': data["context"]}
 
     # check answer
     answer_text = data['answers']['text'][0]
     answer_start = data['answers']['answer_start'][0]
     answer_end = answer_start + len(answer_text)
-    assert example['…
+    assert example['paragraph'][answer_start: answer_end] == answer_text
     example['answer'] = answer_text
 
     # get sentence
-    position = example['…
+    position = example['paragraph'].find(example['answer'])
     assert position != -1
-    before_tmp = get_sentence(example['…
+    before_tmp = get_sentence(example['paragraph'][:position])
     if len(before_tmp) == 0:
         before = ''
         before_sentence = ''
@@ -44,7 +44,7 @@ def process_single_data(data: Dict):
     else:
         before = ' '.join(before_tmp[:-1])
         before_sentence = before_tmp[-1]
-    after_tmp = get_sentence(example['…
+    after_tmp = get_sentence(example['paragraph'][position + len(example['answer']):])
     if len(after_tmp) == 0:
         after = ''
         after_sentence = ''
@@ -53,27 +53,25 @@ def process_single_data(data: Dict):
         after_sentence = after_tmp[0]
     example['sentence'] = '{}{}{}'.format(before_sentence, example['answer'], after_sentence)
 
-    # get …
+    # get paragraph_sentence
     source_text = '{0}{1}{2}{1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
-    example['…
+    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)
 
-    # get …
+    # get paragraph_answer
     source_text = '{0}{1}{2}{1}{3}'.format(
-        example['…
-        example['…
-    example['…
+        example['paragraph'][:position], HIGHLIGHT_TOKEN, example['answer'],
+        example['paragraph'][position + len(example['answer']):])
+    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)
 
     # get sentence_answer
-    …
-    if len(before) == 0 or before[-1].endswith('。'):
+    if len(before_tmp) == 0 or before_tmp[-1].endswith('。'):
         before = ''
     else:
-        before = …
-    …
-    if len(after) == 0:
+        before = before_tmp[-1]
+    if len(after_tmp) == 0:
         after = ''
     else:
-        after = …
+        after = after_tmp[0]
     source_text = '{0}{1}{2}{1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
     example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
     for _k in example.keys():
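In the diff above, the removed lines are cut off in the rendered view (marked with …), but the added lines show the full new formatting: each record is expanded into three highlighted views, paragraph_sentence, paragraph_answer, and sentence_answer, by wrapping a span in HIGHLIGHT_TOKEN. A minimal, self-contained sketch of that formatting on a toy record follows; HIGHLIGHT_TOKEN is defined elsewhere in process.py, so the '<hl>' value used here is an assumed placeholder.

import re

HIGHLIGHT_TOKEN = '<hl>'  # assumed placeholder; the real token is defined outside this diff

paragraph = '富士山は日本で最も高い山である。標高は3776メートルである。'
answer = '3776メートル'
sentence = '標高は3776メートルである。'

# paragraph_answer: the answer span highlighted inside the full paragraph.
position = paragraph.find(answer)
paragraph_answer = re.sub(r'\s+', ' ', '{0}{1}{2}{1}{3}'.format(
    paragraph[:position], HIGHLIGHT_TOKEN, answer, paragraph[position + len(answer):]))
print(paragraph_answer)  # 富士山は日本で最も高い山である。標高は<hl>3776メートル<hl>である。

# paragraph_sentence: the whole answer sentence highlighted inside the paragraph.
start = paragraph.find(sentence)
paragraph_sentence = re.sub(r'\s+', ' ', '{0}{1}{2}{1}{3}'.format(
    paragraph[:start], HIGHLIGHT_TOKEN, sentence, paragraph[start + len(sentence):]))
print(paragraph_sentence)  # 富士山は日本で最も高い山である。<hl>標高は3776メートルである。<hl>

# sentence_answer: the answer span highlighted inside its sentence only.
sentence_answer = re.sub(r'\s+', ' ', '{0}{1}{2}{1}{3}'.format(
    sentence[:sentence.find(answer)], HIGHLIGHT_TOKEN, answer,
    sentence[sentence.find(answer) + len(answer):]))
print(sentence_answer)  # 標高は<hl>3776メートル<hl>である。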
qg_jaquad.py
CHANGED
@@ -44,9 +44,6 @@ class QGJaquad(datasets.GeneratorBasedBuilder):
                 }
             ),
             supervised_keys=None,
-            # task_templates=[
-            #     Summarization(task='question generation', text_column="passage_answer", summary_column='question')
-            # ],
            homepage="https://github.com/asahi417/lm-question-generation"
        )
 
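For reference, a minimal usage sketch of the dataset built by this loading script; the field names follow process.py above, and the exact released schema may differ.

from datasets import load_dataset

# Load the dataset through the qg_jaquad.py loading script.
dataset = load_dataset('asahi417/qg_jaquad')

example = dataset['train'][0]
# Fields built by process.py above; the released schema may differ slightly.
print(example['question'])
print(example['paragraph_answer'])  # paragraph with the answer span highlighted
print(example['sentence_answer'])   # answer sentence with the answer span highlighted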