Datasets:
lmqg/qg_squadshifts
Tasks:
Text Generation
Modalities:
Text
Sub-tasks:
language-modeling
Languages:
English
Size:
10K - 100K
ArXiv:
2210.03992
Tags:
question-generation
License:
cc-by-4.0

parquet-converter committed on
Commit • e7d7498
1 Parent(s): 440df90
Update parquet files
Browse files
- .gitattributes +0 -82
- README.md +0 -85
- all/qg_squadshifts-test.parquet +3 -0
- all/qg_squadshifts-train.parquet +3 -0
- all/qg_squadshifts-validation.parquet +3 -0
- amazon/qg_squadshifts-test.parquet +3 -0
- data/processed/amazon.test02.jsonl → amazon/qg_squadshifts-train.parquet +2 -2
- data/processed/amazon.test03.jsonl → amazon/qg_squadshifts-validation.parquet +2 -2
- data/processed/amazon.train00.jsonl +0 -3
- data/processed/amazon.train01.jsonl +0 -3
- data/processed/amazon.train02.jsonl +0 -3
- data/processed/amazon.validation00.jsonl +0 -3
- data/processed/amazon.validation01.jsonl +0 -3
- data/processed/new_wiki.test00.jsonl +0 -3
- data/processed/new_wiki.test01.jsonl +0 -3
- data/processed/new_wiki.test02.jsonl +0 -3
- data/processed/new_wiki.train00.jsonl +0 -3
- data/processed/new_wiki.train01.jsonl +0 -3
- data/processed/new_wiki.validation00.jsonl +0 -3
- data/processed/nyt.test00.jsonl +0 -3
- data/processed/nyt.test01.jsonl +0 -3
- data/processed/nyt.test02.jsonl +0 -3
- data/processed/nyt.test03.jsonl +0 -3
- data/processed/nyt.train00.jsonl +0 -3
- data/processed/nyt.train01.jsonl +0 -3
- data/processed/nyt.train02.jsonl +0 -3
- data/processed/nyt.validation00.jsonl +0 -3
- data/processed/nyt.validation01.jsonl +0 -3
- data/processed/reddit.test00.jsonl +0 -3
- data/processed/reddit.test01.jsonl +0 -3
- data/processed/reddit.test02.jsonl +0 -3
- data/processed/reddit.test03.jsonl +0 -3
- data/processed/reddit.train00.jsonl +0 -3
- data/processed/reddit.train01.jsonl +0 -3
- data/processed/reddit.train02.jsonl +0 -3
- data/processed/reddit.validation00.jsonl +0 -3
- data/processed/reddit.validation01.jsonl +0 -3
- data/processed/amazon.test00.jsonl → new_wiki/qg_squadshifts-test.parquet +2 -2
- data/processed/amazon.test01.jsonl → new_wiki/qg_squadshifts-train.parquet +2 -2
- new_wiki/qg_squadshifts-validation.parquet +3 -0
- nyt/qg_squadshifts-test.parquet +3 -0
- nyt/qg_squadshifts-train.parquet +3 -0
- nyt/qg_squadshifts-validation.parquet +3 -0
- process.py +0 -158
- qg_squadshifts.py +0 -120
- reddit/qg_squadshifts-test.parquet +3 -0
- reddit/qg_squadshifts-train.parquet +3 -0
- reddit/qg_squadshifts-validation.parquet +3 -0
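
This commit collapses the split JSONL shards under data/processed/ into one parquet file per domain and split. Once converted, a split can be read without the repository's custom loader; a minimal sketch, assuming pandas with a parquet engine (pyarrow) and fsspec HTTP support are installed, using one of the new file paths listed above:

```
# Hedged sketch: read one converted split directly from the Hub.
# The resolve/main URL pattern mirrors the loader's _URL further below.
import pandas as pd

url = ("https://huggingface.co/datasets/lmqg/qg_squadshifts"
       "/resolve/main/amazon/qg_squadshifts-train.parquet")
df = pd.read_parquet(url)
print(len(df), df.columns.tolist())
```
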
.gitattributes
DELETED
@@ -1,82 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.train.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.test.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.train.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.test.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.validation.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.test.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.validation.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.validation.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.train.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.train.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.test.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.validation.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.train02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.test01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.test00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.test01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.test00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.train00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.test01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.test00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.test02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.test02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.train00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.train01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.train01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.test01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.test00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.test03.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.train00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.validation01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.test03.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.validation01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.validation01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/amazon.train02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.test02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.train00.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.train01.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/nyt.train02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.test03.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/new_wiki.test02.jsonl filter=lfs diff=lfs merge=lfs -text
-data/processed/reddit.train01.jsonl filter=lfs diff=lfs merge=lfs -text

README.md
DELETED
@@ -1,85 +0,0 @@
----
-license: cc-by-4.0
-pretty_name: SubjQA for question generation
-language: en
-multilinguality: monolingual
-size_categories: 10K<n<100K
-source_datasets: subjqa
-task_categories:
-- text-generation
-task_ids:
-- language-modeling
-tags:
-- question-generation
----
-
-# Dataset Card for "lmqg/qg_squadshifts"
-
-## Dataset Description
-- **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation)
-- **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992)
-- **Point of Contact:** [Asahi Ushio](http://asahiushio.com/)
-
-### Dataset Summary
-This is a subset of [QG-Bench](https://github.com/asahi417/lm-question-generation/blob/master/QG_BENCH.md#datasets), a unified question generation benchmark proposed in
-["Generative Language Models for Paragraph-Level Question Generation: A Unified Benchmark and Evaluation, EMNLP 2022 main conference"](https://arxiv.org/abs/2210.03992).
-Modified version of [SQuADShifts](https://modestyachts.github.io/squadshifts-website/index.html) for question generation (QG) task.
-
-### Supported Tasks and Leaderboards
-* `question-generation`: The dataset can be used to train a model for question generation.
-Success on this task is typically measured by achieving a high BLEU4/METEOR/ROUGE-L/BERTScore/MoverScore (see our paper for more in detail).
-
-### Languages
-English (en)
-
-## Dataset Structure
-
-An example of 'train' looks as follows.
-```
-{
-    "question": "has there ever been a legal challange?",
-    "paragraph": "The status of the Armenian Apostolic Church within the Republic of Armenia is defined in the country's constitution. Article 8.1 of the Constitution of Armenia states: "The Republic of Armenia recognizes the exclusive historical mission of the Armenian Apostolic Holy Church as a national church, in the spiritual life, development of the national culture and preservation of the national identity of the people of Armenia." Among others, ethnographer Hranush Kharatyan has questioned the constitutionality of the phrase "national church".",
-    "answer": "Among others, ethnographer Hranush Kharatyan has questioned the constitutionality of the phrase "national church",
-    "sentence": "Article 8.1 of the Constitution of Armenia states: "The Republic of Armenia recognizes the exclusive historical mission of the Armenian Apostolic Holy Church as a national church, in the spiritual life, development of the national culture and preservation of the national identity of the people of Armenia." Among others, ethnographer Hranush Kharatyan has questioned the constitutionality of the phrase "national church",
-    "paragraph_sentence": "The status of the Armenian Apostolic Church within the Republic of Armenia is defined in the country's constitution. <hl> Article 8.1 of the Constitution of Armenia states: "The Republic of Armenia recognizes the exclusive historical mission of the Armenian Apostolic Holy Church as a national church, in the spiritual life, development of the national culture and preservation of the national identity of the people of Armenia." Among others, ethnographer Hranush Kharatyan has questioned the constitutionality of the phrase "national church". <hl>",
-    "paragraph_answer": "The status of the Armenian Apostolic Church within the Republic of Armenia is defined in the country's constitution. Article 8.1 of the Constitution of Armenia states: "The Republic of Armenia recognizes the exclusive historical mission of the Armenian Apostolic Holy Church as a national church, in the spiritual life, development of the national culture and preservation of the national identity of the people of Armenia." <hl> Among others, ethnographer Hranush Kharatyan has questioned the constitutionality of the phrase "national church". <hl>",
-    "sentence_answer": "Article 8.1 of the Constitution of Armenia states: "The Republic of Armenia recognizes the exclusive historical mission of the Armenian Apostolic Holy Church as a national church, in the spiritual life, development of the national culture and preservation of the national identity of the people of Armenia." <hl> Among others, ethnographer Hranush Kharatyan has questioned the constitutionality of the phrase "national church". <hl>"
-}
-```
-The data fields are the same among all splits.
-- `question`: a `string` feature.
-- `paragraph`: a `string` feature.
-- `answer`: a `string` feature.
-- `sentence`: a `string` feature.
-- `paragraph_answer`: a `string` feature, which is same as the paragraph but the answer is highlighted by a special token `<hl>`.
-- `paragraph_sentence`: a `string` feature, which is same as the paragraph but a sentence containing the answer is highlighted by a special token `<hl>`.
-- `sentence_answer`: a `string` feature, which is same as the sentence but the answer is highlighted by a special token `<hl>`.
-
-Each of `paragraph_answer`, `paragraph_sentence`, and `sentence_answer` feature is assumed to be used to train a question generation model,
-but with different information. The `paragraph_answer` and `sentence_answer` features are for answer-aware question generation and
-`paragraph_sentence` feature is for sentence-aware question generation.
-
-### Data Splits
-
-| name |train | valid | test |
-|-------------|------:|------:|-----:|
-|default (all)|9209|6283 |18,844|
-| amazon |3295|1648|4942|
-| new_wiki |2646|1323|3969|
-| nyt |3355|1678|5032|
-| reddit |3268|1634|4901|
-
-## Citation Information
-```
-@inproceedings{ushio-etal-2022-generative,
-    title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
-    author = "Ushio, Asahi and
-        Alva-Manchego, Fernando and
-        Camacho-Collados, Jose",
-    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
-    month = dec,
-    year = "2022",
-    address = "Abu Dhabi, U.A.E.",
-    publisher = "Association for Computational Linguistics",
-}
-```

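The card above (deleted here as part of the parquet conversion) documents the fields each example carries. As a hedged usage sketch, assuming the converted dataset keeps the same config and field names described in that card:

```
# Minimal sketch: load one domain config and inspect the QG-formatted fields.
from datasets import load_dataset

dataset = load_dataset("lmqg/qg_squadshifts", "new_wiki")  # or 'all', 'amazon', 'nyt', 'reddit'
example = dataset["train"][0]
print(example["question"])
print(example["paragraph_answer"])  # paragraph with the answer wrapped in <hl> tokens
```
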
all/qg_squadshifts-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cdcd9c3c7fa3f0b45466bfcb7cd6d57c62f3781508fff46c2fb41aa06b3edea
+size 41940007

all/qg_squadshifts-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe0c9e17a098f5fbddc7e3071a54f8885410d42091063440ceffc1f13613c735
+size 28044897

all/qg_squadshifts-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7258efb706fe77c937bb3f66e4e14518c48211a9b11cfecfd7a4b804aa9c853d
+size 14052001

amazon/qg_squadshifts-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c5a690627216154fdd0ca4e88c8a00a1f47cb3876f39cd3d6bda14c89aa98b8
+size 10356900

data/processed/amazon.test02.jsonl → amazon/qg_squadshifts-train.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:955eb892271ce80357bf3d4d8ca1e28e677027f1034b20afde1ba23a8900cb7f
+size 6997323

data/processed/amazon.test03.jsonl → amazon/qg_squadshifts-validation.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bad8c921640978c615d4eaa96c86f3de2ec9ba5a62bbb8ebf325cc23a4a4d288
+size 3528700

data/processed/amazon.train00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6052b2e44588d9f4f2cb63f26ca681d22b9aa287eff9635bb0d5f2e4ca97fd9c
-size 5487087

data/processed/amazon.train01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d2b4735855b4bffe7b881774ef273dec30af1afb234f5129a37f217e73aac244
-size 5597543

data/processed/amazon.train02.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:006d4ec56795e7d5a25174512fa9c91e9e9c914a3c41844fecb84e35543e3308
-size 1108992

data/processed/amazon.validation00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b3360a31c2c131897d677a90b86d94fe841aa56fd4bcc7b116baff1d7c609615
-size 5515387

data/processed/amazon.validation01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eb963bbbd214eacfd4c3265ee342fe1cdbc860092c27c1ae5a7f110827d98560
-size 555886

data/processed/new_wiki.test00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ce84ac93052be5e9c33e13dfeaee07895ea6eb1555795fdcfba6e7578518f0ea
-size 5809893

data/processed/new_wiki.test01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b6dd7fdac216a6043d0e37d3d93bf3c2dfc216a9ff8090a8d48ddf859e915e98
-size 5839294

data/processed/new_wiki.test02.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:78ce6f3940cd980581780f3e4ec11a93c6289e5717c4127ea1721d7f0cc9e613
-size 3761720

data/processed/new_wiki.train00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:816d89e0c8841f608afd1554006d06d81fc32663b3f143b1e89c6c1f318a4a32
-size 5823889

data/processed/new_wiki.train01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4cdf7b91b5b4f2f8c79a9af6ffc4a8550d54c67cfe60b52392ad751d4bc15a62
-size 4443721

data/processed/new_wiki.validation00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b2d5774039f9e84712f6f86e5d2bfef090f40d08cbe90ec7972adb671804bc53
-size 5155823

data/processed/nyt.test00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c2ab45020d2ba1cba7f6461f740004f9ce5d4ef858b8d43064f95f1c9cd1b5bf
-size 6061029

data/processed/nyt.test01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7bcda96580be9e05c2805c5112355a19d3cae4edf63f09584ee13e37b1b8db5b
-size 5929689

data/processed/nyt.test02.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7b42e109dbf474d5a6fbf0a04dd8682ae9ca0b6473937c1a000d059f2f3aa9d6
-size 5984215

data/processed/nyt.test03.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:489eb16f16883b9e6392d025b246ae344f58503eddeaf6ec08272111a42751d3
-size 2077154

data/processed/nyt.train00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b0431aed234ef5f774d37cf67125ba45a427afe93d60a3f579351b099132e8d8
-size 5922343

data/processed/nyt.train01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:13dcc8dbf04d4c8d78cf13bfe573ad2a0258ebe0d56613ec039f2bc8e3316cd2
-size 5961389

data/processed/nyt.train02.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:98fa8faee7b5eca5342880daa5c311ebbb670171380711454ebdc34b2138f8a8
-size 1408146

data/processed/nyt.validation00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ced990654eca70ac4c8bcc432c49c277bf59fe83e9d653ab051feae7a0122992
-size 5893287

data/processed/nyt.validation01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:def01e3d241e815fa5c787b2382956b0678882c753bf730548fda6c7df85c61d
-size 709002

data/processed/reddit.test00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0a3e045f79238744753ae8b178f1e2157edb3f10d998184db5abe4ca0b4f6f02
-size 5558554

data/processed/reddit.test01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f75abba46df0d46842ecbfb6d6e937c26111a55e03fa10d5a49c3373748903b8
-size 5522538

data/processed/reddit.test02.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:29728a021b0c355eb2214dd7a0f8b26df311c46cef8380f279bb90b255e417b9
-size 5654220

data/processed/reddit.test03.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1f4d0ee8d421a2606fb43ee4c632cddf27290c67d011c28f501eb15bc4d60bbc
-size 1453265

data/processed/reddit.train00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e8040b5c311161f4263b82b3eb7b8cf4e9df0711ea2b21b81b844d84eba455a5
-size 5611873

data/processed/reddit.train01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3ac2bec724fc5fbfaa4a98275d47598db19664b631cc1cf40013e7f059dc762c
-size 5561360

data/processed/reddit.train02.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7199f57ffb2246d2c3cf73eac6e084b2f92f0f9f8dff987ec051a366c1e97921
-size 999150

data/processed/reddit.validation00.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5d7d95bc43edec19adf6855a67daf4e42160060962b0eb074c84c3c78e64ae58
-size 5466110

data/processed/reddit.validation01.jsonl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2391b08e38fccc145aaac5aea06c9096aae5c0e2b9510cbc94aa480ffbd989b1
-size 485303

data/processed/amazon.test00.jsonl → new_wiki/qg_squadshifts-test.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e1ebfc3910d39d52dcebef74eb5bc89914c93e5be852d6f98622fb351a380b83
+size 8621366

data/processed/amazon.test01.jsonl → new_wiki/qg_squadshifts-train.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:183f1d0e5b927a7b812a19ae3af7c642b1f0fc5f4e78ec3bd09ebe95191eaf60
+size 5754745

new_wiki/qg_squadshifts-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:601e1c5cb6587259bcbe853cf273640a9e8e29f08b29145f2ca87e0f0ebd2280
+size 2936252

nyt/qg_squadshifts-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:296c892f72559487a213c7db31aeebbfe8412504baf6ca3a1a3adefc133a3074
+size 12091778

nyt/qg_squadshifts-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ac83daa9c7fc2bf14b9cdc4da975608781242b2c6c4e45bb2a5ca52133ed8bd
+size 8054251

nyt/qg_squadshifts-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d698fc7556cc9a5abc7259ec39cba288afcd94bbbd1bb23e5cab70a42cccb6e4
+size 4043738

process.py
DELETED
@@ -1,158 +0,0 @@
-""" Script to process raw SQuADshift file for Question Generation format
-cd data/processed
-gsplit -l 1500 -d --additional-suffix=.jsonl new_wiki.test.jsonl new_wiki.test
-gsplit -l 1500 -d --additional-suffix=.jsonl nyt.test.jsonl nyt.test
-gsplit -l 1500 -d --additional-suffix=.jsonl reddit.test.jsonl reddit.test
-gsplit -l 1500 -d --additional-suffix=.jsonl amazon.test.jsonl amazon.test
-
-gsplit -l 1500 -d --additional-suffix=.jsonl new_wiki.train.jsonl new_wiki.train
-gsplit -l 1500 -d --additional-suffix=.jsonl nyt.train.jsonl nyt.train
-gsplit -l 1500 -d --additional-suffix=.jsonl reddit.train.jsonl reddit.train
-gsplit -l 1500 -d --additional-suffix=.jsonl amazon.train.jsonl amazon.train
-
-gsplit -l 1500 -d --additional-suffix=.jsonl new_wiki.validation.jsonl new_wiki.validation
-gsplit -l 1500 -d --additional-suffix=.jsonl nyt.validation.jsonl nyt.validation
-gsplit -l 1500 -d --additional-suffix=.jsonl reddit.validation.jsonl reddit.validation
-gsplit -l 1500 -d --additional-suffix=.jsonl amazon.validation.jsonl amazon.validation
-
-rm -rf new_wiki.test.jsonl
-rm -rf nyt.test.jsonl
-rm -rf reddit.test.jsonl
-rm -rf amazon.test.jsonl
-
-rm -rf new_wiki.train.jsonl
-rm -rf nyt.train.jsonl
-rm -rf reddit.train.jsonl
-rm -rf amazon.train.jsonl
-
-rm -rf new_wiki.validation.jsonl
-rm -rf nyt.validation.jsonl
-rm -rf reddit.validation.jsonl
-rm -rf amazon.validation.jsonl
-
-"""
-import json
-import os
-import re
-from random import shuffle, seed
-from tqdm import tqdm
-
-import spacy
-from datasets import load_dataset
-
-DATASET_NAME = "squadshifts"
-DATASET_TYPES = ['new_wiki', 'nyt', 'reddit', 'amazon']
-HIGHLIGHT_TOKEN = '<hl>'
-GENERATE_TEST_SPLIT = True
-SPLITTER = spacy.load('en_core_web_sm')
-
-
-def get_sentence(document: str): return [str(sent) for sent in SPLITTER(document).sents]
-
-
-def process_single_data(question: str, paragraph: str, answer: str):
-    """ Convert single raw json data into QG format """
-    example = {'question': question, 'paragraph': paragraph, 'answer': answer}
-    start = example['paragraph'].find(example['answer'])
-    end = start + len(answer)
-    assert paragraph[start:end] == answer
-    # get sentence
-    before_tmp = get_sentence(example['paragraph'][:start])
-    if len(before_tmp) == 0:
-        before = ''
-        before_sentence = ''
-    else:
-        if before_tmp[-1].endswith('.'):
-            before = ' '.join(before_tmp)
-            before_sentence = ''
-        else:
-            before = ' '.join(before_tmp[:-1])
-            before_sentence = before_tmp[-1]
-            before_sentence = before_sentence if before_sentence.endswith(' ') else f'{before_sentence} '
-    after_tmp = get_sentence(example['paragraph'][start + len(example['answer']):])
-    if len(after_tmp) == 0:
-        after = ''
-        after_sentence = ''
-    else:
-        after = ' '.join(after_tmp[1:])
-        after_sentence = after_tmp[0]
-        after_sentence = after_sentence if after_sentence.startswith(' ') else f' {after_sentence}'
-    example['sentence'] = f"{before_sentence}{example['answer']}{after_sentence}"
-
-    # get paragraph_sentence
-    before = '' if before == '' else f'{before} '
-    after = '' if after == '' else f' {after}'
-    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
-    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)
-
-    # get paragraph_answer
-    source_text = '{0}{1} {2} {1}{3}'.format(
-        example['paragraph'][:start], HIGHLIGHT_TOKEN, example['answer'],
-        example['paragraph'][start + len(example['answer']):])
-    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)
-
-    # get sentence_answer
-    if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
-        before = ''
-    else:
-        before = before_tmp[-1] if before_tmp[-1].endswith(' ') else f'{before_tmp[-1]} '
-    if len(after_tmp) == 0:
-        after = ''
-    else:
-        after = after_tmp[0] if after_tmp[0].startswith(' ') else f' {after_tmp[0]}'
-    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
-    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
-
-    return example
-
-
-if __name__ == '__main__':
-    output = './data/processed'
-    os.makedirs(output, exist_ok=True)
-    for data_type in DATASET_TYPES:
-        dataset = load_dataset(DATASET_NAME, data_type)
-        _split = 'test'
-        tmp_dataset = dataset[_split]
-        full_data = []
-        for single_data in tqdm(tmp_dataset):
-            question_str = single_data['question']  # .replace("\n", ".").replace('"', "'")
-            paragraph_str = single_data['context']  # .replace("\n", ".").replace('"', "'")
-            answer_str = single_data['answers']['text']
-            if type(answer_str) == list:
-                answer_str = answer_str[0]
-            assert type(answer_str) is str, answer_str
-            assert type(question_str) is str, question_str
-            assert type(paragraph_str) is str, paragraph_str
-            tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
-            tmp_data['paragraph_id'] = single_data['id']
-            full_data.append(tmp_data)
-
-        # split test into train/valid/test
-        test_size = int(len(full_data)/2)
-        train_size = int((len(full_data) - test_size) * 2/3)
-        # train_size = 2500
-        valid_size = len(full_data) - train_size - test_size
-        assert train_size + test_size + valid_size == len(full_data), f"{train_size}, {test_size}, {valid_size}"
-        paragraph_ids = list(set([i['paragraph_id'] for i in full_data]))
-        data_dict = {p: [i for i in full_data if i['paragraph_id'] == p] for p in paragraph_ids}
-        seed(0)
-        shuffle(paragraph_ids)
-        lines_train = []
-        lines_test = []
-        lines_valid = []
-
-        for i in paragraph_ids:
-            if len(lines_test) < test_size:
-                lines_test += data_dict[i]
-            elif len(lines_train) < train_size:
-                lines_train += data_dict[i]
-            else:
-                lines_valid += data_dict[i]
-        print(f'STATS(train/valid/test): {data_type}| {len(lines_train)}/{len(lines_valid)}/{len(lines_test)}')
-        with open(f'{output}/{data_type}.test.jsonl', 'w') as f:
-            f.write('\n'.join([json.dumps(i) for i in lines_test]))
-        with open(f'{output}/{data_type}.train.jsonl', 'w') as f:
-            f.write('\n'.join([json.dumps(i) for i in lines_train]))
-        with open(f'{output}/{data_type}.validation.jsonl', 'w') as f:
-            f.write('\n'.join([json.dumps(i) for i in lines_valid]))
-
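For intuition, a standalone toy sketch of the <hl> insertion that process_single_data performs for the paragraph_answer field (hypothetical input; the real script above also derives sentence, paragraph_sentence and sentence_answer via spaCy sentence splitting):

```
import re

HIGHLIGHT_TOKEN = '<hl>'

def highlight_answer(paragraph: str, answer: str) -> str:
    # Wrap the first occurrence of the answer span in <hl> tokens,
    # then normalise whitespace, as process_single_data does above.
    start = paragraph.find(answer)
    assert start >= 0, 'answer must appear verbatim in the paragraph'
    end = start + len(answer)
    text = f'{paragraph[:start]}{HIGHLIGHT_TOKEN} {answer} {HIGHLIGHT_TOKEN}{paragraph[end:]}'
    return re.sub(r'\s+', ' ', text).strip()

print(highlight_answer('The tower is 324 metres tall.', '324 metres'))
# -> The tower is <hl> 324 metres <hl> tall.
```
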
qg_squadshifts.py
DELETED
@@ -1,120 +0,0 @@
-""" python -c "from datasets import load_dataset;load_dataset('.')" """
-import json
-from itertools import chain
-import datasets
-
-logger = datasets.logging.get_logger(__name__)
-_VERSION = "5.0.1"
-_CITATION = """
-@inproceedings{ushio-etal-2022-generative,
-    title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
-    author = "Ushio, Asahi and
-        Alva-Manchego, Fernando and
-        Camacho-Collados, Jose",
-    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
-    month = dec,
-    year = "2022",
-    address = "Abu Dhabi, U.A.E.",
-    publisher = "Association for Computational Linguistics",
-}
-"""
-_DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-website/index.html) dataset for question generation (QG) task."""
-_URL = 'https://huggingface.co/datasets/lmqg/qg_squadshifts/resolve/main/data/processed'
-_FILES = {
-    str(datasets.Split.TEST): {
-        'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(3)],
-        'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(4)],
-        'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(4)],
-        'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(4)]
-    },
-    str(datasets.Split.TRAIN): {
-        'new_wiki': [f'{_URL}/new_wiki.train{i:02d}.jsonl' for i in range(2)],
-        'nyt': [f'{_URL}/nyt.train{i:02d}.jsonl' for i in range(3)],
-        'reddit': [f'{_URL}/reddit.train{i:02d}.jsonl' for i in range(3)],
-        'amazon': [f'{_URL}/amazon.train{i:02d}.jsonl' for i in range(3)]
-    },
-    str(datasets.Split.VALIDATION): {
-        'new_wiki': [f'{_URL}/new_wiki.validation{i:02d}.jsonl' for i in range(1)],
-        'nyt': [f'{_URL}/nyt.validation{i:02d}.jsonl' for i in range(2)],
-        'reddit': [f'{_URL}/reddit.validation{i:02d}.jsonl' for i in range(2)],
-        'amazon': [f'{_URL}/amazon.validation{i:02d}.jsonl' for i in range(2)]
-    },
-}
-# _FILES = {
-#     str(datasets.Split.TEST): {
-#         'new_wiki': [f'{_URL}/new_wiki.test.jsonl'],
-#         'nyt': [f'{_URL}/nyt.test.jsonl'],
-#         'reddit': [f'{_URL}/reddit.test.jsonl'],
-#         'amazon': [f'{_URL}/amazon.test.jsonl']
-#     },
-#     str(datasets.Split.TRAIN): {
-#         'new_wiki': [f'{_URL}/new_wiki.train.jsonl'],
-#         'nyt': [f'{_URL}/nyt.train.jsonl'],
-#         'reddit': [f'{_URL}/reddit.train.jsonl'],
-#         'amazon': [f'{_URL}/amazon.train.jsonl']
-#     },
-#     str(datasets.Split.VALIDATION): {
-#         'new_wiki': [f'{_URL}/new_wiki.validation.jsonl'],
-#         'nyt': [f'{_URL}/nyt.validation.jsonl'],
-#         'reddit': [f'{_URL}/reddit.validation.jsonl'],
-#         'amazon': [f'{_URL}/amazon.validation.jsonl']
-#     },
-# }
-_DOMAIN = list(_FILES[list(_FILES.keys())[0]].keys())
-
-
-class QGSQuADShiftsConfig(datasets.BuilderConfig):
-    """BuilderConfig for SquadQG"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for SquadQG.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(QGSQuADShiftsConfig, self).__init__(**kwargs)
-
-
-class QGSQuADShifts(datasets.GeneratorBasedBuilder):
-
-    BUILDER_CONFIGS = [QGSQuADShiftsConfig(name="all", version=datasets.Version(_VERSION), description="All domain.")]
-    BUILDER_CONFIGS += [QGSQuADShiftsConfig(name=i, version=datasets.Version(_VERSION), description=f"Domain {i}") for i in sorted(_DOMAIN)]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "answer": datasets.Value("string"), "paragraph_question": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "sentence": datasets.Value("string"),
-                    "paragraph": datasets.Value("string"),
-                    "sentence_answer": datasets.Value("string"),
-                    "paragraph_answer": datasets.Value("string"),
-                    "paragraph_sentence": datasets.Value("string"),
-                    "paragraph_id": datasets.Value("string")
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://github.com/asahi417/lm-question-generation"
-        )
-
-    def _split_generators(self, dl_manager):
-        if self.config.name == 'all':
-            downloaded_file = dl_manager.download_and_extract({k: list(chain(*list(v.values()))) for k, v in _FILES.items()})
-        else:
-            downloaded_file = dl_manager.download_and_extract({k: v[self.config.name] for k, v in _FILES.items()})
-        return [datasets.SplitGenerator(name=k, gen_kwargs={"filepaths": downloaded_file[k]}) for k in _FILES.keys()]
-
-    def _generate_examples(self, filepaths):
-        _key = 0
-        for filepath in filepaths:
-            logger.info("generating examples from = %s", filepath)
-            with open(filepath, encoding="utf-8") as f:
-                _list = f.read().split('\n')
-                if _list[-1] == '':
-                    _list = _list[:-1]
-                for i in _list:
-                    data = json.loads(i)
-                    yield _key, data
-                    _key += 1
-
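One detail worth noting in _split_generators above: the 'all' config simply flattens the per-domain shard lists before downloading. A toy illustration of that chain(*v.values()) call, with hypothetical file names standing in for _FILES:

```
from itertools import chain

# Toy stand-in for one split of _FILES (domain -> list of shard URLs).
files = {'new_wiki': ['new_wiki.test00.jsonl'],
         'nyt': ['nyt.test00.jsonl', 'nyt.test01.jsonl']}
print(list(chain(*files.values())))
# -> ['new_wiki.test00.jsonl', 'nyt.test00.jsonl', 'nyt.test01.jsonl']
```
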
reddit/qg_squadshifts-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dd7cd9071fad8899aeb6ff3cc7097d928c31ce1e488f500b290f0ffd3d22e5c
+size 10856496

reddit/qg_squadshifts-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e66473cdb3d4e294cc20e2377bf7dd9282a5e4ef9650399d6807ebf50d5cd0e
+size 7270800

reddit/qg_squadshifts-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c846e09614c7292911a73ad881e56cd55efbde6d716232a7f7e30770e3c53cf5
+size 3560752