yoshitomo-matsubara committed on
Commit
f133cbc
1 Parent(s): d3917d5

Upload 68 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +65 -0
  2. LICENSE.md +35 -0
  3. README.md +210 -0
  4. jsonl/en-to-x/bn_from_en-dev.jsonl +3 -0
  5. jsonl/en-to-x/bn_from_en-test.jsonl +3 -0
  6. jsonl/en-to-x/bn_from_en-train.jsonl +3 -0
  7. jsonl/en-to-x/fi_from_en-dev.jsonl +3 -0
  8. jsonl/en-to-x/fi_from_en-test.jsonl +3 -0
  9. jsonl/en-to-x/fi_from_en-train.jsonl +3 -0
  10. jsonl/en-to-x/id_from_en-dev.jsonl +3 -0
  11. jsonl/en-to-x/id_from_en-test.jsonl +3 -0
  12. jsonl/en-to-x/id_from_en-train.jsonl +3 -0
  13. jsonl/en-to-x/ja_from_en-dev.jsonl +3 -0
  14. jsonl/en-to-x/ja_from_en-test.jsonl +3 -0
  15. jsonl/en-to-x/ja_from_en-train.jsonl +3 -0
  16. jsonl/en-to-x/ko_from_en-dev.jsonl +3 -0
  17. jsonl/en-to-x/ko_from_en-test.jsonl +3 -0
  18. jsonl/en-to-x/ko_from_en-train.jsonl +3 -0
  19. jsonl/en-to-x/ru_from_en-dev.jsonl +3 -0
  20. jsonl/en-to-x/ru_from_en-test.jsonl +3 -0
  21. jsonl/en-to-x/ru_from_en-train.jsonl +3 -0
  22. jsonl/en-to-x/sw_from_en-dev.jsonl +3 -0
  23. jsonl/en-to-x/sw_from_en-test.jsonl +3 -0
  24. jsonl/en-to-x/sw_from_en-train.jsonl +3 -0
  25. jsonl/original/bn-dev.jsonl +3 -0
  26. jsonl/original/bn-test.jsonl +3 -0
  27. jsonl/original/bn-train.jsonl +3 -0
  28. jsonl/original/en-dev.jsonl +3 -0
  29. jsonl/original/en-test.jsonl +3 -0
  30. jsonl/original/en-train.jsonl +3 -0
  31. jsonl/original/fi-dev.jsonl +3 -0
  32. jsonl/original/fi-test.jsonl +3 -0
  33. jsonl/original/fi-train.jsonl +3 -0
  34. jsonl/original/id-dev.jsonl +3 -0
  35. jsonl/original/id-test.jsonl +3 -0
  36. jsonl/original/id-train.jsonl +3 -0
  37. jsonl/original/ja-dev.jsonl +3 -0
  38. jsonl/original/ja-test.jsonl +3 -0
  39. jsonl/original/ja-train.jsonl +3 -0
  40. jsonl/original/ko-dev.jsonl +3 -0
  41. jsonl/original/ko-test.jsonl +3 -0
  42. jsonl/original/ko-train.jsonl +3 -0
  43. jsonl/original/ru-dev.jsonl +3 -0
  44. jsonl/original/ru-test.jsonl +3 -0
  45. jsonl/original/ru-train.jsonl +3 -0
  46. jsonl/original/sw-dev.jsonl +3 -0
  47. jsonl/original/sw-test.jsonl +3 -0
  48. jsonl/original/sw-train.jsonl +3 -0
  49. jsonl/x-to-en/en_from_bn-dev.jsonl +3 -0
  50. jsonl/x-to-en/en_from_bn-test.jsonl +0 -0
.gitattributes CHANGED
@@ -52,3 +52,68 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/bn_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/bn_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/bn_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/fi_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/fi_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/fi_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/id_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/id_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/id_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ja_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ja_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ja_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ko_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ko_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ko_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ru_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ru_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/ru_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/sw_from_en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/sw_from_en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/en-to-x/sw_from_en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/bn-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/bn-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/bn-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/en-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/en-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/en-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/fi-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/fi-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/fi-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/id-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/id-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/id-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ja-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ja-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ja-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ko-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ko-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ko-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ru-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ru-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/ru-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/sw-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/sw-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/original/sw-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_bn-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_bn-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_fi-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_fi-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_fi-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_id-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_id-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_id-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ja-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ja-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ja-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ko-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ko-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ko-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ru-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ru-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_ru-train.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_sw-dev.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_sw-test.jsonl filter=lfs diff=lfs merge=lfs -text
+ jsonl/x-to-en/en_from_sw-train.jsonl filter=lfs diff=lfs merge=lfs -text
LICENSE.md ADDED
@@ -0,0 +1,35 @@
+ # Community Data License Agreement - Permissive - Version 2.0
+
+ This is the Community Data License Agreement - Permissive, Version 2.0 (the "agreement"). Data Provider(s) and Data Recipient(s) agree as follows:
+
+ ## 1. Provision of the Data
+
+ 1.1. A Data Recipient may use, modify, and share the Data made available by Data Provider(s) under this agreement if that Data Recipient follows the terms of this agreement.
+
+ 1.2. This agreement does not impose any restriction on a Data Recipient's use, modification, or sharing of any portions of the Data that are in the public domain or that may be used, modified, or shared under any other legal exception or limitation.
+
+ ## 2. Conditions for Sharing Data
+
+ 2.1. A Data Recipient may share Data, with or without modifications, so long as the Data Recipient makes available the text of this agreement with the shared Data.
+
+ ## 3. No Restrictions on Results
+
+ 3.1. This agreement does not impose any restriction or obligations with respect to the use, modification, or sharing of Results.
+
+ ## 4. No Warranty; Limitation of Liability
+
+ 4.1. All Data Recipients receive the Data subject to the following terms:
+
+ THE DATA IS PROVIDED ON AN "AS IS" BASIS, WITHOUT REPRESENTATIONS, WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+ NO DATA PROVIDER SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE DATA OR RESULTS, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+ ## 5. Definitions
+
+ 5.1. "Data" means the material received by a Data Recipient under this agreement.
+
+ 5.2. "Data Provider" means any person who is the source of Data provided under this agreement and in reliance on a Data Recipient's agreement to its terms.
+
+ 5.3. "Data Recipient" means any person who receives Data directly or indirectly from a Data Provider and agrees to the terms of this agreement.
+
+ 5.4. "Results" means any outcome obtained by computational analysis of Data, including for example machine learning models and models' insights.
README.md ADDED
@@ -0,0 +1,210 @@
+ ---
+ annotations_creators:
+ - machine-generated
+ language:
+ - bn
+ - en
+ - fi
+ - id
+ - ja
+ - ko
+ - ru
+ - sw
+ language_creators:
+ - found
+ #license:
+ license_details: https://huggingface.co/datasets/AmazonScience/tydi-as2/blob/main/LICENSE.md
+ multilinguality:
+ - multilingual
+ - translation
+ pretty_name: tydi-as2
+ size_categories:
+ - 10M<n<100M
+ source_datasets:
+ - extended|tydiqa
+ tags:
+ - as2
+ - answer sentence selection
+ - text retrieval
+ - question answering
+ task_categories:
+ - question-answering
+ - text-retrieval
+ task_ids:
+ - open-domain-qa
+ ---
+
+
+ # TyDi-AS2
+
+ ## Table of Contents
+ - [Dataset Card Creation Guide](#dataset-card-creation-guide)
+   - [Table of Contents](#table-of-contents)
+   - [Dataset Description](#dataset-description)
+     - [Dataset Summary](#dataset-summary)
+     - [Languages](#languages)
+       - [TyDi-AS2](#tydi-as2)
+       - [Xtr-TyDi-AS2](#xtr-tydi-as2)
+   - [Dataset Structure](#dataset-structure)
+     - [Data Instances](#data-instances)
+     - [Data Fields](#data-fields)
+     - [Data Splits](#data-splits)
+   - [Dataset Creation](#dataset-creation)
+     - [Source Data](#source-data)
+     - [Annotations](#annotations)
+       - [Annotation process](#annotation-process)
+       - [Who are the annotators?](#who-are-the-annotators)
+   - [Additional Information](#additional-information)
+     - [Dataset Curators](#dataset-curators)
+     - [Licensing Information](#licensing-information)
+     - [Citation Information](#citation-information)
+     - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:** [Amazon Science]()
+ - **Paper:** [Cross-Lingual Knowledge Distillation for Answer Sentence Selection in Low-Resource Languages](https://arxiv.org/abs/2305.16302)
+ - **Point of Contact:** [Yoshitomo Matsubara](mailto:yomtsub@amazon.com)
+
+ ### Dataset Summary
+
+ ***TyDi-AS2*** and ***Xtr-TyDi-AS2*** are multilingual Answer Sentence Selection (AS2) datasets comprising 8 diverse languages, proposed in our paper accepted at ACL 2023 (Findings): **Cross-Lingual Knowledge Distillation for Answer Sentence Selection in Low-Resource Languages**.
+ Both datasets were created from [TyDi QA](https://ai.google.com/research/tydiqa), a multilingual question-answering dataset. TyDi-AS2 was created by converting the QA instances in TyDi QA to AS2 instances (see [Dataset Creation](#dataset-creation) for details). Xtr-TyDi-AS2 was created by translating the non-English TyDi-AS2 instances to English and vice versa.
+ For translations, we used [Amazon Translate](https://aws.amazon.com/translate/).
+
+ ### Languages
+
+ #### TyDi-AS2 (original)
+
+ - `bn`: Bengali
+ - `en`: English
+ - `fi`: Finnish
+ - `id`: Indonesian
+ - `ja`: Japanese
+ - `ko`: Korean
+ - `ru`: Russian
+ - `sw`: Swahili
+
+ File location: [`jsonl/original/`](jsonl/original/)
+
+ For the non-English sets, we also provide English-translated samples, which were used for the cross-lingual knowledge distillation (CLKD) experiments in our paper.
+
+ File location: [`jsonl/x-to-en/`](jsonl/x-to-en/)
+
+ #### Xtr-TyDi-AS2 (translationese)
+
+ The Xtr-TyDi-AS2 (X-translated TyDi-AS2) dataset consists of non-English AS2 instances translated from the English set of TyDi-AS2.
+
+ - `bn`: Bengali
+ - `fi`: Finnish
+ - `id`: Indonesian
+ - `ja`: Japanese
+ - `ko`: Korean
+ - `ru`: Russian
+ - `sw`: Swahili
+
+ File location: [`jsonl/en-to-x/`](jsonl/en-to-x/)
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ This is an example instance from the English training split of the TyDi-AS2 dataset.
+
+ ```
+ {
+   "Question": "When was the Argentine Basketball Federation formed?",
+   "Title": "History of the Argentina national basketball team",
+   "Sentence": "The Argentina national basketball team represents Argentina in basketball international competitions, and is controlled by the Argentine Basketball Federation.",
+   "Label": 0
+ }
+ ```
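+
+ Each split is stored as a JSON Lines file (one JSON object per line), so it can be read with standard tooling. The snippet below is a minimal sketch using only the Python standard library; the path is just one example file from this repository.
+
+ ```
+ import json
+
+ # Example path from this repository; point it at whichever split you need.
+ path = "jsonl/original/en-train.jsonl"
+
+ with open(path, encoding="utf-8") as f:
+     instances = [json.loads(line) for line in f]
+
+ # Per-instance fields are described under "Data Fields" below.
+ positives = [x for x in instances if x["Label"] == 1]
+ print(len(instances), len(positives))
+ ```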
+
+ For the English-translated TyDi-AS2 dataset and the Xtr-TyDi-AS2 dataset, the translated instances in the JSONL files are listed in the same order as the original (native) instances in the original TyDi-AS2 dataset.
+
+ For example, the 2nd instance in [`jsonl/x-to-en/en_from_bn-train.jsonl`](jsonl/x-to-en/en_from_bn-train.jsonl) (English, translated from Bengali) corresponds to the 2nd instance in [`jsonl/original/bn-train.jsonl`](jsonl/original/bn-train.jsonl) (Bengali).
+
+ Similarly, the 2nd instance in [`jsonl/en-to-x/bn_from_en-train.jsonl`](jsonl/en-to-x/bn_from_en-train.jsonl) (Bengali, translated from English) corresponds to the 2nd instance in [`jsonl/original/en-train.jsonl`](jsonl/original/en-train.jsonl) (English).
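+
+ Because of this one-to-one line alignment, parallel (original, translated) pairs can be recovered by reading the two files side by side. A minimal sketch, using the example files mentioned above:
+
+ ```
+ import json
+
+ # Bengali originals and their English translations (same order, line by line).
+ original_path = "jsonl/original/bn-train.jsonl"
+ translated_path = "jsonl/x-to-en/en_from_bn-train.jsonl"
+
+ with open(original_path, encoding="utf-8") as f_bn, open(translated_path, encoding="utf-8") as f_en:
+     pairs = [(json.loads(bn), json.loads(en)) for bn, en in zip(f_bn, f_en)]
+
+ # Each pair describes the same underlying QA instance in the two languages.
+ print(pairs[1][0]["Question"], "<->", pairs[1][1]["Question"])
+ ```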
+
+ ### Data Fields
+
+ Each instance (a QA pair) consists of the following fields:
+
+ - `Question`: Question to be answered (str)
+ - `Title`: Document title (str)
+ - `Sentence`: Candidate answer sentence from the document (str)
+ - `Label`: Label indicating whether the sentence correctly answers the question (int, 1: correct, 0: incorrect)
138
+
139
+ ### Data Splits
140
+
141
+ | | | **#Questions** | | | | **#Sentences** | |
142
+ |---------------------|----------:|---------------:|---------:|---|----------:|---------------:|---------:|
143
+ | | **train** | **dev** | **test** | | **train** | **dev** | **test** |
144
+ | **Bengali (bn)** | 7,978 | 2,056 | 316 | | 1,376,432 | 351,186 | 37,465 |
145
+ | **English (en)** | 6,730 | 1,686 | 918 | | 1,643,702 | 420,899 | 249,513 |
146
+ | **Finnish (fi)** | 10,859 | 2,731 | 1,870 | | 1,567,695 | 408,205 | 298,093 |
147
+ | **Indonesian (id)** | 9,310 | 2,339 | 1,355 | | 960,270 | 236,076 | 97,057 |
148
+ | **Japanese (ja)** | 11,848 | 2,981 | 1,504 | | 3,183,037 | 822,654 | 444,106 |
149
+ | **Korean (ko)** | 7,354 | 1,943 | 1,389 | | 1,558,191 | 392,361 | 199,043 |
150
+ | **Russian (ru)** | 9,187 | 2,294 | 1,395 | | 3,190,650 | 820,668 | 367,595 |
151
+ | **Swahili (sw)** | 8,350 | 2,850 | 1,896 | | 1,048,303 | 269,894 | 74,775 |
152
+
153
+ See [our paper](#citation-information) for more details about the statistics of the datasets.
154
+
155
+
156
+ ## Dataset Creation
157
+
158
+ ### Source Data
159
+
160
+ The source of TyDi-AS2 dataset is [TyDi QA](https://ai.google.com/research/tydiqa), which is a question answering dataset.
161
+
162
+ ### Annotations
163
+
164
+ #### Annotation process
165
+
166
+ TyDi QA is a QA dataset spanning questions from 11 typologically diverse languages.
167
+ Each instance comprises a human-generated question, a single Wikipedia document as context, and one or more spans from the document containing the answer.
168
+ To convert each instance into AS2 instances, we split the context document into sentences and heuristically identify the correct asnwer sentences using the annotated answer spans.
169
+ To split documents, we use multiple different sentence tokenizers for the diverse languages and omit languages for which we could not find a suitable sentence tokenizer:
170
+ 1. [bltk](https://github.com/saimoncse19/bltk) for Bengali
171
+ 2. [blingfire](https://github.com/microsoft/BlingFire) for Swahili, Indonesian, and Korean
172
+ 3. [pysdb](https://github.com/nipunsadvilkar/pySBD) for English and Russian
173
+ 4. [nltk](https://www.nltk.org/) for Finnish
174
+ 5. [Konoha](https://github.com/himkt/konoha) for Japanese
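+
+ The snippet below is an illustrative sketch of this conversion heuristic, not the exact script used to build the dataset; `split_sentences` stands in for whichever language-specific tokenizer from the list above applies, and the span-containment check is one simple way to realize the heuristic.
+
+ ```
+ def convert_to_as2(question, title, document, answer_spans, split_sentences):
+     """Turn one TyDi QA instance into AS2 (Question, Title, Sentence, Label) instances."""
+     instances = []
+     for sentence in split_sentences(document):
+         # A sentence is treated as correct if it contains one of the annotated answer spans.
+         label = int(any(span in sentence for span in answer_spans))
+         instances.append(
+             {"Question": question, "Title": title, "Sentence": sentence, "Label": label}
+         )
+     return instances
+ ```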
+
+ #### Who are the annotators?
+
+ [Shivanshu Gupta](https://huggingface.co/shivanshu) converted TyDi QA to TyDi-AS2.
+ [Yoshitomo Matsubara](https://huggingface.co/yoshitomo-matsubara) translated the non-English samples to English and vice versa for the Xtr-TyDi-AS2 dataset.
+ Since sentence tokenization and identifying answer sentences can introduce errors, we conducted a manual validation of the AS2 datasets. For each language, we randomly selected 50 instances and verified the accuracy of the answer sentences through manual inspection. Our findings revealed that the answer sentences were accurate in 98% of the cases.
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ Shivanshu Gupta (@shivanshu)
+
+
+ ### Licensing Information
+
+ [CDLA-Permissive-2.0](LICENSE.md)
+
+ ### Citation Information
+
+ ```
+ @article{gupta2023cross-lingual,
+   title={Cross-Lingual Knowledge Distillation for Answer Sentence Selection in Low-Resource Languages},
+   author={Gupta, Shivanshu and Matsubara, Yoshitomo and Chadha, Ankit and Moschitti, Alessandro},
+   journal={arXiv preprint arXiv:2305.16302},
+   year={2023}
+ }
+ ```
+
+
+ ### Contributions
+
+ - [Shivanshu Gupta](https://huggingface.co/shivanshu)
+ - [Yoshitomo Matsubara](https://huggingface.co/yoshitomo-matsubara)
+ - Ankit Chadha
+ - Alessandro Moschitti
jsonl/en-to-x/bn_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e69b4283561e10e8ebbfe86e57a87704367e58968479d4f742cab0faf0f8139a
+ size 373410938
jsonl/en-to-x/bn_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acdeab8e1cd5a3f7cd9e77785c5813606377497bd4f7951928d0b3eb29e723aa
+ size 217152254
jsonl/en-to-x/bn_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efd0613ea1c05841b7b2d3e8a369ffa3970fc9311f36ef313c6d8c3f430d1004
+ size 1455083994
jsonl/en-to-x/fi_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6490be4ff45d7e877e40f29ee3549c10cbe5ede36a44a12ad259d1a98f62bce
+ size 108162302
jsonl/en-to-x/fi_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8d941f07d3269118980ead9af8620235f603a0da45c5e49d2a559c3b2dee4eb
+ size 63449021
jsonl/en-to-x/fi_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72fd5abd4f72e861df819f778ed0b82a5cfe2e3e3adf1081b1284d0769bfb544
+ size 422302555
jsonl/en-to-x/id_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65afd10ca1f325cbd7d2be8170b0729bf349215a7de6333e43fb19b0aeecd59e
+ size 97173917
jsonl/en-to-x/id_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87e5a4e28e77413cd4892a9f88261875fc69f50fe6f5af7ef570ee1c438738ff
+ size 56921229
jsonl/en-to-x/id_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58badb5695d27721a0973e8de5487fa44932f32286f3a035138ba9cb9b9ca878
+ size 379564125
jsonl/en-to-x/ja_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f4feaed86a2be4942abbae1b76f9863d88f034792362466f63aa708d0c8a23e
+ size 212412519
jsonl/en-to-x/ja_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dffa85c060830aa9ec92827399c2e19ee5400194f91c5e20ff6f2281ee80730
+ size 123297551
jsonl/en-to-x/ja_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f90fabb85133021af8446b7f11a72870de4b7a17832bb0e1339d6128db9fbe4
+ size 831401230
jsonl/en-to-x/ko_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1b363c374c6e76b28d9845eb7f33790472ca0bb8b3185fdb835b28cd1d74f1e
+ size 180016904
jsonl/en-to-x/ko_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd61cbdee462414b9be5032636b7d67db042f64255036a771203e98e558e904b
+ size 105464294
jsonl/en-to-x/ko_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1b8a633553f50a4a0fd5b483bc7a82d363f4d2339e1932d4e8a8873cd0edfe1
+ size 704450542
jsonl/en-to-x/ru_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79a4dfeba767a718b536c2be6c34ac101f2a33e4702c3d7da08abb89c6c40e01
+ size 390557236
jsonl/en-to-x/ru_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48231649b516dc5ef52be4c090dd797d715b1cfc3bda91dc8f0aa518086dbd93
+ size 230074857
jsonl/en-to-x/ru_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58fe6fd3607e56d9b114f7931f4faded519192fd6b731cbe81cc4ed1e5d56fd9
+ size 1528157703
jsonl/en-to-x/sw_from_en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d88c2ae52f4f1a98ed5ab9b7125d1de6e5e09a45b41fce36b1fd8ae8fa25e31f
+ size 97417874
jsonl/en-to-x/sw_from_en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae95e6edf1740cae42658aae5cf791c50936c4ffd8652d5dbba874a5c683c009
+ size 57214362
jsonl/en-to-x/sw_from_en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a099c8e51f46ce0fd07523add7515ff1a1a1b50e969aef3a48592992322d705
+ size 380611499
jsonl/original/bn-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22e1b26271452015c22ff997f0c9679a3dc427fe1113c3208852fa3e1931da45
+ size 325586506
jsonl/original/bn-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfb4b5524f4c079eea90d2471221285d3ee533d140808c91e5df3902b21e927e
+ size 34531092
jsonl/original/bn-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8c17c483fdfa72ab5addb4becdca0c83fe640ae84c635867e7db4b65e16a991
+ size 1278438976
jsonl/original/en-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4338458a354e8ca50d5ded72a79ec964e3d4e1b6fefa181c158d1d1db934d2bf
+ size 95482994
jsonl/original/en-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc7c29582157e04a1ca6ec1c32e524f868dd6fc6c7520e718d10f583bb990ddd
+ size 56218047
jsonl/original/en-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cc53889ab7fbfbcbea14ce4a757b59ef43039f2f8ae280f1000d81af2a22788
+ size 373045352
jsonl/original/fi-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dc3763fc639e972210aceff89e19b3fe40ef4ecdb4a56f817e78b737a9be94b
+ size 108755578
jsonl/original/fi-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe8d780d5c0f79be86061e5351b96380934843008beab688edb17614df9e0ec8
+ size 80353796
jsonl/original/fi-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21026f6e8a22d33f3c580453bf4c301baac48c1957a01ae68224ea1a70e54f04
+ size 417621171
jsonl/original/id-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0952cd9687634fc9013637ef247c8dc000148672f1d942d6eecb36d8f410fbce
+ size 63926240
jsonl/original/id-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15b803dc80a1746627b249a6eddab71b4bd5325d5ab2c25145f5b441656c3d5d
+ size 26467145
jsonl/original/id-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:761227de4209a4f88976c5eabdd07fc2313bbf102d48e8ddd2a7a892c1c99d28
+ size 259480712
jsonl/original/ja-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64105967f614859729a938735cafbd7cddbad4dbaf74c2cde36d9d284485fdc6
+ size 371458468
jsonl/original/ja-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:264fcc88878146ca42ecfd90595951c2b7429fe14e9b677632a243dc18752e2e
+ size 202781234
jsonl/original/ja-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:767e78fd5da9ebc17113e1deb57e37427b56b5ea25789e291ca621cb02527159
+ size 1433844190
jsonl/original/ko-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c148228178b64419baf9c67efc8b31e5eadf6a8b43eba11db56bfab41ce4fde2
+ size 217354439
jsonl/original/ko-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dbe7259f96d31d31e6386dc093e1c0b49489365f87c8053a7c41e5b91348630
+ size 113578007
jsonl/original/ko-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d0d9ceb1c6ce713e68e52475878fd4add607fa928e4c3ae21c48633317a4315
+ size 865997444
jsonl/original/ru-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41fcda0a7f161a9fd7101901917b42cdbdf9719ecb55de46c5e365cd5e8e1517
+ size 781825387
jsonl/original/ru-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ca30a2d40a85a0902817ff16daa822e90df3a900e1338db8dae93a31eb99b14
+ size 338156334
jsonl/original/ru-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:520ba5bc844e0f35601404372cd5ddb18a0908f8ec5a86a85ab658f87b5e0e7c
+ size 3014679374
jsonl/original/sw-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc6a11fd33375a21f43f2a89e3a667190a420feeb6b63f5a620639672a85cb14
+ size 70587197
jsonl/original/sw-test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca1f3181d8c32c25e614c8d9cf9aecf8cad6f85412f3439d38e97b52a4796eb9
+ size 19194629
jsonl/original/sw-train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:984b34392682e76a927400b23c8dad034f42768ad152f966319215573718a89c
+ size 275653959
jsonl/x-to-en/en_from_bn-dev.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c67344a646f64727eec910ade9f0e30e6f7c7f69fbf7f3c5d57fee29050e9c5
+ size 85132900
jsonl/x-to-en/en_from_bn-test.jsonl ADDED
The diff for this file is too large to render.