---
annotations_creators:
- expert-generated
language_creators:
- crowdsourced
language:
- en
license: bsd
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- natural-language-inference
pretty_name: babi_nli
tags:
- logical reasoning
- nli
- natural-language-inference
- reasoning
- logic
dataset_info:
- config_name: single-supporting-fact
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': not-entailed
          '1': entailed
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 223761
    num_examples: 1000
  - name: validation
    num_bytes: 112784
    num_examples: 500
  - name: test
    num_bytes: 111569
    num_examples: 500
  download_size: 91968
  dataset_size: 448114
- config_name: three-supporting-facts
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': not-entailed
          '1': entailed
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 1479097
    num_examples: 1000
  - name: validation
    num_bytes: 783450
    num_examples: 500
  - name: test
    num_bytes: 735719
    num_examples: 500
  download_size: 558073
  dataset_size: 2998266
- config_name: two-supporting-facts
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': not-entailed
          '1': entailed
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 507085
    num_examples: 1000
  - name: validation
    num_bytes: 261742
    num_examples: 500
  - name: test
    num_bytes: 244322
    num_examples: 500
  download_size: 194986
  dataset_size: 1013149
configs:
- config_name: single-supporting-fact
  data_files:
  - split: train
    path: single-supporting-fact/train-*
  - split: validation
    path: single-supporting-fact/validation-*
  - split: test
    path: single-supporting-fact/test-*
- config_name: three-supporting-facts
  data_files:
  - split: train
    path: three-supporting-facts/train-*
  - split: validation
    path: three-supporting-facts/validation-*
  - split: test
    path: three-supporting-facts/test-*
- config_name: two-supporting-facts
  data_files:
  - split: train
    path: two-supporting-facts/train-*
  - split: validation
    path: two-supporting-facts/validation-*
  - split: test
    path: two-supporting-facts/test-*
---
# bAbI_nli

bAbI tasks recast as natural language inference (NLI): each bAbI story serves as a premise and a candidate statement as a hypothesis, labeled `entailed` or `not-entailed`.

Original bAbI tasks: https://github.com/facebookarchive/bAbI-tasks

tasksource recasting code: https://colab.research.google.com/drive/1J_RqDSw9iPxJSBvCJu-VRbjXnrEjKVvr?usp=sharing
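
A minimal loading sketch with the 🤗 `datasets` library (the Hub id `tasksource/babi_nli` is assumed here; substitute the actual repository path if it differs):

```python
from datasets import load_dataset

# Assumed Hub id for this card; replace with the actual repository path if needed.
dataset = load_dataset("tasksource/babi_nli", "single-supporting-fact")

# Each example pairs a bAbI story (premise) with a statement (hypothesis)
# and a binary label: 0 = not-entailed, 1 = entailed.
example = dataset["train"][0]
label_feature = dataset["train"].features["label"]
print(example["premise"])
print(example["hypothesis"], "->", label_feature.int2str(example["label"]))
```

The other configs (`two-supporting-facts`, `three-supporting-facts`) load the same way by changing the second argument.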
```bibtex
@article{weston2015towards,
  title={Towards {AI}-complete question answering: A set of prerequisite toy tasks},
author={Weston, Jason and Bordes, Antoine and Chopra, Sumit and Rush, Alexander M and Van Merri{\"e}nboer, Bart and Joulin, Armand and Mikolov, Tomas},
journal={arXiv preprint arXiv:1502.05698},
year={2015}
}
``` |