---
language:
- en
license: mit
size_categories:
- 10K<n<100K
task_categories:
- text2text-generation
dataset_info:
- config_name: default
  features:
  - name: pageid
    dtype: int64
  - name: title
    dtype: string
  - name: revid
    dtype: int64
  - name: description
    dtype: string
  - name: categories
    sequence: string
  - name: inputs
    dtype: string
  - name: targets
    dtype: string
  splits:
  - name: train
    num_bytes: 829905155
    num_examples: 44754
  download_size: 489718761
  dataset_size: 829905155
- config_name: instruct
  features:
  - name: inputs
    dtype: string
  - name: targets
    dtype: string
  - name: _task_name
    dtype: string
  splits:
  - name: train
    num_bytes: 815113125
    num_examples: 44754
  download_size: 480497634
  dataset_size: 815113125
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
- config_name: instruct
  data_files:
  - split: train
    path: instruct/train-*
source_datasets: euirim/goodwiki
---
# goodwiki-text2text-completion

Wikipedia articles split pseudo-randomly on paragraph and/or markdown-header boundaries (a sketch of the splitting is shown after this list).

- inputs/outputs are in the `inputs`/`targets` columns
- see the `instruct` config, which has prompts prepended to the text in `inputs`
- source data: [euirim/goodwiki](https://huggingface.co/datasets/euirim/goodwiki)
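
The card does not include the script that built the pairs; the following is a minimal sketch of how an article might be split at a pseudo-random paragraph or header boundary into a completion pair. The function name, segmentation rules, and seeding are assumptions for illustration, not the author's actual code.

```python
import random

def make_example(markdown_text: str, seed: int = 0) -> dict:
    """Sketch: split one article into an (inputs, targets) completion pair."""
    rng = random.Random(seed)
    segments, current = [], []
    for line in markdown_text.splitlines():
        # Start a new segment at markdown headers or blank lines (paragraph breaks).
        if line.startswith("#") or line.strip() == "":
            if current:
                segments.append("\n".join(current))
                current = []
            if line.startswith("#"):
                current.append(line)
        else:
            current.append(line)
    if current:
        segments.append("\n".join(current))

    # Cut at a pseudo-random boundary; single-segment articles yield an empty target.
    cut = rng.randint(1, max(1, len(segments) - 1))
    return {
        "inputs": "\n\n".join(segments[:cut]),
        "targets": "\n\n".join(segments[cut:]),
    }

pair = make_example("# Title\n\nFirst paragraph.\n\nSecond paragraph.")
```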
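
Both configs load with the 🤗 `datasets` library. The repository id below is a placeholder; substitute the actual hub path where this dataset is hosted.

```python
from datasets import load_dataset

# Placeholder repo id -- replace with the dataset's real hub path.
REPO_ID = "<user>/goodwiki-text2text-completion"

# Plain completion pairs plus article metadata (pageid, title, revid, ...).
default = load_dataset(REPO_ID, "default", split="train")

# The same pairs with an instruction prompt prepended to `inputs`.
instruct = load_dataset(REPO_ID, "instruct", split="train")

print(default[0]["inputs"][:200])
print(default[0]["targets"][:200])
```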