---
license: apache-2.0
size_categories:
- 100K<n<1M
source_datasets: pacovaldez/stackoverflow-questions
task_categories:
- text-classification
- text-generation
dataset_info:
- config_name: default
features:
- name: title
dtype: string
- name: body
dtype: string
- name: label
dtype: int64
- name: token_count
dtype: int64
splits:
- name: train
num_bytes: 1082904744
num_examples: 212663
- name: validation
num_bytes: 25509099.6585352
num_examples: 5000
- name: test
num_bytes: 25510304.23774933
num_examples: 5000
download_size: 461549130
dataset_size: 1133924147.8962846
- config_name: original
features:
- name: title
dtype: string
- name: body
dtype: string
- name: label
dtype: int64
- name: token_count
dtype: int64
splits:
- name: train
num_bytes: 1082904744
num_examples: 212663
- name: validation
num_bytes: 539369505
num_examples: 105721
- name: test
num_bytes: 1078141988
num_examples: 211315
download_size: 1099545678
dataset_size: 2700416237
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
- config_name: original
data_files:
- split: train
path: original/train-*
- split: validation
path: original/validation-*
- split: test
path: original/test-*
---
# stackoverflow questions for text classification: 'long'
This is `pacovaldez/stackoverflow-questions` filtered to questions whose `title` + `body` total 1024 or more GPT-2 tokens.
https://huggingface.co/datasets/pacovaldez/stackoverflow-questions
---