---
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
dataset_info:
  features:
  - name: example_id
    dtype: int64
  - name: original_sentence
    dtype: string
  - name: page_title
    dtype: string
  - name: section_title
    sequence: string
  - name: paragraph_text
    dtype: string
  - name: sentence_start_byte_offset
    dtype: int64
  - name: sentence_end_byte_offset
    dtype: int64
  - name: article_url
    dtype: string
  - name: annotations
    list:
    - name: category
      dtype: string
    - name: decontextualized_sentence
      dtype: string
    - name: example_id
      dtype: int64
    - name: original_sentence
      dtype: string
    - name: worker_id
      dtype: int64
  splits:
  - name: train
    num_bytes: 15224065
    num_examples: 11290
  - name: validation
    num_bytes: 5315183
    num_examples: 1945
  - name: test
    num_bytes: 5359001
    num_examples: 1945
  download_size: 13617475
  dataset_size: 25898249
---

# Dataset Card for "decontextualization"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)