---
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
  - split: validation
    path: data/validation-*
dataset_info:
  features:
  - name: source
    dtype: string
  - name: source_labels
    dtype: string
  - name: rouge_scores
    dtype: string
  - name: paper_id
    dtype: string
  - name: target
    dtype: string
  - name: full_source_text
    dtype: string
  - name: input_ids
    sequence: int32
  - name: attention_mask
    sequence: int8
  - name: labels
    sequence: int64
  splits:
  - name: train
    num_bytes: 17340567
    num_examples: 1992
  - name: test
    num_bytes: 5620222
    num_examples: 618
  - name: validation
    num_bytes: 5534448
    num_examples: 619
  download_size: 6328102
  dataset_size: 28495237
---

# Dataset Card for "tokenized_dataset_bart"

[More Information Needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
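The feature schema above indicates that each record carries both the raw text fields (`source`, `target`, `full_source_text`, etc.) and pre-tokenized model inputs (`input_ids`, `attention_mask`, `labels`), so the splits can be consumed by a BART-style sequence-to-sequence model without further tokenization. A minimal loading sketch is shown below; the repository id `your-username/tokenized_dataset_bart` is a placeholder, not the actual hub path.

```python
from datasets import load_dataset

# Load the train/test/validation splits declared in the config above.
# Replace the placeholder repository id with the actual hub path.
ds = load_dataset("your-username/tokenized_dataset_bart")

train = ds["train"]  # 1,992 examples per the split metadata
print(train.column_names)
# ['source', 'source_labels', 'rouge_scores', 'paper_id', 'target',
#  'full_source_text', 'input_ids', 'attention_mask', 'labels']

# Keep only the tensor columns and hand them to a PyTorch DataLoader.
train.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
print(train[0]["input_ids"].shape)  # length of the pre-tokenized encoder input
```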