---
dataset_info:
  features:
  - name: _id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: emb
    sequence: float32
  - name: tokens_count
    dtype: int64
  splits:
  - name: train
    num_bytes: 459777479.8019841
    num_examples: 100000
  download_size: 233856601
  dataset_size: 459777479.8019841
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---

# Dataset Card for "wikipedia-2023-11-pt-tokens-100000"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
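
## Usage

The dataset has a single `train` split of 100,000 examples and can be loaded with the 🤗 `datasets` library. The snippet below is a minimal sketch; `your-namespace` is a placeholder for the actual Hub namespace hosting this repository.

```python
from datasets import load_dataset

# Load the single "train" split (100,000 examples).
# NOTE: "your-namespace" is a placeholder; replace it with the real Hub owner.
ds = load_dataset("your-namespace/wikipedia-2023-11-pt-tokens-100000", split="train")

# Each example exposes the features declared in the dataset_info above.
example = ds[0]
print(example["title"])
print(example["tokens_count"])   # int64 token count for the article
print(len(example["emb"]))       # length of the float32 embedding sequence
```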