---
dataset_info:
features:
- name: day
dtype: string
- name: num_downloads
dtype: int64
splits:
- name: accelerate
num_bytes: 28688
num_examples: 1304
- name: datasets
num_bytes: 28688
num_examples: 1304
- name: diffusers
num_bytes: 18062
num_examples: 821
- name: evaluate
num_bytes: 20658
num_examples: 939
- name: gradio
num_bytes: 32054
num_examples: 1457
- name: huggingface_hub
num_bytes: 29568
num_examples: 1344
- name: optimum
num_bytes: 23672
num_examples: 1076
- name: peft
num_bytes: 12914
num_examples: 587
- name: pytorch_image_models
num_bytes: 32054
num_examples: 1457
- name: safetensors
num_bytes: 14124
num_examples: 642
- name: tokenizers
num_bytes: 32054
num_examples: 1457
- name: transformers
num_bytes: 32736
num_examples: 1488
- name: sentence_transformers
num_bytes: 7040
num_examples: 320
download_size: 180766
dataset_size: 312312
configs:
- config_name: default
data_files:
- split: accelerate
path: data/accelerate-*
- split: datasets
path: data/datasets-*
- split: diffusers
path: data/diffusers-*
- split: evaluate
path: data/evaluate-*
- split: gradio
path: data/gradio-*
- split: huggingface_hub
path: data/huggingface_hub-*
- split: optimum
path: data/optimum-*
- split: peft
path: data/peft-*
- split: pytorch_image_models
path: data/pytorch_image_models-*
- split: safetensors
path: data/safetensors-*
- split: tokenizers
path: data/tokenizers-*
- split: transformers
path: data/transformers-*
- split: sentence_transformers
path: data/sentence_transformers-*
---
# Dataset Card for "pip"
[More Information Needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)