---
dataset_info:
  features:
  - name: day
    dtype: string
  - name: num_downloads
    dtype: int64
  splits:
  - name: accelerate
    num_bytes: 27390
    num_examples: 1245
  - name: datasets
    num_bytes: 27390
    num_examples: 1245
  - name: diffusers
    num_bytes: 16764
    num_examples: 762
  - name: evaluate
    num_bytes: 19360
    num_examples: 880
  - name: gradio
    num_bytes: 30756
    num_examples: 1398
  - name: huggingface_hub
    num_bytes: 28270
    num_examples: 1285
  - name: optimum
    num_bytes: 22374
    num_examples: 1017
  - name: peft
    num_bytes: 11616
    num_examples: 528
  - name: pytorch_image_models
    num_bytes: 30756
    num_examples: 1398
  - name: safetensors
    num_bytes: 12826
    num_examples: 583
  - name: tokenizers
    num_bytes: 30756
    num_examples: 1398
  - name: transformers
    num_bytes: 31438
    num_examples: 1429
  - name: sentence_transformers
    num_bytes: 5742
    num_examples: 261
  download_size: 171911
  dataset_size: 295438
configs:
- config_name: default
  data_files:
  - split: accelerate
    path: data/accelerate-*
  - split: datasets
    path: data/datasets-*
  - split: diffusers
    path: data/diffusers-*
  - split: evaluate
    path: data/evaluate-*
  - split: gradio
    path: data/gradio-*
  - split: huggingface_hub
    path: data/huggingface_hub-*
  - split: optimum
    path: data/optimum-*
  - split: peft
    path: data/peft-*
  - split: pytorch_image_models
    path: data/pytorch_image_models-*
  - split: safetensors
    path: data/safetensors-*
  - split: tokenizers
    path: data/tokenizers-*
  - split: transformers
    path: data/transformers-*
  - split: sentence_transformers
    path: data/sentence_transformers-*
---
# Dataset Card for "pip"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
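
Per the schema above, each split appears to hold a per-library time series of pip downloads, with a `day` string and an `int64` `num_downloads` per row. A minimal sketch of loading one split with the `datasets` library; the repo ID below is a placeholder, assuming the dataset is hosted on the Hugging Face Hub under a similar name:

```python
from datasets import load_dataset

# Placeholder repo ID -- substitute this dataset's actual Hub path.
ds = load_dataset("username/pip", split="transformers")

# Each row carries a "day" (string) and "num_downloads" (int64),
# matching the features declared in the YAML header above.
print(ds.features)
print(ds[0])
```

Because each library is exposed as its own split rather than a separate config, every split can be loaded independently with a single `split=` argument, as shown above.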