---
dataset_info:
features:
- name: login
dtype: string
- name: dates
dtype: string
splits:
- name: transformers
num_bytes: 4418033
num_examples: 118397
- name: peft
num_bytes: 445862
num_examples: 11988
- name: evaluate
num_bytes: 62496
num_examples: 1669
- name: huggingface_hub
num_bytes: 54110
num_examples: 1438
- name: accelerate
num_bytes: 235863
num_examples: 6321
- name: datasets
num_bytes: 670437
num_examples: 17855
- name: optimum
num_bytes: 70249
num_examples: 1874
- name: pytorch_image_models
num_bytes: 1056523
num_examples: 28333
- name: gradio
num_bytes: 947321
num_examples: 25460
- name: tokenizers
num_bytes: 296887
num_examples: 8005
- name: diffusers
num_bytes: 753980
num_examples: 20268
- name: safetensors
num_bytes: 72211
num_examples: 1947
- name: candle
num_bytes: 423910
num_examples: 11435
- name: text_generation_inference
num_bytes: 244550
num_examples: 6566
- name: chat_ui
num_bytes: 174602
num_examples: 4697
- name: hub_docs
num_bytes: 7984
num_examples: 213
download_size: 6034494
dataset_size: 9935018
configs:
- config_name: default
data_files:
- split: peft
path: data/peft-*
- split: hub_docs
path: data/hub_docs-*
- split: evaluate
path: data/evaluate-*
- split: huggingface_hub
path: data/huggingface_hub-*
- split: accelerate
path: data/accelerate-*
- split: datasets
path: data/datasets-*
- split: optimum
path: data/optimum-*
- split: pytorch_image_models
path: data/pytorch_image_models-*
- split: gradio
path: data/gradio-*
- split: tokenizers
path: data/tokenizers-*
- split: diffusers
path: data/diffusers-*
- split: transformers
path: data/transformers-*
  - split: safetensors
    path: data/safetensors-*
  - split: candle
    path: data/candle-*
  - split: text_generation_inference
    path: data/text_generation_inference-*
  - split: chat_ui
    path: data/chat_ui-*
---
# Dataset Card for "stars"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
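
## Loading the dataset

Each split corresponds to one library (e.g. `transformers`, `peft`, `gradio`) and can be loaded on its own with the 🤗 `datasets` library. Below is a minimal sketch; the repo id `open-source-metrics/stars` is an assumption (this card does not state it), so substitute the actual repository path.

```python
from datasets import load_dataset

# Repo id is an assumption for illustration; replace with the real one.
stars = load_dataset("open-source-metrics/stars", split="transformers")

# Every example has two string fields, as declared in the card's
# dataset_info: the stargazer's GitHub login and the date of the star.
print(stars.features)  # {'login': Value(dtype='string'), 'dates': Value(dtype='string')}
print(stars[0])
```

Passing `split="transformers"` returns a single `Dataset` rather than a `DatasetDict`, which is convenient here since the splits are independent per-library star histories rather than train/test partitions.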