# Constants for ImageNet-1k Leaderboard
from pathlib import Path
BANNER = """
🏆 ImageNet-1k Leaderboard
Compare computer vision models on ImageNet-1k classification
"""
INTRODUCTION_TEXT = """
# ImageNet-1k Leaderboard
Welcome to the ImageNet-1k Leaderboard! This leaderboard tracks the performance of computer vision models on the ImageNet-1k dataset, which contains approximately 1.28 million training images across 1000 classes.
## Key Metrics
- **Top-1 Accuracy**: Percentage of images where the model's top prediction is correct
- **Top-5 Accuracy**: Percentage of images where the correct class is among the top 5 predictions
- **Parameters**: Number of trainable parameters in the model
- **FLOPs**: Floating point operations required for inference
- **Inference Time**: Average time per image (if available)
## Dataset
ImageNet-1k is a subset of the ImageNet dataset containing:
- **Training set**: ~1.28M images
- **Validation set**: 50K images
- **Classes**: 1000 object categories
- **Image size**: Variable (typically resized to 224x224 or 384x384)
## Hardware Configuration
All results are measured on an **NVIDIA L4 GPU** to ensure a consistent and fair comparison across models.
The leaderboard is sorted by Top-1 Accuracy (descending) as the primary metric.
"""
CITATION_TEXT = """@article{imagenet,
title={ImageNet: A large-scale hierarchical image database},
author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
journal={2009 IEEE conference on computer vision and pattern recognition},
pages={248--255},
year={2009},
organization={IEEE}
}"""
METRICS_TAB_TEXT = """
# Evaluation Metrics
## Hardware Configuration
All models are evaluated on an **NVIDIA L4 GPU** to ensure a consistent and fair comparison across different architectures.
## Top-1 Accuracy
The percentage of test images for which the model's highest confidence prediction matches the ground truth label.
## Top-5 Accuracy
The percentage of test images for which the ground truth label appears in the model's top 5 highest confidence predictions.
## Parameters
The total number of trainable parameters in the model. This gives an indication of model complexity and size.
## FLOPs (Floating Point Operations)
The number of floating point operations required for a single forward pass through the model. This is a measure of computational complexity.
## Inference Time
The average time required to process a single image on the NVIDIA L4 GPU. This metric helps compare the computational efficiency of different models.
## Model Size
The size of the model file in MB or GB, indicating storage requirements.
"""
# Directory for storing evaluation requests
DIR_OUTPUT_REQUESTS = Path("evaluation_requests")
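
# Illustrative sketch only: one way a submitted evaluation request could be stored
# under DIR_OUTPUT_REQUESTS. The JSON schema and file-naming scheme shown here are
# assumptions, not the leaderboard's actual submission format.
def save_evaluation_request(model_name):
    """Write a pending-evaluation record and return the path it was saved to."""
    import json
    from datetime import datetime, timezone

    DIR_OUTPUT_REQUESTS.mkdir(parents=True, exist_ok=True)
    request = {
        "model": model_name,
        "status": "PENDING",
        "submitted_at": datetime.now(timezone.utc).isoformat(),
    }
    # Slashes in Hub-style names (e.g. "org/model") are not valid in filenames.
    path = DIR_OUTPUT_REQUESTS / f"{model_name.replace('/', '__')}.json"
    path.write_text(json.dumps(request, indent=2))
    return path
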
# CSS styling for the leaderboard
LEADERBOARD_CSS = """
.leaderboard-table {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.leaderboard-table th {
background-color: #f8f9fa;
font-weight: bold;
text-align: center;
padding: 12px;
border: 1px solid #dee2e6;
}
.leaderboard-table td {
text-align: center;
padding: 8px 12px;
border: 1px solid #dee2e6;
}
.leaderboard-table tr:nth-child(even) {
background-color: #f8f9fa;
}
.leaderboard-table tr:hover {
background-color: #e9ecef;
}
.markdown-text {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
line-height: 1.6;
}
.tab-buttons {
margin-bottom: 20px;
}
#banner {
text-align: center;
margin-bottom: 30px;
padding: 20px;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border-radius: 10px;
}
#show-proprietary-checkbox {
margin-top: 10px;
}
"""