# NOTE: The lines below were scrape residue from the Hugging Face Spaces web
# page (status badges, commit hashes, and the line-number gutter), not Python
# code. They are kept here as a comment so the module remains importable.
# Spaces: Running | File size: 6,622 Bytes
# commits: e83848d f216956 9cb5506
# Shields.io badge image URLs used when rendering external links in the UI.
home_icon = "https://img.shields.io/badge/Website-Page-blue?logo=homeassistant&logoColor=white&style=flat-square"  # project homepage badge
arxiv_icon = "https://img.shields.io/badge/ArXiv-Paper-b31b1b?logo=arxiv&logoColor=white&style=flat-square"  # arXiv paper badge
github_icon = "https://img.shields.io/badge/GitHub-Repo-181717?logo=github&logoColor=white&style=flat-square"  # GitHub repository badge
def hyperlink(name, link):
    """Wrap *name* in an HTML anchor that opens *link* in a new browser tab."""
    anchor_style = (
        "color: var(--link-text-color); "
        "text-decoration: underline;text-decoration-style: dotted;"
    )
    return f'<a target="_blank" href="{link}" style="{anchor_style}">{name}</a>'
def get_title_md():
    """Return the markdown title banner for the leaderboard page."""
    # Plain string literal: the original used an f-string with no placeholders.
    return '''
# 🏆 Leaderboard for 3D Generative Models
'''
def get_intro_md():
    """Return the short markdown blurb introducing the leaderboard."""
    intro = (
        "\n"
        "This leaderboard provides a centralized evaluation platform for "
        "evaluating and tracking the performance of 3D generation models.\n"
    )
    return intro
def get_model_intro_md():
    """Return the markdown blurb introducing the covered model families."""
    return (
        "\n"
        "This leaderboard spans a diverse set of state-of-the-art 3D "
        "generation models, including different conditional settings such as "
        "images, text, or combinations thereof.\n"
    )
def get_model_description_md(model_config, cols=10, except_models=None):
    """Render a per-task summary of hyperlinked model names.

    Args:
        model_config: Mapping whose values expose ``task``, ``model_name``,
            ``page_link`` and ``code_link`` attributes.
        cols: Number of cells per table row.
        except_models: Optional collection of model names to omit.
            Defaults to ``None`` (exclude nothing) — the previous mutable
            ``[]`` default was a Python anti-pattern.

    Returns:
        Markdown string: for each task, a bold heading with the model count
        followed by an HTML table of links, the last row padded with empty
        cells so every row has exactly ``cols`` cells.
    """
    # Set membership is O(1); also tolerates except_models=None.
    excluded = set(except_models) if except_models else set()

    # Group hyperlinked model names by task; a set de-duplicates models that
    # appear under several configs.
    model_list = {}
    for cfg in model_config.values():
        if cfg.model_name in excluded:
            # Skipping before creating the task entry also prevents emitting
            # a "0 <task> Generative Models" heading with an empty table when
            # every model of a task is excluded.
            continue
        link = cfg.page_link if cfg.page_link else cfg.code_link
        model_list.setdefault(cfg.task, set()).add(hyperlink(cfg.model_name, link))

    # Collect fragments and join once instead of quadratic `+=` on a string.
    parts = []
    for task, models in model_list.items():
        parts.append(f"\n**{len(models)} {task} Generative Models**\n")
        parts.append('<table style="width:100%; text-align:left; border:none; border-collapse: collapse;">\n')
        for i, model in enumerate(models):
            if i % cols == 0:
                parts.append(' <tr>\n')
            parts.append(f' <td>{model}</td>\n')
            if (i + 1) % cols == 0:
                parts.append(' </tr>\n')
        # Pad the final, partially filled row to exactly `cols` cells.
        if len(models) % cols != 0:
            parts.append(' <td></td>\n' * (cols - len(models) % cols))
            parts.append(' </tr>\n')
        parts.append('</table>\n')
    return "".join(parts).strip()
def get_object_dimension_intro_md():
    """Return the markdown intro for the evaluation-dimension section."""
    # Plain string literal: the original used an f-string with no placeholders.
    return '''
Each model involved is conducted under consistent and standardized settings and assessed along **multiple evaluation dimensions** to provide a detailed view of its strengths and limitations:
'''
def get_object_dimension_description_md():
    """Return the markdown bullet list describing each evaluation dimension."""
    # Plain string literal: the original used an f-string with no placeholders.
    return '''
- **Geometry Plausibility** assesses the structural integrity and physical feasibility of the generated shape.
- **Geometry Details** reflects the fidelity of fine-scale structures, such as sharp edges and part boundaries.
- **Texture Quality** evaluates the visual fidelity of surface textures in terms of resolution, realism, and aesthetic consistency.
- **Geometry-Texture Coherency** assesses the alignment between texture and shape—whether textures follow the contours, part boundaries, and material semantics of geometry.
- **Prompt-3D Alignment** evaluates the semantic and/or identity consistency between the input prompt and the generated 3D asset.
'''
def get_leaderboard_intro_md():
    """Return the markdown intro listing the three source benchmarks."""
    benchmarks = (
        ("Hi3DEval", "https://zyh482.github.io/Hi3DEval/"),
        ("3DGenBench", "https://zyh482.github.io/3DGen-Bench/"),
        ("GPTEval3D", "https://github.com/3DTopia/GPTEval3D"),
    )
    lines = [
        "",
        "This leaderboard integrates results from three complementary benchmarks that span different aspects of 3D synthesis.",
    ]
    lines.extend(f"- [{title}]({url})" for title, url in benchmarks)
    return "\n".join(lines) + "\n"
def get_hi3deval_intro_md():
    """Return the markdown/HTML intro for the Hi3DEval leaderboard section.

    Interpolates the module-level ``home_icon`` and ``arxiv_icon`` badge URLs.
    """
    # Fixed user-facing typo: "straight forward" -> "straightforward".
    md = f'''
This leaderboard is evaluated using **Hi3DEval**, a straightforward scoring benchmark that does **not rely on pairwise comparisons**.
<div style="display: flex; gap: 10px; align-items: center;">
<a href="https://zyh482.github.io/Hi3DEval/" target="_blank"><img src="{home_icon}"></a>
<a href="https://arxiv.org/abs/2508.05609" target="_blank"> <img src="{arxiv_icon}"></a>
</div>
Specifically, each dimension is assigned an absolute score within clearly defined value ranges:
- Geometry Plausibility: range [0, 9]
- Geometry Details: range [0, 4]
- Texture Quality: range [0, 4]
- Geometry-Texture Coherency: range [0, 1]
- Prompt-3D Alignment: range [0, 4]
The **Overall Score** is computed as the **SUM** of the scores across all five dimensions.
Hi3DEval supports unified evaluation for both **Text-to-3D** and **Image-to-3D** generation tasks. You can also freely select **"Task"** to explore performance under different input modalities.
'''
    return md
def get_citation_md(name):
    """Return a markdown "Reference" section with the BibTeX entry for *name*.

    Args:
        name: Benchmark identifier, matched case-insensitively against
            "hi3deval", "3dgen-bench", or "gpteval3d".

    Returns:
        Markdown string containing a fenced ```bibtex code block.

    Raises:
        KeyError: If *name* is not a known benchmark.
    """
    # Each fenced block is now terminated with a closing ``` — the previous
    # version left the fences open, which broke markdown rendering of all
    # content following the citation.
    citations = {
        "hi3deval": '''
```bibtex
@misc{zhang2025hi3devaladvancing3dgeneration,
  title={Hi3DEval: Advancing 3D Generation Evaluation with Hierarchical Validity},
  author={Yuhan Zhang and Long Zhuo and Ziyang Chu and Tong Wu and Zhibing Li and Liang Pan and Dahua Lin and Ziwei Liu},
  year={2025},
  eprint={2508.05609},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2508.05609},
}
```
''',
        "3dgen-bench": '''
```bibtex
@article{zhang20253dgen,
  title={3DGen-Bench: Comprehensive Benchmark Suite for 3D Generative Models},
  author={Zhang, Yuhan and Zhang, Mengchen and Wu, Tong and Wang, Tengfei and Wetzstein, Gordon and Lin, Dahua and Liu, Ziwei},
  journal={arXiv preprint arXiv:2503.21745},
  year={2025}
}
```
''',
        "gpteval3d": '''
```bibtex
@inproceedings{wu2024gpt,
  title={Gpt-4v (ision) is a human-aligned evaluator for text-to-3d generation},
  author={Wu, Tong and Yang, Guandao and Li, Zhibing and Zhang, Kai and Liu, Ziwei and Guibas, Leonidas and Lin, Dahua and Wetzstein, Gordon},
  booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
  pages={22227--22238},
  year={2024}
}
```
''',
    }
    return f"Reference:\n{citations[name.lower()]}"
def get_ack_md():
    """Return the markdown acknowledgements / contact section."""
    # Plain string literal: the original used an f-string with no placeholders.
    return """
This project is supported by the Shanghai Artificial Intelligence Laboratory.
We also acknowledge the Hugging Face platform and the broader open-source community in 3D generation, whose contributions have helped shape the ecosystem that makes this work possible.
We are currently working on open-sourcing the automatic evaluation pipeline, and we welcome the inclusion of more 3D generation models, including both open-source and closed-source solutions.
We welcome feedback, model contributions, and potential collaborations. Please contact us at yhzhang4778@gmail.com.
"""