import gradio as gr
import pandas as pd
block = gr.Blocks(title="Q-Bench Leaderboard")
LEADERBOARD_INTRODUCTION = """# Q-Bench Leaderboard
<img style="width:40%" src="https://raw.githubusercontent.com/Q-Future/Q-Bench/master/logo.png">
**[ICLR 2024 Spotlight]** *"How do multi-modality LLMs perform on low-level computer vision?"*
🏆 Welcome to the **Q-Bench** leaderboard! *A Comprehensive Benchmark Suite for General-purpose Foundation Models on Low-level Vision*
<div style="display: flex; flex-wrap: wrap; align-items: center; gap: 10px;">
<a href="https://github.com/Q-Future/"><img src="https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fvqassessment%2FQ-Bench&count_bg=%23E97EBA&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=visitors&edge_flat=false"/></a>
<a href="https://github.com/Q-Future/Q-Bench"><img src="https://img.shields.io/github/stars/Q-Future/Q-Bench"/></a>
<a href="https://arxiv.org/abs/2309.14181"><img src="https://img.shields.io/badge/Arxiv-2309:14181-red"/></a>
<a href="https://github.com/Q-Future/Q-Bench/releases/tag/v1.0.1.1014datarelease"><img src="https://img.shields.io/badge/Data-Release-green"></a>
<a href="https://github.com/Q-Future/Q-Instruct"><img src="https://img.shields.io/badge/Awesome-QInstruct-orange"/></a>
</div>
- **Low-level Visual Perception (A1):** Open-range multi-choice questions on low-level visual perception. Dataset: [LLVisionQA](https://huggingface.co/datasets/teowu/LLVisionQA-QBench)
- **Low-level Visual Description (A2):** Detailed descriptions of low-level visual attributes. Dataset: [LLDescribe](https://huggingface.co/datasets/teowu/LLDescribe-QBench)
- **Visual Quality Assessment (A3):** MLLMs can give a *precise visual quality score* via *logprobs*!
For now, the leaderboard only includes results validated in our paper. User submissions will be supported soon.
"""
with block:
    gr.Markdown(LEADERBOARD_INTRODUCTION)

    # One tab per track; each tab renders the corresponding results CSV as a table.
    with gr.Tab("Perception (A1-Single, dev)"):
        gr.DataFrame(pd.read_csv("qbench_a1_single_dev.csv"))
    with gr.Tab("Perception (A1-Single, test)"):
        gr.DataFrame(pd.read_csv("qbench_a1_single_test.csv"))
    with gr.Tab("Perception (A1-Pair, dev)"):
        gr.DataFrame(pd.read_csv("qbench_a1_pair_dev.csv"))
    with gr.Tab("Perception (A1-Pair, test)"):
        gr.DataFrame(pd.read_csv("qbench_a1_pair_test.csv"))
    with gr.Tab("Description (A2-Single)"):
        gr.DataFrame(pd.read_csv("qbench_a2_single.csv"))
    with gr.Tab("Description (A2-Pair)"):
        gr.DataFrame(pd.read_csv("qbench_a2_pair.csv"))
    with gr.Tab("Assessment (A3)"):
        gr.DataFrame(pd.read_csv("qbench_a3_single.csv"))
    with gr.Tab("Submit to Q-Bench (coming soon!)"):
        gr.Markdown("We will release the submission guidance here soon!")

block.launch(share=True)