import gradio as gr
import pandas as pd
import random
import time
from info.train_a_model import LLM_BENCHMARKS_TEXT
from info.submit import SUBMIT_TEXT
from info.deployment import DEPLOY_TEXT
from info.programs import PROGRAMS_TEXT
from info.citation import CITATION_TEXT
from src.processing import filter_benchmarks_table, make_clickable
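
# Static tab copy (training guide, deployment tips, developer programs, submission text, citation)
# lives in the info/ package; the table-filtering helpers come from src/processing.py.
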
demo = gr.Blocks()
with demo:
    gr.HTML("""<h1 align="center" id="space-title">🤗Powered-by-Intel LLM Leaderboard 💻</h1>""")
    gr.Markdown("This leaderboard is designed to evaluate, score, and rank open-source large language "
                "models that have been pre-trained or fine-tuned on Intel Hardware 🦾")
    gr.Markdown("Models submitted to the leaderboard are evaluated on the Intel Developer Cloud ☁️")

    # TODO: Coming soon comparison tool
    #with gr.Accordion("🥊Large Language Model Boxing Ring 🥊", open=False):
    #    with gr.Row():
    #        chat_a = gr.Chatbot()
    #        chat_b = gr.Chatbot()
    #        msg = gr.Textbox()
    #        gr.ClearButton([msg, chat_a])
    #
    #    def respond(message, chat_history):
    #        bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
    #        chat_history.append((message, bot_message))
    #        time.sleep(2)
    #        return "", chat_history
    #
    #    msg.submit(respond, inputs=[msg, chat_a], outputs=[msg, chat_a])

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏆 LLM Benchmark", elem_id="llm-benchmark-table", id=0):
            with gr.Row():
                with gr.Column():
                    filter_hw = gr.CheckboxGroup(choices=["Gaudi", "Xeon", "GPU Max", "Arc GPU", "Core Ultra"],
                                                 label="Select Training Platform*",
                                                 elem_id="compute_platforms",
                                                 value=["Gaudi", "Xeon", "GPU Max", "Arc GPU", "Core Ultra"])
                    filter_platform = gr.CheckboxGroup(choices=["Intel Developer Cloud", "AWS", "Azure", "GCP", "Local"],
                                                       label="Training Infrastructure*",
                                                       elem_id="training_infra",
                                                       value=["Intel Developer Cloud", "AWS", "Azure", "GCP", "Local"])
                    filter_affiliation = gr.CheckboxGroup(choices=["No Affiliation", "Intel Innovator", "Intel Student Ambassador",
                                                                   "Intel Software Liftoff", "Intel Labs", "Other"],
                                                          label="Intel Program Affiliation",
                                                          elem_id="program_affiliation",
                                                          value=["No Affiliation", "Intel Innovator", "Intel Student Ambassador",
                                                                 "Intel Software Liftoff", "Intel Labs", "Other"])
                with gr.Column():
                    filter_size = gr.CheckboxGroup(choices=[1, 3, 5, 7, 13, 35, 60, 70, 100],
                                                   label="Model Sizes (Billions of Parameters)",
                                                   elem_id="parameter_size",
                                                   value=[1, 3, 5, 7, 13, 35, 60, 70, 100])
                    filter_precision = gr.CheckboxGroup(choices=["fp8", "fp16", "bf16", "int8", "4bit"],
                                                        label="Model Precision",
                                                        elem_id="precision",
                                                        value=["fp8", "fp16", "bf16", "int8", "4bit"])
                    filter_type = gr.CheckboxGroup(choices=["pretrained", "fine-tuned", "chat-models", "merges/moerges"],
                                                   label="Model Types",
                                                   elem_id="model_types",
                                                   value=["pretrained", "fine-tuned", "chat-models", "merges/moerges"])
            initial_df = pd.read_csv("leaderboard_status_030424.csv")
            gradio_df_display = gr.Dataframe(value=initial_df)  # show the full table on load
            def update_df(hw_selected, platform_selected, affiliation_selected, size_selected, precision_selected, type_selected):
                filtered_df = filter_benchmarks_table(df=initial_df, hw_selected=hw_selected, platform_selected=platform_selected,
                                                      affiliation_selected=affiliation_selected, size_selected=size_selected,
                                                      precision_selected=precision_selected, type_selected=type_selected)
                return filtered_df
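
            # Every filter control shares the same callback, so any change re-filters the table
            # using the full set of current selections.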
            filter_inputs = [filter_hw, filter_platform, filter_affiliation, filter_size, filter_precision, filter_type]
            for filter_component in filter_inputs:
                filter_component.change(fn=update_df,
                                        inputs=filter_inputs,
                                        outputs=[gradio_df_display])
with gr.TabItem("🧰 Train a Model", elem_id="getting-started", id=1):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
with gr.TabItem("🚀 Deployment Tips", elem_id="deployment-tips", id=2):
gr.Markdown(DEPLOY_TEXT, elem_classes="markdown-text")
with gr.TabItem("👩💻 Developer Programs", elem_id="hardward-program", id=3):
gr.Markdown(PROGRAMS_TEXT, elem_classes="markdown-text")
with gr.TabItem("🏎️ Submit", elem_id="submit", id=4):
gr.Markdown(SUBMIT_TEXT, elem_classes="markdown-text")
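
            # Submission form: fields are collected here, but automated evaluation submission is
            # not wired up yet (see the note at the bottom of this tab).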
            with gr.Row():
                gr.Markdown("# Submit Model for Evaluation 🏎️", elem_classes="markdown-text")
            with gr.Row():
                with gr.Column():
                    model_name_textbox = gr.Textbox(label="Model name")
                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
                    model_type = gr.Dropdown(
                        choices=["pretrained", "fine-tuned", "chat models", "merges/moerges"],
                        label="Model type",
                        multiselect=False,
                        value="pretrained",
                        interactive=True,
                    )
                    hw_type = gr.Dropdown(
                        choices=["Gaudi", "Xeon", "GPU Max", "Arc GPU"],
                        label="Training Hardware",
                        multiselect=False,
                        value="Gaudi",  # default must be one of the listed choices
                        interactive=True,
                    )
                    terms = gr.Checkbox(
                        label="Check if you have read and agreed to terms and conditions associated "
                              "with submitting a model to the leaderboard.",
                        value=False,
                        interactive=True,
                    )
                with gr.Column():
                    precision = gr.Dropdown(
                        choices=["fp8", "fp16", "bf16", "int8", "4bit"],
                        label="Precision",
                        multiselect=False,
                        value="fp16",
                        interactive=True,
                    )
                    weight_type = gr.Dropdown(
                        choices=["Original", "Adapter", "Delta"],
                        label="Weights type",
                        multiselect=False,
                        value="Original",
                        interactive=True,
                    )
                    training_infra = gr.Dropdown(
                        choices=["IDC", "AWS", "Azure", "GCP", "Local"],
                        label="Training Infrastructure",
                        multiselect=False,
                        value="IDC",
                        interactive=True,
                    )
                    affiliation = gr.Dropdown(
                        choices=["No Affiliation", "Innovator", "Student Ambassador", "Intel Liftoff", "Intel Labs", "Other"],
                        label="Affiliation with Intel",
                        multiselect=False,
                        value="No Affiliation",  # default must be one of the listed choices
                        interactive=True,
                    )
                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
            # submit_button = gr.Button("Submit Eval")
            # submission_result = gr.Markdown()
            gr.Markdown("Community Submissions Coming soon!")
    with gr.Accordion("📙 Citation", open=False):
        citation = gr.Textbox(value=CITATION_TEXT,
                              lines=6,
                              label="Use the following to cite this content")
demo.launch()