from functools import partial
import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler
import main_backend_toxicity
from src.display.css_html_js import dark_mode_gradio_js
from src.display.log_visualizer import log_file_to_html_string
from src.envs import REFRESH_RATE, REPO_ID, REQUESTS_REPO, RESULTS_REPO
from src.logging import configure_root_logger, setup_logger, log_file
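
# Set up logging for this process; `log_file` (from src.logging) is the file surfaced for download in the UI below.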
configure_root_logger()
logger = setup_logger(__name__)
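
# HF_URL builds the links table below; REFRESH_VISUAL is how often (in seconds) the UI refreshes the log view.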
HF_URL = "https://huggingface.co"
REFRESH_VISUAL = 10
intro_md = f"""
# Intro
This is a visual for the auto evaluator.
"""
links_md = f"""
# Important links
| Description | Link |
|----------------|------|
| Leaderboard | [{REPO_ID}]({HF_URL}/spaces/{REPO_ID}) |
| Requests Repo | [{REQUESTS_REPO}]({HF_URL}/datasets/{REQUESTS_REPO}) |
| Results Repo | [{RESULTS_REPO}]({HF_URL}/datasets/{RESULTS_REPO}) |
"""
def auto_eval():
    logger.info("Triggering Auto Eval")
    main_backend_toxicity.run_auto_eval()
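
# The checkbox is created before the Blocks context so it can be passed to the log formatter; it is rendered later inside the accordion via .render().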
reverse_order_checkbox = gr.Checkbox(label="Reverse Order", value=True)
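
# Monitoring UI: live log view, log download, a manual evaluation trigger, and useful links.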
with gr.Blocks(js=dark_mode_gradio_js) as backend_ui:
    gr.Markdown(intro_md)

    with gr.Tab("Application"):
        output_html = gr.HTML(partial(log_file_to_html_string,
                                      reverse=reverse_order_checkbox),
                              every=REFRESH_VISUAL)
        with gr.Row():
            download_button = gr.DownloadButton("Download Log File",
                                                value=log_file)
            with gr.Accordion('Log View Configuration', open=False):
                reverse_order_checkbox.render()

        # Button to trigger evaluation
        button = gr.Button("Manually Run Evaluation")
        button.click(fn=auto_eval, inputs=[], outputs=[])

        gr.Markdown(links_md)

    # This dummy var was in the original demo. It would run the eval before the UI
    # fully loaded, and the UI would error out if the eval took too long.
    # Changed to use a BackgroundScheduler instead (see below).
    # dummy = gr.Markdown(main_backend_toxicity.run_auto_eval(), every=REFRESH_RATE, visible=False)
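
# When run directly: re-run the evaluation every REFRESH_RATE seconds in the background and serve the UI on port 7860.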
if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(auto_eval, "interval", seconds=REFRESH_RATE)
    scheduler.start()
    backend_ui.queue(default_concurrency_limit=40).launch(server_name="0.0.0.0",
                                                          show_error=True,
                                                          server_port=7860)