# backend/app.py — Gradio backend UI for the auto evaluator.
# Provenance (from the hosting page): derek-thomas (HF staff),
# commit b2a9c22 "Adding better logs", file size 1.57 kB.
# Root-logger configuration must happen BEFORE the other imports: modules
# imported below create their own loggers at import time and should inherit
# the handlers/format installed by configure_root_logger().
from src.logging import configure_root_logger
configure_root_logger()
import logging
import gradio as gr
from main_backend_lighteval import run_auto_eval
from src.display.log_visualizer import log_file_to_html_string
from src.display.css_html_js import dark_mode_gradio_js
from src.envs import REFRESH_RATE, REPO_ID, QUEUE_REPO, RESULTS_REPO
from src.logging import setup_logger
# NOTE(review): basicConfig() is a no-op if the root logger already has
# handlers (likely after configure_root_logger()) — confirm this is intended.
logging.basicConfig(level=logging.INFO)
# Module-level logger routed through the project's setup_logger helper.
logger = setup_logger(__name__)
# Markdown rendered at the top of the "Application" tab: a short intro plus
# links to the leaderboard Space and the queue/results dataset repos
# (repo ids interpolated from src.envs at import time).
intro_md = f"""
# Intro
This is just a visual for the auto evaluator.
# Important links
| Description | Link |
|-----------------|------|
| Leaderboard | [{REPO_ID}](https://huggingface.co/spaces/{REPO_ID}) |
| Queue Repo | [{QUEUE_REPO}](https://huggingface.co/datasets/{QUEUE_REPO}) |
| Results Repo | [{RESULTS_REPO}](https://huggingface.co/datasets/{RESULTS_REPO}) |
# Logs
Note that the lines of the log visual are reversed.
"""
def button_auto_eval():
    """Click handler for the manual-run button.

    Logs that a manual trigger occurred, then runs one evaluation pass
    synchronously via run_auto_eval(). Returns nothing; the Gradio click
    event is wired with empty outputs.
    """
    logger.info("Manually triggering Auto Eval")
    run_auto_eval()
# UI definition: a single "Application" tab with the intro text, a live log
# view, a hidden periodic trigger, and a manual-run button.
with gr.Blocks(js=dark_mode_gradio_js) as demo:
    with gr.Tab("Application"):
        gr.Markdown(intro_md)
        # Live log panel: gradio re-invokes log_file_to_html_string every
        # second and replaces the HTML with its return value.
        output = gr.HTML(log_file_to_html_string, every=1)
        # Hidden component used purely as a timer: every REFRESH_RATE seconds
        # gradio calls run_auto_eval. Presumably its return value is not
        # meaningful markdown (the component is invisible) — confirm.
        dummy = gr.Markdown(run_auto_eval, every=REFRESH_RATE, visible=False)
        # Add a button that when pressed, triggers run_auto_eval
        button = gr.Button("Manually Run Evaluation")
        button.click(fn=button_auto_eval, inputs=[], outputs=[])
if __name__ == '__main__':
    # Enable the request queue with a generous concurrency limit so the
    # per-second log refresh and the periodic eval trigger do not starve
    # manual interactions.
    queued = demo.queue(default_concurrency_limit=40)
    # Bind to all interfaces on the standard HF Spaces port; surface errors
    # in the UI instead of failing silently.
    queued.launch(server_name="0.0.0.0", show_error=True, server_port=7860)