import gradio as gr
from gradio_leaderboard import Leaderboard
from gradio.themes.utils import sizes
import pandas as pd

from evaluate import submit_data, evaluate_data
from utils import make_tag_clickable, make_user_clickable, fetch_dataset_df
from about import ENDPOINTS, LB_COLS, LB_AVG, LB_DTYPES

ALL_EPS = ['Average'] + ENDPOINTS


def build_leaderboard(df_results):
    """Build one leaderboard DataFrame per endpoint (plus the 'Average' board)."""
    per_ep = {}
    for ep in ALL_EPS:
        df = df_results[df_results["endpoint"] == ep].copy()
        if df.empty:
            print(f"[refresh] {ep} has no results; using empty DF")
            per_ep[ep] = pd.DataFrame(columns=LB_COLS)  # empty placeholder frame
            continue

        # Make the user and model-details cells clickable (skip users who submitted anonymously)
        df['user'] = df.apply(
            lambda row: make_user_clickable(row['user']) if not row['anonymous'] else row['user'],
            axis=1,
        ).astype(str)
        df['model details'] = df['model_report'].apply(make_tag_clickable).astype(str)

        if ep == "Average":
            df["MA-RAE"] = df["RAE"]  # the average of the per-endpoint RAE values
            sorted_df = df.sort_values(by='MA-RAE', ascending=True, kind="stable")
            per_ep[ep] = sorted_df[LB_AVG]
        else:
            sorted_df = df.sort_values(by="MAE", ascending=True, kind="stable")
            per_ep[ep] = sorted_df[LB_COLS]
    return per_ep


# Initialize the global results dataframe
current_df = fetch_dataset_df()


def gradio_interface():
    with gr.Blocks(title="OpenADMET ADMET Challenge", fill_height=False,
                   theme=gr.themes.Default(text_size=sizes.text_lg)) as demo:
        timer = gr.Timer(15)  # runs every 15 seconds while the page is focused
        data_version = gr.State(0)  # bumped whenever the underlying data changes

        def update_current_dataframe(v):
            global current_df
            new_df = fetch_dataset_df()
            if not current_df.equals(new_df):
                current_df = new_df
                return v + 1
            return v

        timer.tick(fn=update_current_dataframe, inputs=[data_version], outputs=data_version)

        # --- Header ---
        with gr.Row():
            with gr.Column(scale=7):  # wider text area
                gr.Markdown(
                    """
                    ## Welcome to the OpenADMET + ExpansionRx Blind Challenge!
                    Your task is to develop and submit predictive models for key ADMET properties on a blinded test set of real-world drug discovery data 🧑‍🔬

                    Go to the **Leaderboard** tab to check how the challenge is going. To participate, head over to the **Submit** tab and upload your results as a `CSV` file.

                    **The challenge is not yet open for submissions. Please stay tuned for the official launch date!**
                    """
                )
            with gr.Column(scale=2):  # narrow side column for the logo
                gr.Image(
                    value="./_static/challenge_logo.png",
                    show_label=False,
                    show_download_button=False,
                    width="5vw",  # small fixed width for the logo
                )

        # --- Welcome markdown message ---
        welcome_md = """
# 💊 OpenADMET + ExpansionRx
## Computational Blind Challenge in ADMET

This challenge is a community-driven initiative to benchmark predictive models for ADMET properties in drug discovery, hosted by **OpenADMET** in collaboration with **ExpansionRx**.

## Why are ADMET properties important in drug discovery?

Small molecules continue to be the bricks and mortar of drug discovery globally, accounting for ~75% of FDA approvals over the last decade. Oral bioavailability, easily tunable properties, modulation of a wide range of mechanisms, and ease of manufacturing make small molecules highly attractive as therapeutic agents. Moreover, emerging small-molecule modalities such as degraders, expression modulators, molecular glues, and antibody-drug conjugates (to name a few) have vastly expanded what we thought small molecules were capable of.

However, it is difficult to predict the lifetime and distribution of small molecules within the body.
Additionally, interactions with off-targets can cause safety issues and toxicity. Collectively, these *Absorption*, *Distribution*, *Metabolism*, *Excretion*, and *Toxicology* (**ADMET**) properties sit in the middle of the assay cascade and can make or break preclinical candidate molecules.

**OpenADMET** aims to address these challenges through an open-science effort to build predictive models of ADMET properties, characterizing the proteins and mechanisms that give rise to these properties through integrated structural biology, high-throughput experimentation, and integrative computational models. Read more about our strategy to transform drug discovery on our [website](https://openadmet.org/community/blogs/whatisopenadmet/).

Critical to our mission is developing open datasets and running community blind challenges to assess the current state of the art in ADMET modeling. Building on the success of the recent [ASAP-Polaris-OpenADMET blind challenge](https://chemrxiv.org/engage/chemrxiv/article-details/68ac00d1728bf9025e22fe45) in computational methods for drug discovery, we bring you a brand-new challenge in collaboration with **ExpansionRx**. During a recent series of drug discovery campaigns for RNA-mediated diseases, ExpansionRx collected a variety of ADMET data for off-targets and properties of interest, which they are generously sharing with the community for this challenge.

## 🧪 The Challenge

Participants will be tasked with solving real-world ADMET prediction problems ExpansionRx faced during lead optimization. Specifically, you will be asked to predict the ADMET properties of late-stage molecules based on earlier-stage data from the same campaigns. For this challenge we selected ten (10) crucial endpoints for the community to predict:

- LogD
- Kinetic Solubility (**KSOL**): uM
- Mouse Liver Microsomal (**MLM**) *CLint*: mL/min/kg
- Human Liver Microsomal (**HLM**) *CLint*: mL/min/kg
- Caco-2 Efflux Ratio
- Caco-2 Papp A>B (10^-6 cm/s)
- Mouse Plasma Protein Binding (**MPPB**): % Unbound
- Mouse Brain Protein Binding (**MBPB**): % Unbound
- Rat Liver Microsomal (**RLM**) *CLint*: mL/min/kg
- Mouse Gastrocnemius Muscle Binding (**MGMB**): % Unbound

Find more information about these endpoints on our [blog](https://openadmet.org/community/blogs/challenge_announcement2/).

## ✅ How to Participate

1. **Register**: Create an account on Hugging Face.
2. **Download the Public Dataset**: Download the ExpansionRx training data from the [Hugging Face dataset](https://huggingface.co/datasets/OpenADMET/openadmet-challenge-training-set) (a minimal loading sketch follows this list).
3. **Train Your Model**: Use the provided training data for each ADMET property of your choice.
4. **Submit Predictions**: Follow the instructions in the *Submit* tab to upload your predictions.
5. **Join the Discussion**: Say hello on the [Challenge Discord](https://discord.gg/MY5cEFHH3D)!
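
If you work in Python, the snippet below is one minimal way to pull the training data. It assumes the `datasets` library is installed; the split names and file layout of the dataset repository may differ, so check the dataset card first.

```python
from datasets import load_dataset

# Repository name taken from the Data section below.
ds = load_dataset("OpenADMET/openadmet-challenge-training-set")
print(ds)  # inspect the available splits/columns before converting to pandas
```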

## 📊 Data

The training set contains the following columns:

| Column                       | Unit       | Type  | Description                                        |
|:-----------------------------|:----------:|:-----:|:---------------------------------------------------|
| Molecule Name                |            | str   | Identifier for the molecule                        |
| Smiles                       |            | str   | Text representation of the 2D molecular structure  |
| LogD                         |            | float | LogD calculation                                   |
| KSol                         | uM         | float | Kinetic Solubility                                 |
| MLM CLint                    | mL/min/kg  | float | Mouse Liver Microsomal intrinsic clearance         |
| HLM CLint                    | mL/min/kg  | float | Human Liver Microsomal intrinsic clearance         |
| Caco-2 Permeability Efflux   |            | float | Caco-2 Permeability Efflux Ratio                   |
| Caco-2 Permeability Papp A>B | 10^-6 cm/s | float | Caco-2 Permeability Papp A>B                       |
| MPPB                         | % Unbound  | float | Mouse Plasma Protein Binding                       |
| MBPB                         | % Unbound  | float | Mouse Brain Protein Binding                        |
| RLM CLint                    | mL/min/kg  | float | Rat Liver Microsomal intrinsic clearance           |
| MGMB                         | % Unbound  | float | Mouse Gastrocnemius Muscle Binding                 |

You can download the training data from the [Hugging Face dataset](https://huggingface.co/datasets/OpenADMET/openadmet-challenge-training-set). The test set will remain blinded until the challenge submission deadline. You will be tasked with predicting the same set of ADMET endpoints for the test set molecules.

## 📝 Evaluation

The challenge will be judged based on the following criteria:

- We welcome submissions of any kind, including machine learning and physics-based approaches. You may also employ pre-training approaches as you see fit, as well as incorporate data from external sources into your models and submissions.
- In the spirit of open science and open source, we would love to see code showing how you created your submission, ideally in the form of a GitHub repository. If that is not possible due to IP or other constraints, you must at a minimum provide a short written methodology report based on the template [here](link to google doc). **Make sure your last submission before the deadline includes a link to a report or to a GitHub repository.**
- Each participant may submit multiple times, up to a limit of 5 submissions per day. **Only your latest submission will be considered for the final leaderboard.**
- Endpoints will be judged individually by mean absolute error (**MAE**), while the overall leaderboard will be judged by the macro-averaged relative absolute error (**MA-RAE**).
- Endpoints that are not already on a log scale (LogD, for example, already is) will be transformed to a log scale to minimize the impact of outliers on evaluation.
- We will estimate errors on the metrics using bootstrapping and apply the statistical testing workflow outlined in [this paper](https://chemrxiv.org/engage/chemrxiv/article-details/672a91bd7be152b1d01a926b) to determine whether model performances are statistically distinct.

## 📅 Timeline

- **September 16:** Challenge announcement
- **September XX:** Sample data release
- **October 27:** Challenge starts
- **October-November:** Online Q&A sessions and support via the Discord channel
- **January 19, 2026:** Submission closes
- **January 26, 2026:** Winners announced

---
"""

        # --- Gradio Interface ---
        gr.HTML(""" """)

        with gr.Tabs(elem_classes="tab-buttons"):
            lboard_dict = {}

            with gr.TabItem("📖 About"):
                gr.Markdown(welcome_md, elem_id="welcome-md")

            with gr.TabItem("🚀 Leaderboard", elem_id="lb_subtabs"):
                gr.Markdown("""
                View the leaderboard for each ADMET endpoint by selecting the appropriate tab.

                **THE RESULTS ON THE LEADERBOARD ARE CURRENTLY PLACEHOLDERS AND DO NOT REPRESENT ACTUAL SUBMISSIONS.**
                Actual results will be available once the challenge is open.
                """)

                # Build every leaderboard once, then render each endpoint in its own sub-tab
                per_ep = build_leaderboard(current_df)

                # Aggregated leaderboard
                with gr.TabItem('OVERALL', elem_id="all_tab"):
                    lboard_dict['Average'] = Leaderboard(
                        value=per_ep['Average'],
                        datatype=LB_DTYPES,
                        select_columns=LB_AVG,
                        search_columns=["user"],
                        render=True,
                        every=15,
                    )

                # Per-endpoint leaderboards
                for endpoint in ENDPOINTS:
                    with gr.TabItem(endpoint):
                        lboard_dict[endpoint] = Leaderboard(
                            value=per_ep[endpoint],
                            datatype=LB_DTYPES,
                            select_columns=LB_COLS,
                            search_columns=["user"],
                            render=True,
                            every=15,
                        )

                # Auto-refresh all leaderboards whenever the data version changes
                def refresh_if_changed():
                    per_ep = build_leaderboard(current_df)
                    return [per_ep[ep] for ep in ALL_EPS]

                data_version.change(fn=refresh_if_changed, outputs=[lboard_dict[ep] for ep in ALL_EPS])

            with gr.TabItem("✉️ Submit"):
                gr.Markdown(
                    """
                    # ADMET Endpoints Submission
                    Upload your predictions here as a single CSV file.
                    """
                )
                filename = gr.State(value=None)
                eval_state = gr.State(value=None)
                user_state = gr.State(value=None)

                with gr.Row():
                    with gr.Column():
                        gr.Markdown(
                            """
                            ## Participant Information
                            To participate, **we require a Hugging Face username**, which will be used to track multiple submissions. Your username will be displayed on the leaderboard unless you check the *anonymous* box. If you want to remain anonymous, please provide an alias to be used for the leaderboard (we'll keep the username hidden).

                            If you wish to be included in Challenge discussions, please provide your Discord username and email. If you wish to be included in a future publication with the Challenge results, please provide your name and affiliation (and check the box below).

                            We also ask you to provide a link to a report describing your method. It is not mandatory at submission time, but you must provide the link before the challenge deadline to be considered for the final leaderboard.
                            """
                        )
                        username_input = gr.Textbox(
                            label="Username",
                            placeholder="Enter your Hugging Face username",
                            # info="This will be displayed on the leaderboard."
                        )
                        user_alias = gr.Textbox(
                            label="Optional Alias",
                            placeholder="Enter an identifying alias for the leaderboard if you wish to remain anonymous",
                            # info="This will be displayed on the leaderboard."
                        )
                        anon_checkbox = gr.Checkbox(
                            label="I want to submit anonymously",
                            info="If checked, your username will be replaced with the given *alias* on the leaderboard.",
                            value=False,
                        )
                    with gr.Column():
                        # Participant-tracking info; never displayed publicly
                        participant_name = gr.Textbox(
                            label="Participant Name",
                            placeholder="Enter your name (optional)",
                            info="This will not be displayed on the leaderboard but will be used for tracking participation.",
                        )
                        discord_username = gr.Textbox(
                            label="Discord Username",
                            placeholder="Enter your Discord username (optional)",
                            info="Enter the username you will use on the Discord channel (if you plan to join the discussion).",
                        )
                        email = gr.Textbox(
                            label="Email",
                            placeholder="Enter your email (optional)",
                        )
                        affiliation = gr.Textbox(
                            label="Affiliation",
                            placeholder="Enter your school/company affiliation (optional)",
                        )
                        model_tag = gr.Textbox(
                            label="Model Report",
                            placeholder="Link to a report describing your method (optional)",
                        )
                        paper_checkbox = gr.Checkbox(
                            label="I want to be included in a future publication detailing the Challenge results",
                            value=False,
                        )

                with gr.Row():
                    with gr.Column():
                        gr.Markdown(
                            """
                            ## Submission Instructions
                            Upload a single CSV file containing your predictions for all ligands in the test set. Only your latest submission will be considered. You can download a CSV template with the test-set ligands using the button below.
                            """
                        )
                        download_btn = gr.DownloadButton(
                            label="📥 Download Test Set Template",
                            value="data/test_set-example.csv",
                            variant="secondary",
                        )
                    with gr.Column():
                        predictions_file = gr.File(
                            label="Single file with ADMET predictions (.csv)",
                            file_types=[".csv"],
                            file_count="single",
                        )

                # Keep user_state in sync with the (trimmed) username field
                username_input.change(
                    fn=lambda x: x.strip() if x and x.strip() else None,
                    inputs=username_input,
                    outputs=user_state,
                )

                submit_btn = gr.Button("📤 Submit Predictions")
                message = gr.Textbox(label="Status", lines=1, visible=False)

                submit_btn.click(
                    submit_data,
                    inputs=[predictions_file, user_state, participant_name, discord_username, email,
                            affiliation, model_tag, user_alias, anon_checkbox, paper_checkbox],
                    outputs=[message, filename],
                ).success(
                    fn=lambda m: gr.update(value=m, visible=True),
                    inputs=[message],
                    outputs=[message],
                ).success(
                    fn=evaluate_data,
                    inputs=[filename],
                    outputs=[eval_state],
                )

    return demo


if __name__ == "__main__":
    gradio_interface().launch(ssr_mode=False)
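

# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the app): one way a participant might fill
# in the downloadable test-set template before uploading it in the Submit tab.
# The non-prediction columns ("Molecule Name", "Smiles") are taken from the
# training-set table in the About text; the actual template layout may differ,
# so inspect the downloaded file first.
def example_submission_csv(template_path="data/test_set-example.csv",
                           out_path="my_predictions.csv"):
    """Write a placeholder predictions CSV based on the test-set template."""
    template = pd.read_csv(template_path)  # uses the module-level pandas import
    predictions = template.copy()
    for col in predictions.columns:
        if col not in ("Molecule Name", "Smiles"):
            predictions[col] = 0.0  # replace this constant fill with your model's predictions
    predictions.to_csv(out_path, index=False)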
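

# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the app): how the macro-averaged relative
# absolute error (MA-RAE) described in the Evaluation section could be
# computed. The official scoring lives in evaluate.py; the RAE definition
# below (sum of absolute errors divided by the absolute errors of a
# mean-value baseline) is an assumption for illustration only.
def example_ma_rae(y_true_by_endpoint, y_pred_by_endpoint):
    """Toy MA-RAE over dicts mapping endpoint name -> list of values."""
    raes = []
    for ep, y_true in y_true_by_endpoint.items():
        y_pred = y_pred_by_endpoint[ep]
        mean_true = sum(y_true) / len(y_true)
        abs_err = sum(abs(t - p) for t, p in zip(y_true, y_pred))
        baseline_err = sum(abs(t - mean_true) for t in y_true)
        raes.append(abs_err / baseline_err)
    return sum(raes) / len(raes)  # macro-average across endpoints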