import os  # For reading environment variables
import shutil  # For directory cleanup
import zipfile  # For extracting model archives
import pathlib  # For path manipulations
import tempfile  # For creating temporary files/directories

import gradio  # For interactive UI
import pandas  # For tabular data handling
import PIL.Image  # For image I/O
import huggingface_hub  # For downloading model assets
import autogluon.multimodal  # For loading AutoGluon image classifier

# Hardcoded Hub model (native zip)
MODEL_REPO_ID = "ccm/2025-24679-image-autogluon-predictor"
ZIP_FILENAME = "autogluon_image_predictor_dir.zip"

# Local cache/extract dirs
CACHE_DIR = pathlib.Path("hf_assets")
EXTRACT_DIR = CACHE_DIR / "predictor_native"


def _prepare_predictor_dir() -> str:
    """Download the zipped AutoGluon predictor from the Hub and extract it.

    Returns:
        Path (as a string) to the extracted predictor directory, ready
        to be passed to ``MultiModalPredictor.load``.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    # NOTE: the deprecated `local_dir_use_symlinks` kwarg was dropped; recent
    # huggingface_hub versions ignore it and emit a FutureWarning.
    local_zip = huggingface_hub.hf_hub_download(
        repo_id=MODEL_REPO_ID,
        filename=ZIP_FILENAME,
        repo_type="model",
        local_dir=str(CACHE_DIR),
    )

    # Re-extract from scratch so stale files never linger between runs.
    if EXTRACT_DIR.exists():
        shutil.rmtree(EXTRACT_DIR)
    EXTRACT_DIR.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(str(EXTRACT_DIR))

    # Some archives wrap everything in a single top-level folder; if so,
    # descend into it so the predictor files sit at the returned root.
    contents = list(EXTRACT_DIR.iterdir())
    if len(contents) == 1 and contents[0].is_dir():
        return str(contents[0])
    return str(EXTRACT_DIR)


# Download & load the native predictor once at import time.
PREDICTOR_DIR = _prepare_predictor_dir()
PREDICTOR = autogluon.multimodal.MultiModalPredictor.load(PREDICTOR_DIR)

# Explicit class labels (edit copy as desired)
CLASS_LABELS = {0: "♻️ Recycling", 1: "🗑️ Trash"}


def _human_label(c) -> str:
    """Map a raw model class (int-like or str) to a human-readable label.

    Falls back to ``str(c)`` when the class is not in ``CLASS_LABELS``.
    """
    try:
        # int() only raises TypeError/ValueError for non-numeric inputs,
        # so catch exactly those rather than a bare Exception.
        return CLASS_LABELS.get(int(c), str(c))
    except (TypeError, ValueError):
        return CLASS_LABELS.get(c, str(c))


# Do the prediction!
def do_predict(pil_img: PIL.Image.Image):
    """Classify an uploaded image as recycling or trash.

    Args:
        pil_img: The uploaded image, or None when the input is cleared.

    Returns:
        A dict mapping human-readable class names to probabilities,
        suitable for a ``gradio.Label`` output; empty when no image
        is provided.
    """
    # Make sure there's actually an image to work with. The event wiring
    # below has exactly ONE output component, so return a single empty
    # dict here (the original returned a 3-tuple, which does not match
    # the success path or the declared outputs).
    if pil_img is None:
        return {}

    # Save the image and build the single-row DataFrame AutoGluon expects.
    # TemporaryDirectory guarantees cleanup (mkdtemp leaked a directory
    # per prediction).
    with tempfile.TemporaryDirectory() as tmpdir:
        img_path = pathlib.Path(tmpdir) / "input.png"
        pil_img.save(img_path)
        df = pandas.DataFrame({"image": [str(img_path)]})  # AutoGluon expected input format

        # Class probabilities for the single input row
        proba_df = PREDICTOR.predict_proba(df)

    # User-friendly column names
    proba_df = proba_df.rename(columns={0: "♻️ Recycling (0)", 1: "🗑️ Trash (1)"})
    row = proba_df.iloc[0]

    # Pretty ranked dict expected by gr.Label
    return {
        "♻️ Recycling": float(row.get("♻️ Recycling (0)", 0.0)),
        "🗑️ Trash": float(row.get("🗑️ Trash (1)", 0.0)),
    }


# Representative example images! These can be local or links.
EXAMPLES = [
    ["https://c8.alamy.com/comp/2AEA4K9/a-garbage-and-recycling-can-on-the-campus-of-carnegie-mellon-university-pittsburgh-pennsylvania-usa-2AEA4K9.jpg"],
    ["https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSvid9M7DynMcoUsX0KBMxooLvrKQJwREiw6g&s"],
    ["https://cmccomb.com/assets/images/headshot_optimized_square.jpg"],
]

# Gradio UI
with gradio.Blocks() as demo:
    # Provide an introduction
    gradio.Markdown("# Trash or Recycling?")
    gradio.Markdown("""
    This is a simple app that demonstrates how to use an autogluon multimodal predictor in a gradio space to predict the contents of a picture. To use, just upload a photo. The result should be generated automatically. 
    """)

    # Interface for the incoming image
    image_in = gradio.Image(type="pil", label="Input image", sources=["upload", "webcam"])

    # Interface element to show the result probabilities
    proba_pretty = gradio.Label(num_top_classes=2, label="Class probabilities")

    # Whenever a new image is uploaded, update the result
    image_in.change(fn=do_predict, inputs=[image_in], outputs=[proba_pretty])

    # For clickable example images
    gradio.Examples(
        examples=EXAMPLES,
        inputs=[image_in],
        label="Representative examples",
        examples_per_page=8,
        cache_examples=False,
    )

if __name__ == "__main__":
    demo.launch()