{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copyright 2025 Google LLC\n",
    "#\n",
    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "# you may not use this file except in compliance with the License.\n",
    "# You may obtain a copy of the License at\n",
    "#\n",
    "#     https://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Imagen Product Recontext - Evaluation at Scale\n",
    "\n",
    "<table align=\"left\">\n",
    "  <td style=\"text-align: center\">\n",
    "    <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-creative-studio/blob/main/experiments/Imagen_Product_Recontext/evaluation_imagen_product_recontext_at_scale.ipynb\">\n",
    "      <img width=\"32px\" src=\"https://www.gstatic.com/pantheon/images/bigquery/welcome_page/colab-logo.svg\" alt=\"Google Colaboratory logo\"><br> Open in Colab\n",
    "    </a>\n",
    "  </td>\n",
    "  <td style=\"text-align: center\">\n",
     "    <a href=\"https://console.cloud.google.com/vertex-ai/colab/import/https:%2F%2Fraw.githubusercontent.com%2FGoogleCloudPlatform%2Fvertex-ai-creative-studio%2Fmain%2Fexperiments%2FImagen_Product_Recontext%2Fevaluation_imagen_product_recontext_at_scale.ipynb\">\n",
    "      <img width=\"32px\" src=\"https://lh3.googleusercontent.com/JmcxdQi-qOpctIvWKgPtrzZdJJK-J3sWE1RsfjZNwshCFgE_9fULcNpuXYTilIR2hjwN\" alt=\"Google Cloud Colab Enterprise logo\"><br> Open in Colab Enterprise\n",
    "    </a>\n",
    "  </td>\n",
    "  <td style=\"text-align: center\">\n",
    "    <a href=\"https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-creative-studio/main/experiments/Imagen_Product_Recontext/evaluation_imagen_product_recontext_at_scale.ipynb\">\n",
    "      <img src=\"https://www.gstatic.com/images/branding/gcpiconscolors/vertexai/v1/32px.svg\" alt=\"Vertex AI logo\"><br> Open in Vertex AI Workbench\n",
    "    </a>\n",
    "  </td>\n",
    "  <td style=\"text-align: center\">\n",
    "    <a href=\"https://console.cloud.google.com/bigquery/import?url=https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-creative-studio/main/experiments/Imagen_Product_Recontext/evaluation_imagen_product_recontext_at_scale.ipynb\">\n",
    "      <img src=\"https://www.gstatic.com/images/branding/gcpiconscolors/bigquery/v1/32px.svg\" alt=\"BigQuery Studio logo\"><br> Open in BigQuery Studio\n",
    "    </a>\n",
    "  </td>\n",
    "  <td style=\"text-align: center\">\n",
    "    <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-creative-studio/blob/main/experiments/Imagen_Product_Recontext/evaluation_imagen_product_recontext_at_scale.ipynb\">\n",
    "      <img width=\"32px\" src=\"https://upload.wikimedia.org/wikipedia/commons/9/91/Octicons-mark-github.svg\" alt=\"GitHub logo\"><br> View on GitHub\n",
    "    </a>\n",
    "  </td>\n",
    "</table>\n",
    "\n",
    "<div style=\"clear: both;\"></div>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "| | |\n",
    "|-|-|\n",
     "|Author(s) | [Layolin Jesudhass](https://github.com/LUJ20), Isidro De Loera |"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Bb83RYQlii9v"
   },
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "id": "-V6XdhI79uxD"
   },
   "outputs": [],
   "source": [
    "#!pip install --upgrade google-genai google-cloud-storage ipywidgets\n",
    "\n",
    "from google.colab import auth\n",
    "from google import genai\n",
    "from google.genai import types\n",
    "from google.cloud import storage\n",
    "import os, json, re\n",
    "from json import JSONDecodeError\n",
    "import pandas as pd\n",
    "import tempfile\n",
    "from IPython.display import display\n",
    "import ipywidgets as widgets\n",
    "import matplotlib.pyplot as plt\n",
    "from PIL import Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "5BvWL23P18xz",
    "outputId": "23a94218-04e0-48cc-d8af-e78fbc4a51ef"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING: google.colab.auth.authenticate_user() is not supported in Colab Enterprise.\n",
      "Authenticated with Google Cloud\n"
     ]
    }
   ],
   "source": [
    "# --------------------------------------------------\n",
    "# Authenticate your Colab session with GCP\n",
    "from google.colab import auth\n",
    "auth.authenticate_user()\n",
    "print(\"Authenticated with Google Cloud\")\n",
    "# --------------------------------------------------\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "VsqJUBnfi6VA"
   },
   "source": [
    "## Initialize Vertex AI Client"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "jmYYs65xjIEC",
    "outputId": "cd9903b0-7a83-4209-ff3a-273c0074ba73"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING: google.colab.auth.authenticate_user() is not supported in Colab Enterprise.\n",
      "Authenticated with Google Cloud\n",
      "GenAI client ready\n",
      "Storage client ready\n"
     ]
    }
   ],
   "source": [
    "# ─── Authenticate & Initialize GenAI Client\n",
    "auth.authenticate_user()\n",
    "print(\"Authenticated with Google Cloud\")\n",
    "\n",
    "PROJECT_ID    = \"consumer-genai-experiments\"\n",
    "LOCATION      = \"global\"\n",
    "BUCKET_NAME   = \"id_test_bucket\"\n",
    "INPUT_PREFIX  = \"cymbal_retail/product_images_input/\"\n",
    "OUTPUT_PREFIX = \"cymbal_retail/product_images_output/\"\n",
    "\n",
    "# GenAI client\n",
    "client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)\n",
    "print(\"GenAI client ready\")\n",
    "\n",
    "# Storage client\n",
    "storage_client = storage.Client()\n",
    "print(\"Storage client ready\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "CTEkagDKitdY"
   },
   "source": [
    "## Helper Functions\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "id": "aZdnDS1X92LP"
   },
   "outputs": [],
   "source": [
    "# ─── System Instruction & User Prompt\n",
    "si_text1 = \"\"\"You are an expert visual quality evaluator for AI-generated e-commerce images.\n",
    "Your task is to compare three original product images (studio-style, white background)\n",
    "to one AI-generated output image (recontextualized lifestyle photo).\n",
    "Your goal is to assess if the output image faithfully and attractively represents the product\n",
    "while adhering to commercial quality and brand standards.\n",
    "Do not assume any information beyond what is visible in the images.\n",
    "\n",
    "For each of the six evaluation dimensions, assign a score from 1 to 5 and provide a short justification.\n",
    "\n",
    "Finally, calculate an overall quality score based on your judgment of the image's commercial viability.\n",
    "This score should summarize the output's overall fitness for use in real-world e-commerce listings\n",
    "(e.g., Cymbal Retail product pages). Return this score under the `overall_score` key in the JSON.\n",
    "\n",
    "Use your understanding of visual coherence, aesthetic judgment, and product realism to make your assessments.\"\"\"\n",
    "\n",
    "msg1_text1 = types.Part.from_text(text=\"\"\"You will be shown up to 4 images:\n",
    "• up to 3 input product photos\n",
    "• 1 AI-generated lifestyle image of the same product (filename contains \"_output.jpg\")\n",
    "\n",
    "Your task is to evaluate the output image across the following 6 dimensions.\n",
    "Each dimension should be scored on a scale from 1 to 5, where:\n",
    "- 5 = Excellent\n",
    "- 4 = Good\n",
    "- 3 = Acceptable\n",
    "- 2 = Poor\n",
    "- 1 = Unacceptable\n",
    "\n",
    "For each dimension, explain your score with 1–2 sentences of justification.\n",
    "\n",
    "### Evaluation Dimensions:\n",
    "\n",
    "1. **Product Fidelity** – Does the product in the output match the shape, color, texture, and identity seen in the input images?\n",
    "2. **Scene Realism** – Does the background setting make physical and spatial sense? Are lighting and shadows natural?\n",
    "3. **Aesthetic Quality** – Is the image visually appealing? Consider composition, balance, lighting, and professional polish.\n",
    "4. **Brand Integrity** – Are any visible logos, labels, or branding preserved, undistorted, and realistic?\n",
    "5. **Policy Compliance** – Does the image follow Cymbal Retail content policies (no people, kids, unsafe objects, political/religious content)?\n",
    "6. **Imaging Quality** – Is the image sharp, high-resolution, and free from noise, blurs, or compression artifacts?\n",
    "\n",
    "Please return the results in the following JSON format:\n",
    "\n",
    "{\n",
    "  \"product_fidelity\": { \"score\": X, \"comment\": \"...\" },\n",
    "  \"scene_realism\":   { \"score\": X, \"comment\": \"...\" },\n",
    "  \"aesthetic_quality\": { \"score\": X, \"comment\": \"...\" },\n",
    "  \"brand_integrity\":   { \"score\": X, \"comment\": \"...\" },\n",
    "  \"policy_compliance\": { \"score\": X, \"comment\": \"...\" },\n",
    "  \"imaging_quality\":   { \"score\": X, \"comment\": \"...\" },\n",
    "  \"overall_score\":     { \"score\": X, \"comment\": \"...\" }\n",
    "}\n",
    "\n",
    "Guidelines for overall_score:\n",
    "This should reflect the lowest common denominator (e.g., an otherwise perfect image\n",
    "with policy violations would get a lower overall).\n",
    "Use your judgment, not just the numeric average — it's OK to weight fidelity or\n",
    "compliance more heavily than, say, aesthetic.\n",
    "\"\"\")\n",
    "\n",
    "generate_config = types.GenerateContentConfig(\n",
    "    temperature=0.1,\n",
    "    top_p=0.95,\n",
    "    seed=0,\n",
    "    max_output_tokens=65535,\n",
    "    system_instruction=[types.Part.from_text(text=si_text1)],\n",
    "    thinking_config=types.ThinkingConfig(thinking_budget=0),\n",
    "    safety_settings=[\n",
    "        types.SafetySetting(category=\"HARM_CATEGORY_HATE_SPEECH\",      threshold=\"OFF\"),\n",
    "        types.SafetySetting(category=\"HARM_CATEGORY_DANGEROUS_CONTENT\", threshold=\"OFF\"),\n",
    "        types.SafetySetting(category=\"HARM_CATEGORY_SEXUALLY_EXPLICIT\", threshold=\"OFF\"),\n",
    "        types.SafetySetting(category=\"HARM_CATEGORY_HARASSMENT\",        threshold=\"OFF\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "# ─── Helpers ─────────────────────────────────────────────────────────────────\n",
    "def strip_code_fences(text: str) -> str:\n",
    "    m = re.search(r\"```(?:json)?\\s*\\n([\\s\\S]*?)```\", text)\n",
    "    return m.group(1) if m else text\n",
    "\n",
    "def make_part(uri: str) -> types.Part:\n",
    "    ext = os.path.splitext(uri)[1].lower()\n",
    "    mime = \"image/png\" if ext == \".png\" else \"image/jpeg\"\n",
    "    return types.Part(file_data=types.FileData(file_uri=uri, mime_type=mime))\n",
    "\n",
    "def list_product_folders(bucket_name: str, prefix: str) -> list[str]:\n",
    "    blobs = storage_client.list_blobs(bucket_name, prefix=prefix)\n",
    "    prods = {b.name[len(prefix):].split(\"/\",1)[0] for b in blobs if \"/\" in b.name[len(prefix):]}\n",
    "    return sorted(prods)\n",
    "\n",
    "def get_image_uris(bucket_name: str, prefix: str, max_images: int = 3) -> list[str]:\n",
    "    exts = {'.jpg','.jpeg','.png','.bmp','.gif','.webp'}\n",
    "    files = [b.name for b in storage_client.list_blobs(bucket_name, prefix=prefix)\n",
    "             if os.path.splitext(b.name)[1].lower() in exts]\n",
    "    return [f\"gs://{bucket_name}/{n}\" for n in sorted(files)[:max_images]]\n",
    "\n",
     "def find_output_uri(bucket_name: str, product: str) -> str:\n",
     "    # Locate the AI-generated image for `product` under OUTPUT_PREFIX.\n",
     "    # A blob qualifies when its name contains 'output' (case-insensitive)\n",
     "    # and carries one of the accepted image extensions. The alphabetically\n",
     "    # first match is returned as a gs:// URI; raises FileNotFoundError if\n",
     "    # no candidate exists.\n",
     "    exts = {'.jpg','.jpeg','.png','.bmp','.gif','.webp'}\n",
     "    prefix = f\"{OUTPUT_PREFIX}{product}/\"\n",
     "    blobs = storage_client.list_blobs(bucket_name, prefix=prefix)\n",
     "    candidates = [\n",
     "        b.name for b in blobs\n",
     "        if 'output' in b.name.lower() and os.path.splitext(b.name)[1].lower() in exts\n",
     "    ]\n",
     "    if not candidates:\n",
     "        raise FileNotFoundError(f\"No output image under gs://{bucket_name}/{prefix}\")\n",
     "    # Deterministic pick when several outputs exist: lexicographically first.\n",
     "    chosen = sorted(candidates)[0]\n",
     "    return f\"gs://{bucket_name}/{chosen}\"\n",
    "\n",
    "# ─── Core Eval ───────────────────────────────────────────────────────────────\n",
    "def generate(input_uris, output_uri) -> str:\n",
    "    parts = [msg1_text1] + [make_part(u) for u in input_uris] + [make_part(output_uri)]\n",
    "    full = \"\"\n",
    "    for chunk in client.models.generate_content_stream(\n",
    "        model=\"gemini-2.5-flash\",\n",
    "        contents=[types.Content(role=\"user\", parts=parts)],\n",
    "        config=generate_config\n",
    "    ):\n",
    "        full += chunk.text\n",
    "    return full\n",
    "\n",
     "def evaluate_product(product: str) -> dict:\n",
     "    \"\"\"Evaluate one product's generated image and return the parsed JSON scores.\n",
     "\n",
     "    Raises ValueError when the model returns an empty reply, and re-raises\n",
     "    JSONDecodeError (after printing the raw text for debugging) when the\n",
     "    reply is not valid JSON even after stripping code fences.\n",
     "    \"\"\"\n",
     "    in_pref    = f\"{INPUT_PREFIX}{product}/\"\n",
     "    inputs     = get_image_uris(BUCKET_NAME, in_pref)\n",
     "    output_uri = find_output_uri(BUCKET_NAME, product)\n",
     "    print(f\"  • input URIs: {inputs}\")\n",
     "    print(f\"  • output URI: {output_uri}\")\n",
     "\n",
     "    raw = generate(inputs, output_uri)\n",
     "    if not raw.strip():\n",
     "        raise ValueError(f\"Empty response for {product}\")\n",
     "\n",
     "    # The model often wraps its JSON in ```json fences; remove them first.\n",
     "    clean = strip_code_fences(raw).strip()\n",
     "    try:\n",
     "        return json.loads(clean)\n",
     "    except JSONDecodeError:\n",
     "        print(f\"Raw text for {product}:\\n{raw}\\n\")\n",
     "        raise\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "qhFfqJ-f7Ofx"
   },
   "source": [
    "# Sequential Run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "YsuB2WomQsCo",
    "outputId": "d40325c1-619e-4421-fe5f-aec46b07bbd4"
   },
   "outputs": [],
   "source": [
    "#sequential Run\n",
    "\n",
    "from datetime import datetime\n",
    "\n",
    "start_time = datetime.now()\n",
    "print(f\"🔍 Scanning for product folders… (Started at {start_time.strftime('%Y-%m-%d %H:%M:%S')})\")\n",
    "\n",
    "products = list_product_folders(BUCKET_NAME, INPUT_PREFIX)\n",
    "#print(f\"Found {len(products)} products: {products}\")\n",
    "\n",
    "all_results = {}\n",
    "for p in products:\n",
    "    #print(f\"\\nEvaluating {p} …\")\n",
    "    try:\n",
    "        res = evaluate_product(p)\n",
    "        all_results[p] = res\n",
    "        #print(f\"JSON parsed: {json.dumps(res, indent=2)}\")\n",
    "        # Save per-product JSON\n",
    "        json_path = f\"{OUTPUT_PREFIX}{p}/{p}_evaluation.json\"\n",
    "        storage_client.bucket(BUCKET_NAME).blob(json_path).upload_from_string(\n",
    "            json.dumps(res), content_type='application/json')\n",
    "        #print(f\"Saved JSON to {json_path}\")\n",
    "    except Exception as err:\n",
    "        print(f\" Failed to evaluate {p}: {err}\")\n",
    "\n",
    "end_time = datetime.now()\n",
    "print(f\"\\nCompleted at {end_time.strftime('%Y-%m-%d %H:%M:%S')}\")\n",
    "print(f\"Total time taken: {end_time - start_time}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "stXq_zRlSGCm"
   },
   "source": [
    "# Parallel Run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "YMIky_NdSJrV",
    "outputId": "cbf208f4-8e8c-4084-ce75-b1a50e869929"
   },
   "outputs": [],
   "source": [
    "#Parallel Run\n",
    "from datetime import datetime\n",
    "from concurrent.futures import ThreadPoolExecutor, as_completed\n",
    "\n",
    "start_time = datetime.now()\n",
    "print(f\"Scanning for product folders… (Started at {start_time.strftime('%Y-%m-%d %H:%M:%S')})\")\n",
    "\n",
    "products = list_product_folders(BUCKET_NAME, INPUT_PREFIX)\n",
    "#print(f\"Found {len(products)} products: {products}\")\n",
    "\n",
    "MAX_WORKERS = 8 #Change the number as needed\n",
    "\n",
    "all_results = {}\n",
    "\n",
    "def process_product(p):\n",
    "    try:\n",
    "        #print(f\"Evaluating {p} …\")\n",
    "        res = evaluate_product(p)\n",
    "        json_str = json.dumps(res, indent=2)\n",
    "        #print(f\"JSON parsed for {p}:\\n{json_str}\")\n",
    "\n",
    "        json_path = f\"{OUTPUT_PREFIX}{p}/{p}_evaluation.json\"\n",
    "        storage_client.bucket(BUCKET_NAME).blob(json_path).upload_from_string(\n",
    "            json_str, content_type='application/json')\n",
    "        #print(f\"Saved JSON to {json_path}\")\n",
    "        return (p, res)\n",
    "    except Exception as err:\n",
    "        print(f\"Failed to evaluate {p}: {err}\")\n",
    "        return (p, None)\n",
    "\n",
    "# Adjust max_workers based on I/O load and system limits\n",
    "with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:\n",
    "    future_to_product = {executor.submit(process_product, p): p for p in products}\n",
    "    for future in as_completed(future_to_product):\n",
    "        p, result = future.result()\n",
    "        if result is not None:\n",
    "            all_results[p] = result\n",
    "\n",
    "end_time = datetime.now()\n",
    "print(f\"\\nCompleted at {end_time.strftime('%Y-%m-%d %H:%M:%S')}\")\n",
    "print(f\"Total time taken: {end_time - start_time}\")\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "id": "-0McpIMLqBye"
   },
   "outputs": [],
   "source": [
    "# Sample stats\n",
    "# Evaluation results:\n",
    "# Sequential : Total time taken: 0:01:23.264453\n",
    "# Parallel : 2 threads : Total time taken: 0:00:38.668612\n",
    "# Parallel : 4 threads : Total time taken: 0:00:20.207010\n",
    "# Parallel : 8 threads : Total time taken: 0:00:11.559379"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "B2WmUK4P7qu4"
   },
   "source": [
    "# Tabulate & Save Summary\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "OPeoy2O77jrY",
    "outputId": "8bd819db-fb3f-4131-9802-3e0695087bf9"
   },
   "outputs": [],
   "source": [
    "# ─── Tabulate & Save Summary ─────────────────────────────────────────────────\n",
    "rows = []\n",
    "for prod, metrics in all_results.items():\n",
    "    input_product_uri  = f\"gs://{BUCKET_NAME}/{INPUT_PREFIX}{prod}/\"\n",
    "    output_product_uri = find_output_uri(BUCKET_NAME, prod)\n",
    "    overall = metrics.get(\"overall_score\", {}).get(\"score\")\n",
    "    comment = metrics.get(\"overall_score\", {}).get(\"comment\")\n",
    "    if overall is None or comment is None:\n",
    "        raise KeyError(f\"JSON for {prod} missing overall_score fields\")\n",
    "    row = {\n",
    "        \"product\": prod,\n",
    "        \"input_product_uri\":  input_product_uri,\n",
    "        \"output_product_uri\": output_product_uri,\n",
    "        \"overall_score\": overall,\n",
    "        \"overall_comment\": comment\n",
    "    }\n",
    "    for dim, info in metrics.items():\n",
    "        if dim == \"overall_score\": continue\n",
    "        row[f\"{dim}_score\"]   = info[\"score\"]\n",
    "        row[f\"{dim}_comment\"] = info[\"comment\"]\n",
    "    rows.append(row)\n",
    "\n",
    "# DataFrame & CSV\n",
    "df = pd.DataFrame(rows)\n",
    "summary_path = f\"{OUTPUT_PREFIX}evaluation_summary.csv\"\n",
    "storage_client.bucket(BUCKET_NAME).blob(summary_path).upload_from_string(\n",
    "    df.to_csv(index=False), content_type='text/csv')\n",
    "print(f\"Saved evaluation summary CSV to {summary_path}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "MTztbT-ZGPGq"
   },
   "source": [
    "# Threshold Based Image Viewer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "id": "Z9zcIMOh7jih"
   },
   "outputs": [],
   "source": [
    "# ─── Threshold-Based Image Viewer\n",
    "def display_product_images(product: str):\n",
    "    input_uris = get_image_uris(BUCKET_NAME, f\"{INPUT_PREFIX}{product}/\")\n",
    "    output_uri = find_output_uri(BUCKET_NAME, product)\n",
    "    local_paths = []\n",
    "    for uri in input_uris + [output_uri]:\n",
    "        key = uri.replace(f\"gs://{BUCKET_NAME}/\", \"\")\n",
    "        local = os.path.join(tempfile.gettempdir(), os.path.basename(key))\n",
    "        storage_client.bucket(BUCKET_NAME).blob(key).download_to_filename(local)\n",
    "        local_paths.append(local)\n",
    "    imgs = [Image.open(p) for p in local_paths]\n",
    "    fig, axes = plt.subplots(1, len(imgs), figsize=(5*len(imgs),5))\n",
    "    for ax, im in zip(axes, imgs):\n",
    "        ax.imshow(im); ax.axis('off')\n",
    "    plt.show()\n",
    "\n",
    "def review_threshold(threshold: float = 4.0):\n",
    "    if 'overall_score' not in df.columns:\n",
    "        raise RuntimeError(\"`df` missing `overall_score`.\")\n",
    "    filtered = df[df['overall_score'] <= threshold]\n",
    "    prods = filtered['product'].tolist()\n",
    "    if not prods:\n",
    "        print(f\"No products <= {threshold}\")\n",
    "        return\n",
    "\n",
    "    slider = widgets.IntSlider(min=0, max=len(prods)-1, description='Index', continuous_update=False)\n",
    "    out = widgets.Output()\n",
    "    def on_change(change):\n",
    "        with out:\n",
    "            out.clear_output(wait=True)\n",
    "            idx = change['new']\n",
    "            prod = prods[idx]\n",
    "            score = df.loc[df['product']==prod,'overall_score'].iloc[0]\n",
    "            comment = df.loc[df['product']==prod,'overall_comment'].iloc[0]\n",
    "            print(f\"Product {idx+1}/{len(prods)}: {prod} (score={score:.3f})\")\n",
    "            print(f\"Comment: {comment}\")\n",
    "            display_product_images(prod)\n",
    "    slider.observe(on_change, names='value')\n",
    "    display(slider, out)\n",
    "    on_change({'new': 0})\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 383,
     "referenced_widgets": [
      "20b319ff21d14a0d8d12f31ac8e0c4c0",
      "1de11858979f48e9b2062a42e5cb40f3",
      "e2b2221188de420595061f85ebc4222c",
      "611840916529449bb0942a41a9114cec",
      "c608a1a7e00949c2889b746c5c702010"
     ]
    },
    "id": "S9PHTbjH7jXA",
    "outputId": "642fc8e8-8717-45ea-c69e-d70ea9d184ca"
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "20b319ff21d14a0d8d12f31ac8e0c4c0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "IntSlider(value=0, continuous_update=False, description='Index', max=11)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "611840916529449bb0942a41a9114cec",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Output()"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "review_threshold(4.0)"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "name": "Imagen_Product_Recontext_Evaluation.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
