{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Import the data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Load the dataset and create the training datapoints"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the Multimodal-Mind2Web dataset from the Hugging Face Hub.\n",
    "from datasets import load_dataset\n",
    "\n",
    "ds = load_dataset(\"osunlp/Multimodal-Mind2Web\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Declare classes\n",
    "import dataclasses\n",
    "from dataclasses import dataclass\n",
    "from typing import List, Literal, Tuple\n",
    "import json\n",
    "\n",
    "@dataclass\n",
    "class Coordinate:\n",
    "    # A pixel position; x grows rightward, y grows downward.\n",
    "    x: int\n",
    "    y: int\n",
    "\n",
    "@dataclass\n",
    "class ScrollBar:\n",
    "    # Scrollbar geometry expressed as fractions of total page height (0.0-1.0).\n",
    "    offset: float  # top of the scrollbar thumb\n",
    "    height: float  # length of the scrollbar thumb\n",
    "\n",
    "@dataclass\n",
    "class BrowserState:\n",
    "    # Snapshot of the visible browser viewport at one moment in time.\n",
    "    screenshot: str  # base64-encoded JPEG of the visible viewport\n",
    "    height: int\n",
    "    width: int\n",
    "    scrollbar: ScrollBar\n",
    "    url: str\n",
    "    mouse: Coordinate\n",
    "\n",
    "@dataclass\n",
    "class BrowserAction:\n",
    "    # One atomic browser interaction.\n",
    "    action: Literal[\n",
    "        \"success\",\n",
    "        \"failure\",\n",
    "        \"key\",\n",
    "        \"type\",\n",
    "        \"mouse_move\",\n",
    "        \"left_click\",\n",
    "        \"left_click_drag\",\n",
    "        \"right_click\",\n",
    "        \"middle_click\",\n",
    "        \"double_click\",\n",
    "        \"screenshot\",\n",
    "        \"cursor_position\",\n",
    "        \"scroll_up\",\n",
    "        \"scroll_down\",\n",
    "    ]\n",
    "    # TODO: Do we want to use Coordinate class here, or easier to just construct with tuple\n",
    "    coordinate: tuple[int, int] | None  # target pixel for pointer actions\n",
    "    text: str | None  # key name or typed text for \"key\"/\"type\" actions\n",
    "    reasoning: str  # natural-language rationale recorded alongside the action\n",
    "    id: str  # tool-use id, produced by generate_tool_id()\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class BrowserStep:\n",
    "    # A (state, action) training pair: what was on screen and what was done.\n",
    "    state: BrowserState\n",
    "    action: BrowserAction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Declare functions\n",
    "\n",
    "import random\n",
    "\n",
    "def generate_tool_id() -> str:\n",
    "    \"\"\"Return a random tool-use id: 'toolu_01' followed by 22 random alphanumerics.\"\"\"\n",
    "    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n",
    "    suffix = ''.join(random.choice(alphabet) for _ in range(22))\n",
    "    return 'toolu_01' + suffix\n",
    "\n",
    "\n",
    "def is_in_viewport(viewport, point):\n",
    "    \"\"\"Return True when point (x, y) lies within the (x1, y1, x2, y2) box, edges inclusive.\"\"\"\n",
    "    left, top, right, bottom = viewport\n",
    "    px, py = point\n",
    "    return left <= px <= right and top <= py <= bottom\n",
    "\n",
    "def scroll_viewport(direction, viewport, y_max):\n",
    "    \"\"\"Shift a (x1, y1, x2, y2) viewport up or down by 75% of its own height.\n",
    "\n",
    "    The result is clamped to the [1, y_max] vertical range while preserving\n",
    "    the viewport height. Raises ValueError for an unknown direction.\n",
    "    NOTE(review): the top clamp is 1 rather than 0, so a viewport that started\n",
    "    at y1=0 can never scroll back to the very top row -- confirm this is intended.\n",
    "    \"\"\"\n",
    "    x1, y1, x2, y2 = viewport\n",
    "    height = y2 - y1\n",
    "    scroll_amount = 0.75 * height\n",
    "\n",
    "    if direction == \"up\":\n",
    "        new_y1 = max(1, y1 - scroll_amount)\n",
    "        new_y2 = new_y1 + height\n",
    "    elif direction == \"down\":\n",
    "        new_y2 = min(y_max, y2 + scroll_amount)\n",
    "        new_y1 = new_y2 - height\n",
    "    else:\n",
    "        raise ValueError(\"Direction must be 'up' or 'down'\")\n",
    "\n",
    "    # Adjust if the new viewport exceeds bounds while preserving height\n",
    "    if new_y1 < 1:\n",
    "        new_y1 = 1\n",
    "        new_y2 = new_y1 + height\n",
    "    if new_y2 > y_max:\n",
    "        new_y2 = y_max\n",
    "        new_y1 = new_y2 - height\n",
    "\n",
    "    return (x1, new_y1, x2, new_y2)\n",
    "\n",
    "def viewport_screenshot(screenshot, viewport):\n",
    "    \"\"\"Crop a PIL image to the (x1, y1, x2, y2) viewport and return it as base64 JPEG.\n",
    "\n",
    "    Args:\n",
    "        screenshot: PIL.Image of the full page.\n",
    "        viewport: (x1, y1, x2, y2) box; values are truncated to ints before cropping.\n",
    "    Returns:\n",
    "        Base64-encoded JPEG (quality 85) of the cropped region.\n",
    "    \"\"\"\n",
    "    import base64\n",
    "    from io import BytesIO\n",
    "\n",
    "    x1, y1, x2, y2 = map(int, viewport)\n",
    "    # crop() already returns a new image, so no defensive copy() is needed.\n",
    "    cropped_image = screenshot.crop((x1, y1, x2, y2))\n",
    "\n",
    "    buffered = BytesIO()\n",
    "    cropped_image.save(buffered, format=\"JPEG\", quality=85)\n",
    "    return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n",
    "\n",
    "\n",
    "def process_step(step, mouse_coordinates: Coordinate) -> Tuple[List[BrowserStep], Coordinate]:\n",
    "    \"\"\"Decompose one raw Mind2Web step into low-level BrowserSteps.\n",
    "\n",
    "    Emits PAGE_UP/PAGE_DOWN key steps until the target element is in view,\n",
    "    then a mouse_move, a left_click, and (for TYPE/SELECT operations) a type step.\n",
    "\n",
    "    Args:\n",
    "        step: Raw dataset row with \"screenshot\", \"pos_candidates\" and \"operation\".\n",
    "        mouse_coordinates: Mouse position carried over from the previous step.\n",
    "    Returns:\n",
    "        (steps, mouse): the generated BrowserSteps and the updated mouse position.\n",
    "    \"\"\"\n",
    "    cerebellum_steps: List[BrowserStep] = []\n",
    "\n",
    "    # Initialize the viewport to the top 16:10 ratio part of the screenshot.\n",
    "    screenshot = step[\"screenshot\"]\n",
    "    width, height = screenshot.size\n",
    "    # Keep integer pixels: BrowserState.height is declared int.\n",
    "    viewport_height = int(width * 10 / 16)\n",
    "    viewport = (0, 0, width, viewport_height)\n",
    "    y_max = float(height)\n",
    "\n",
    "    def make_state(mouse: Coordinate) -> BrowserState:\n",
    "        # Snapshot of the current viewport; scrollbar fields are fractions of page height.\n",
    "        return BrowserState(\n",
    "            url='',\n",
    "            screenshot=viewport_screenshot(screenshot, viewport),\n",
    "            height=viewport_height,\n",
    "            width=width,\n",
    "            scrollbar=ScrollBar(offset=float(viewport[1]) / y_max, height=float(viewport_height) / y_max),\n",
    "            mouse=mouse\n",
    "        )\n",
    "\n",
    "    # Find the bounding box of the first pos_candidates\n",
    "    if len(step[\"pos_candidates\"]) == 0:\n",
    "        return ([], mouse_coordinates)\n",
    "\n",
    "    candidate = json.loads(step[\"pos_candidates\"][0])\n",
    "    attributes = json.loads(candidate[\"attributes\"])\n",
    "    bounding_box_rect = attributes[\"bounding_box_rect\"]\n",
    "    x, y, box_width, box_height = map(float, bounding_box_rect.split(','))\n",
    "    center_x = x + box_width / 2\n",
    "    center_y = y + box_height / 2\n",
    "\n",
    "    if not (0 <= center_x <= width and 0 <= center_y <= height):\n",
    "        print(\"Bounding box coordinates outside of provided screenshot, skipping step\")\n",
    "        return ([], mouse_coordinates)\n",
    "\n",
    "    # Scroll the viewport until the center of the bounding box is in view.\n",
    "    while not is_in_viewport(viewport, (center_x, center_y)):\n",
    "        if center_y < viewport[1]:\n",
    "            scroll_action = BrowserAction(\n",
    "                action=\"key\",\n",
    "                coordinate=None,\n",
    "                text=\"PAGE_UP\",\n",
    "                reasoning=\"Press the Page Up key to scroll up\",\n",
    "                id=generate_tool_id()\n",
    "            )\n",
    "            direction = \"up\"\n",
    "        elif center_y > viewport[3]:\n",
    "            scroll_action = BrowserAction(\n",
    "                action=\"key\",\n",
    "                coordinate=None,\n",
    "                text=\"PAGE_DOWN\",\n",
    "                reasoning=\"Press the Page Down key to scroll down\",\n",
    "                id=generate_tool_id()\n",
    "            )\n",
    "            direction = \"down\"\n",
    "        else:\n",
    "            # Unreachable given the bounds check above; fail loudly instead of looping forever.\n",
    "            raise RuntimeError(\"Scroll loop cannot bring target into viewport\")\n",
    "        # Capture the pre-scroll state, then advance the viewport.\n",
    "        cerebellum_steps.append(BrowserStep(state=make_state(mouse_coordinates), action=scroll_action))\n",
    "        viewport = scroll_viewport(direction, viewport, y_max)\n",
    "\n",
    "    # Move the mouse to the center of the bounding box, remapped to viewport\n",
    "    # coordinates and rounded to ints (BrowserAction.coordinate is tuple[int, int]).\n",
    "    center_x_relative = int(round(center_x - viewport[0]))\n",
    "    center_y_relative = int(round(center_y - viewport[1]))\n",
    "    mouse_move_action = BrowserAction(\n",
    "        action=\"mouse_move\",\n",
    "        coordinate=(center_x_relative, center_y_relative),\n",
    "        text=None,\n",
    "        reasoning=\"Move mouse to the center of the element\",\n",
    "        id=generate_tool_id()\n",
    "    )\n",
    "    cerebellum_steps.append(BrowserStep(state=make_state(mouse_coordinates), action=mouse_move_action))\n",
    "\n",
    "    # Pretend now the mouse was moved\n",
    "    mouse_coordinates = Coordinate(x=center_x_relative, y=center_y_relative)\n",
    "\n",
    "    # Perform a left click action\n",
    "    left_click_action = BrowserAction(\n",
    "        action=\"left_click\",\n",
    "        coordinate=None,\n",
    "        text=None,\n",
    "        reasoning=\"Perform a left click on element\",\n",
    "        id=generate_tool_id()\n",
    "    )\n",
    "    cerebellum_steps.append(BrowserStep(state=make_state(mouse_coordinates), action=left_click_action))\n",
    "\n",
    "    # Create corresponding key actions if the action is \"type\" or \"select\"\n",
    "    operation = json.loads(step[\"operation\"])\n",
    "    if operation[\"op\"] in [\"TYPE\", \"SELECT\"]:\n",
    "        type_action = BrowserAction(\n",
    "            action='type',\n",
    "            coordinate=None,\n",
    "            text=operation[\"value\"],\n",
    "            reasoning=\"Typing text set to desired value\",\n",
    "            id=generate_tool_id()\n",
    "        )\n",
    "        cerebellum_steps.append(BrowserStep(state=make_state(mouse_coordinates), action=type_action))\n",
    "\n",
    "    return (cerebellum_steps, mouse_coordinates)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# Make sure the output directory exists before writing any task files.\n",
    "os.makedirs('mind2web', exist_ok=True)\n",
    "\n",
    "train = ds.get(\"train\")\n",
    "\n",
    "train_iterator = iter(train)\n",
    "\n",
    "print(list(train[0].keys()))\n",
    "\n",
    "# Walk the split sequentially: consecutive rows sharing confirmed_task belong to\n",
    "# one task; group them, decompose each step, and write one JSONL file per task.\n",
    "data_point = next(train_iterator)\n",
    "while train_iterator is not None:\n",
    "    goal = data_point[\"confirmed_task\"]\n",
    "    task_id = data_point[\"annotation_id\"]\n",
    "\n",
    "    print('Grabbing steps for:', goal, task_id)\n",
    "\n",
    "    steps = [data_point]\n",
    "\n",
    "    # Keep on pulling on the iterator until we get all the steps in this task\n",
    "    while True:\n",
    "        try:\n",
    "            data_point = next(train_iterator)\n",
    "        except StopIteration:\n",
    "            train_iterator = None\n",
    "            break\n",
    "\n",
    "        if data_point[\"confirmed_task\"] != goal:\n",
    "            # data_point now holds the first step of the next task.\n",
    "            break\n",
    "\n",
    "        steps.append(data_point)\n",
    "\n",
    "    # Decompose the raw steps, threading the simulated mouse position through.\n",
    "    cerebellum_steps: List[BrowserStep] = []\n",
    "    mouse = Coordinate(x=1, y=1)\n",
    "    for raw_step in steps:\n",
    "        decomposed_steps, mouse = process_step(raw_step, mouse)\n",
    "        cerebellum_steps += decomposed_steps\n",
    "\n",
    "    # Write the goal line followed by one JSON line per BrowserStep.\n",
    "    output_file_path = f'mind2web/{task_id}.jsonl'\n",
    "    with open(output_file_path, 'w') as outfile:\n",
    "        outfile.write(json.dumps({\"goal\": goal}))\n",
    "        outfile.write('\\n')\n",
    "        for this_step in cerebellum_steps:\n",
    "            outfile.write(json.dumps(dataclasses.asdict(this_step)))\n",
    "            outfile.write('\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import base64\n",
    "\n",
    "# CURSOR_64 is a base64-encoded PNG of a small mouse-cursor sprite; it is\n",
    "# decoded once into CURSOR_BYTES and pasted onto screenshots by mark_screenshot.\n",
    "CURSOR_64 = \"iVBORw0KGgoAAAANSUhEUgAAAAoAAAAQCAYAAAAvf+5AAAAAw3pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjabVBRDsMgCP33FDuC8ijF49i1S3aDHX9YcLFLX+ITeOSJpOPzfqVHBxVOvKwqVSQbuHKlZoFmRzu5ZD55rvX8Uk9Dz2Ql2A1PVaJ/1MvPwK9m0TIZ6TOE7SpUDn/9M4qH0CciC/YwqmEEcqGEQYsvSNV1/sJ25CvUTxqBjzGJU86rbW9f7B0QHSjIxoD6AOiHE1oXjAlqjQVyxmTMkJjEFnK3p4H0BSRiWUv/cuYLAAABhWlDQ1BJQ0MgcHJvZmlsZQAAeJx9kT1Iw0AYht+2SqVUHCwo0iFD1cWCqIijVqEIFUKt0KqDyaV/0KQhSXFxFFwLDv4sVh1cnHV1cBUEwR8QZwcnRRcp8buk0CLGg7t7eO97X+6+A/yNClPNrnFA1SwjnUwI2dyqEHxFCFEM0DoqMVOfE8UUPMfXPXx8v4vzLO+6P0evkjcZ4BOIZ5luWMQbxNObls55nzjCSpJCfE48ZtAFiR+5Lrv8xrnosJ9nRoxMep44QiwUO1juYFYyVOIp4piiapTvz7qscN7irFZqrHVP/sJwXltZ5jrNKJJYxBJECJBRQxkVWIjTrpFiIk3nCQ//kOMXySWTqwxGjgVUoUJy/OB/8Lu3ZmFywk0KJ4DuF9v+GAaCu0Czbtvfx7bdPAECz8CV1vZXG8DMJ+n1thY7Avq2gYvrtibvAZc7wOCTLhmSIwVo+gsF4P2MvikH9N8CoTW3b61znD4AGepV6gY4OARGipS97vHuns6+/VvT6t8Ph1lyr0hzlCAAAA14aVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA0LjQuMC1FeGl2MiI+CiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIKICAgIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiCiAgICB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iCiAgICB4bWxuczpHSU1QPSJodHRwOi8vd3d3LmdpbXAub3JnL3htcC8iCiAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgeG1wTU06RG9jdW1lbnRJRD0iZ2ltcDpkb2NpZDpnaW1wOjFiYzFkZjE3LWM5YmMtNGYzZi1hMmEzLTlmODkyNWNiZjY4OSIKICAgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDo4YTUyMWJhMC00YmNlLTQzZWEtYjgyYS04ZGM2MTBjYmZlOTgiCiAgIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDplODQ3ZjUxNC00MWVlLTQ2ZjYtOTllNC1kNjI3MjMxMjhlZTIiCiAgIGRjOkZvcm1hdD0iaW1hZ2UvcG5nIgogICBHSU1QOkFQST0iMi4wIgogICBHSU1QOlBsYXRmb3JtPSJMaW51eCIKICAgR0lNUDpUa
W1lU3RhbXA9IjE3MzAxNTc3NjY5MTI3ODciCiAgIEdJTVA6VmVyc2lvbj0iMi4xMC4zOCIKICAgdGlmZjpPcmllbnRhdGlvbj0iMSIKICAgeG1wOkNyZWF0b3JUb29sPSJHSU1QIDIuMTAiCiAgIHhtcDpNZXRhZGF0YURhdGU9IjIwMjQ6MTA6MjhUMTY6MjI6NDYtMDc6MDAiCiAgIHhtcDpNb2RpZnlEYXRlPSIyMDI0OjEwOjI4VDE2OjIyOjQ2LTA3OjAwIj4KICAgPHhtcE1NOkhpc3Rvcnk+CiAgICA8cmRmOlNlcT4KICAgICA8cmRmOmxpCiAgICAgIHN0RXZ0OmFjdGlvbj0ic2F2ZWQiCiAgICAgIHN0RXZ0OmNoYW5nZWQ9Ii8iCiAgICAgIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6ZTVjOTM2ZDYtYjMzYi00NzM4LTlhNWUtYjM3YTA5MzdjZDAxIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJHaW1wIDIuMTAgKExpbnV4KSIKICAgICAgc3RFdnQ6d2hlbj0iMjAyNC0xMC0yOFQxNjoyMjo0Ni0wNzowMCIvPgogICAgPC9yZGY6U2VxPgogICA8L3htcE1NOkhpc3Rvcnk+CiAgPC9yZGY6RGVzY3JpcHRpb24+CiA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz5/5aQ8AAAABmJLR0QAcgByAAAtJLTuAAAACXBIWXMAAABZAAAAWQGqnamGAAAAB3RJTUUH6AocFxYuv5vOJAAAAHhJREFUKM+NzzEOQXEMB+DPYDY5iEVMIpzDfRxC3mZyBK7gChZnELGohaR58f7a7dd8bVq4YaVQgTvWFVjCUcXxA28qcBBHFUcVRwWPPuFfXVsbt0PPnLBL+dKHL+wxxhSPhBcZznuDXYKH1uGzBJ+YtPAZRyy/jTd7qEoydWUQ7QAAAABJRU5ErkJggg==\"\n",
    "CURSOR_BYTES = base64.b64decode(CURSOR_64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[5], line 162\u001b[0m\n\u001b[1;32m    160\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m step_idx \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mlen\u001b[39m(subset) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m    161\u001b[0m     decoded_img \u001b[38;5;241m=\u001b[39m base64\u001b[38;5;241m.\u001b[39mb64decode(step[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstate\u001b[39m\u001b[38;5;124m\"\u001b[39m][\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mscreenshot\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[0;32m--> 162\u001b[0m     marked_image \u001b[38;5;241m=\u001b[39m \u001b[43mmark_screenshot\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdecoded_img\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmouse\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscrollbar\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    163\u001b[0m     msg_content\u001b[38;5;241m.\u001b[39mappend({\n\u001b[1;32m    164\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimage_url\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m    165\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimage_url\u001b[39m\u001b[38;5;124m\"\u001b[39m: {\n\u001b[1;32m    166\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124murl\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata:image/jpeg;base64,\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmarked_image\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    167\u001b[0m         },\n\u001b[1;32m    168\u001b[0m     })\n\u001b[1;32m    170\u001b[0m training_example\u001b[38;5;241m.\u001b[39mappend({\n\u001b[1;32m    171\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: 
\u001b[38;5;124m'\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m    172\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: msg_content\n\u001b[1;32m    173\u001b[0m })\n",
      "Cell \u001b[0;32mIn[5], line 81\u001b[0m, in \u001b[0;36mmark_screenshot\u001b[0;34m(img_buffer, mouse_position, scrollbar)\u001b[0m\n\u001b[1;32m     79\u001b[0m output_buffer \u001b[38;5;241m=\u001b[39m io\u001b[38;5;241m.\u001b[39mBytesIO()\n\u001b[1;32m     80\u001b[0m composite\u001b[38;5;241m.\u001b[39msave(output_buffer, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mJPEG\u001b[39m\u001b[38;5;124m\"\u001b[39m, quality\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m85\u001b[39m)\n\u001b[0;32m---> 81\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mbase64\u001b[49m\u001b[38;5;241m.\u001b[39mb64encode(output_buffer\u001b[38;5;241m.\u001b[39mgetvalue())\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "Cell \u001b[0;32mIn[5], line 81\u001b[0m, in \u001b[0;36mmark_screenshot\u001b[0;34m(img_buffer, mouse_position, scrollbar)\u001b[0m\n\u001b[1;32m     79\u001b[0m output_buffer \u001b[38;5;241m=\u001b[39m io\u001b[38;5;241m.\u001b[39mBytesIO()\n\u001b[1;32m     80\u001b[0m composite\u001b[38;5;241m.\u001b[39msave(output_buffer, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mJPEG\u001b[39m\u001b[38;5;124m\"\u001b[39m, quality\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m85\u001b[39m)\n\u001b[0;32m---> 81\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mbase64\u001b[49m\u001b[38;5;241m.\u001b[39mb64encode(output_buffer\u001b[38;5;241m.\u001b[39mgetvalue())\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "File \u001b[0;32m_pydevd_bundle\\\\pydevd_cython.pyx:1698\u001b[0m, in \u001b[0;36m_pydevd_bundle.pydevd_cython.SafeCallWrapper.__call__\u001b[0;34m()\u001b[0m\n",
      "File \u001b[0;32m_pydevd_bundle\\\\pydevd_cython.pyx:636\u001b[0m, in \u001b[0;36m_pydevd_bundle.pydevd_cython.PyDBFrame.trace_dispatch\u001b[0;34m()\u001b[0m\n",
      "File \u001b[0;32m_pydevd_bundle\\\\pydevd_cython.pyx:1113\u001b[0m, in \u001b[0;36m_pydevd_bundle.pydevd_cython.PyDBFrame.trace_dispatch\u001b[0;34m()\u001b[0m\n",
      "File \u001b[0;32m_pydevd_bundle\\\\pydevd_cython.pyx:1091\u001b[0m, in \u001b[0;36m_pydevd_bundle.pydevd_cython.PyDBFrame.trace_dispatch\u001b[0;34m()\u001b[0m\n",
      "File \u001b[0;32m_pydevd_bundle\\\\pydevd_cython.pyx:496\u001b[0m, in \u001b[0;36m_pydevd_bundle.pydevd_cython.PyDBFrame.do_wait_suspend\u001b[0;34m()\u001b[0m\n",
      "File \u001b[0;32m~/src/cerebellum/training/.venv/lib/python3.11/site-packages/debugpy/_vendored/pydevd/pydevd.py:2197\u001b[0m, in \u001b[0;36mPyDB.do_wait_suspend\u001b[0;34m(self, thread, frame, event, arg, exception_type)\u001b[0m\n\u001b[1;32m   2194\u001b[0m             from_this_thread\u001b[38;5;241m.\u001b[39mappend(frame_custom_thread_id)\n\u001b[1;32m   2196\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_threads_suspended_single_notification\u001b[38;5;241m.\u001b[39mnotify_thread_suspended(thread_id, thread, stop_reason):\n\u001b[0;32m-> 2197\u001b[0m         keep_suspended \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_do_wait_suspend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mthread\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mframe\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mevent\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43marg\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrace_suspend_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrom_this_thread\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mframes_tracker\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   2199\u001b[0m frames_list \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   2201\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m keep_suspended:\n\u001b[1;32m   2202\u001b[0m     \u001b[38;5;66;03m# This means that we should pause again after a set next statement.\u001b[39;00m\n",
      "File \u001b[0;32m~/src/cerebellum/training/.venv/lib/python3.11/site-packages/debugpy/_vendored/pydevd/pydevd.py:2266\u001b[0m, in \u001b[0;36mPyDB._do_wait_suspend\u001b[0;34m(self, thread, frame, event, arg, trace_suspend_type, from_this_thread, frames_tracker)\u001b[0m\n\u001b[1;32m   2263\u001b[0m                 queue\u001b[38;5;241m.\u001b[39mput(internal_cmd)\n\u001b[1;32m   2264\u001b[0m                 wait_timeout \u001b[38;5;241m=\u001b[39m TIMEOUT_FAST\n\u001b[0;32m-> 2266\u001b[0m         \u001b[43mnotify_event\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43mwait_timeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   2267\u001b[0m         notify_event\u001b[38;5;241m.\u001b[39mclear()\n\u001b[1;32m   2269\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n",
      "File \u001b[0;32m~/.pyenv/versions/3.11.10/lib/python3.11/threading.py:629\u001b[0m, in \u001b[0;36mEvent.wait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m    627\u001b[0m signaled \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_flag\n\u001b[1;32m    628\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m signaled:\n\u001b[0;32m--> 629\u001b[0m     signaled \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cond\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    630\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m signaled\n",
      "File \u001b[0;32m~/.pyenv/versions/3.11.10/lib/python3.11/threading.py:331\u001b[0m, in \u001b[0;36mCondition.wait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m    329\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    330\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m timeout \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m--> 331\u001b[0m         gotit \u001b[38;5;241m=\u001b[39m \u001b[43mwaiter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43macquire\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    332\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    333\u001b[0m         gotit \u001b[38;5;241m=\u001b[39m waiter\u001b[38;5;241m.\u001b[39macquire(\u001b[38;5;28;01mFalse\u001b[39;00m)\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Post process\n",
    "# System prompt given to the browsing agent for each training conversation.\n",
    "system_prompt = '''You are an intelligent web browsing agent operating in fullscreen mode to accomplish a specified user goal, detailed in <USER_TASK>.\n",
    "<SYSTEM_CAPABILITY>\n",
    "* Use only the Page Down or Page Up keys for scrolling.\n",
    "* If the webpage is scrollable, a gray rectangular scrollbar will appear on the right edge of the screenshot.\n",
    "* Adhere strictly to the instructions in the <IMPORTANT> section below.\n",
    "</SYSTEM_CAPABILITY>\n",
    "\n",
    "Your task is to execute user requests using their browser. After each action, capture a screenshot and thoroughly assess whether the desired outcome has been achieved. Clearly articulate your reasoning for each function call: \"I have evaluated step X...\" If the result is incorrect, attempt the step again. Proceed to the next step only after confirming successful execution. Always utilize a tool for actions and ensure to return a tool call. Remember to invoke the stop_browsing tool upon achieving the task's goal. Prioritize keyboard shortcuts for navigation whenever feasible.\n",
    "\n",
    "<IMPORTANT>\n",
    "* Utilize the user's <USER_DATA> to complete forms as you progress towards the goal.\n",
    "* Ensure a UI element is fully visible before interacting with it.\n",
    "</IMPORTANT>'''\n",
    "\n",
    "\n",
    "import os\n",
    "import json\n",
    "from PIL import Image \n",
    "import io\n",
    "import math\n",
    "\n",
    "cursor_img = Image.open(io.BytesIO(CURSOR_BYTES))\n",
    "\n",
    "def mark_screenshot(\n",
    "    img_buffer: bytes, mouse_position: Coordinate, scrollbar: ScrollBar\n",
    ") -> str:\n",
    "    \"\"\"Adds scrollbar and cursor overlays to a screenshot.\n",
    "    Args:\n",
    "        img_buffer: Raw bytes of the screenshot image\n",
    "        mouse_position: Coordinate object containing x,y position of mouse cursor\n",
    "        scrollbar: ScrollBar object containing scrollbar dimensions and position\n",
    "    Returns:\n",
    "        Base64 string of the modified screenshot with overlays added\n",
    "    Raises:\n",
    "        IOError: If there are issues manipulating the image\n",
    "    \"\"\"\n",
    "    with Image.open(io.BytesIO(img_buffer)) as img:\n",
    "        width, height = img.size\n",
    "\n",
    "        # Create scrollbar overlay sized from the ScrollBar fractions of page height\n",
    "        scrollbar_width = 10\n",
    "        scrollbar_height = int(height * scrollbar.height)\n",
    "        scrollbar_top = int(height * scrollbar.offset)\n",
    "\n",
    "        # Create gray rectangle for scrollbar with 80% opacity\n",
    "        scrollbar_img = Image.new(\n",
    "            \"RGBA\", (scrollbar_width, scrollbar_height), (128, 128, 128, int(255 * 0.8))\n",
    "        )\n",
    "\n",
    "        # Create composite image with the scrollbar pinned to the right edge\n",
    "        composite = img.copy()\n",
    "        composite.paste(scrollbar_img, (width - scrollbar_width, scrollbar_top), scrollbar_img)\n",
    "\n",
    "        # Add cursor; cursor_img doubles as its own alpha mask (RGBA PNG)\n",
    "        composite.paste(\n",
    "            cursor_img,\n",
    "            (max(0, mouse_position.x), max(0, mouse_position.y)),\n",
    "            cursor_img,\n",
    "        )\n",
    "\n",
    "        # Calculate the aspect ratio\n",
    "        aspect_ratio = composite.width / composite.height\n",
    "\n",
    "        # Downscale to fit within 640x400 while preserving aspect ratio\n",
    "        if composite.width > 640 or composite.height > 400:\n",
    "            if aspect_ratio > 640 / 400:\n",
    "                new_width = 640\n",
    "                new_height = int(640 / aspect_ratio)\n",
    "            else:\n",
    "                new_height = 400\n",
    "                new_width = int(400 * aspect_ratio)\n",
    "        else:\n",
    "            new_width, new_height = composite.width, composite.height\n",
    "\n",
    "        # Resize the composite image\n",
    "        composite = composite.resize((new_width, new_height), Image.LANCZOS)\n",
    "\n",
    "        # Convert back to base64 string\n",
    "        # NOTE(review): JPEG save assumes the decoded screenshot is RGB; an RGBA\n",
    "        # input would make save() raise -- confirm upstream screenshots are JPEG/RGB.\n",
    "        output_buffer = io.BytesIO()\n",
    "        composite.save(output_buffer, \"JPEG\", quality=85)\n",
    "        return base64.b64encode(output_buffer.getvalue()).decode('utf-8')\n",
    "\n",
    "\n",
    "# Input/output directories for the jsonl trajectory files\n",
    "directory = 'mind2web'\n",
    "output_directory = 'molmo'\n",
    "os.makedirs(output_directory, exist_ok=True)\n",
    "\n",
    "# Each input file is one recorded task: a goal record followed by the steps.\n",
    "for filename in os.listdir(directory):\n",
    "    if filename.endswith('.jsonl'):\n",
    "        file_path = os.path.join(directory, filename)\n",
    "\n",
    "        print('Processing', filename)\n",
    "        # Open and read each jsonl file, one JSON object per line\n",
    "        with open(file_path, 'r') as file:\n",
    "            lines = [json.loads(line) for line in file]\n",
    "\n",
    "            # The first record carries the task goal; the rest are steps.\n",
    "            goal = lines.pop(0)[\"goal\"]\n",
    "\n",
    "            # Shared conversation prefix: system prompt, the user task, and a\n",
    "            # seeded assistant 'screenshot' tool call so the model always\n",
    "            # starts by observing the page.\n",
    "            starting_directions = []\n",
    "            starting_directions.append({\n",
    "                \"role\": \"system\",\n",
    "                \"content\": system_prompt\n",
    "            })\n",
    "            starting_directions.append({\n",
    "                \"role\": \"user\",\n",
    "                \"content\": f\"<USER_TASK>{goal}</USER_TASK>\\n<USER_DATA>NONE</USER_DATA>\"\n",
    "            })\n",
    "            starting_directions.append({\n",
    "                \"role\": \"assistant\",\n",
    "                \"content\": \"\",\n",
    "                \"tool_calls\": [\n",
    "                    {\n",
    "                        'name': 'screenshot',\n",
    "                        'arguments': '{\"reason\": \"Take a screenshot of the browser to understand the current webpage\"}'\n",
    "                    }\n",
    "                ]\n",
    "            })\n",
    "\n",
    "            examples = []\n",
    "\n",
    "            # lines now contains the steps; emit one training example per step\n",
    "            # prefix (steps 0..i) so every decision point becomes a target.\n",
    "            for i in range(len(lines)):\n",
    "                subset = lines[:i+1]\n",
    "\n",
    "                training_example = starting_directions.copy()\n",
    "\n",
    "                # BUGFIX: reset for every example — the first tool response in\n",
    "                # each example answers the seeded 'screenshot' call. Before,\n",
    "                # the value leaked from the previous example's last action.\n",
    "                last_function_name = 'screenshot'\n",
    "\n",
    "                for step_idx, step in enumerate(subset):\n",
    "                    state = step[\"state\"]\n",
    "\n",
    "                    scrollbar = ScrollBar(offset=state[\"scrollbar\"][\"offset\"],\n",
    "                                          height=state[\"scrollbar\"][\"height\"])\n",
    "                    mouse = Coordinate(x=math.floor(state[\"mouse\"][\"x\"]),\n",
    "                                       y=math.floor(state[\"mouse\"][\"y\"]))\n",
    "\n",
    "                    # Normalize the mouse position to fractions of the viewport.\n",
    "                    normalized_mouse_x = mouse.x / float(state[\"width\"])\n",
    "                    normalized_mouse_y = mouse.y / float(state[\"height\"])\n",
    "\n",
    "                    # Tool response for the preceding assistant tool call.\n",
    "                    training_example.append({\n",
    "                        \"role\": 'tool',\n",
    "                        'name': last_function_name,\n",
    "                        \"content\": '{\"result\": \"Action completed successfully\"}'\n",
    "                    })\n",
    "\n",
    "                    # BUGFIX: dropped the stray trailing backtick that was\n",
    "                    # appended to every cursor-position message.\n",
    "                    msg_content = []\n",
    "                    msg_content.append({\n",
    "                        \"type\": \"text\",\n",
    "                        \"value\": f\"After action mouse cursor is at X: {normalized_mouse_x}, Y: {normalized_mouse_y}\\n\"\n",
    "                    })\n",
    "\n",
    "                    # Only the final step carries the (expensive) marked\n",
    "                    # screenshot; earlier steps are text-only context.\n",
    "                    if step_idx == len(subset) - 1:\n",
    "                        decoded_img = base64.b64decode(state[\"screenshot\"])\n",
    "                        marked_image = mark_screenshot(decoded_img, mouse, scrollbar)\n",
    "                        msg_content.append({\n",
    "                            \"type\": \"image_url\",\n",
    "                            \"image_url\": {\n",
    "                                \"url\": f\"data:image/jpeg;base64,{marked_image}\"\n",
    "                            },\n",
    "                        })\n",
    "\n",
    "                    training_example.append({\n",
    "                        \"role\": 'user',\n",
    "                        \"content\": msg_content\n",
    "                    })\n",
    "\n",
    "                    # Build the target tool call; coordinates are normalized\n",
    "                    # to the viewport exactly like the mouse position above.\n",
    "                    action_arg = {\n",
    "                        \"reason\": step[\"action\"][\"reasoning\"],\n",
    "                    }\n",
    "\n",
    "                    if step[\"action\"][\"coordinate\"]:\n",
    "                        [x, y] = step[\"action\"][\"coordinate\"]\n",
    "                        norm_x = float(x) / float(state[\"width\"])\n",
    "                        norm_y = float(y) / float(state[\"height\"])\n",
    "                        action_arg[\"coordinate\"] = (norm_x, norm_y)\n",
    "\n",
    "                    if step[\"action\"][\"text\"]:\n",
    "                        action_arg[\"text\"] = step[\"action\"][\"text\"]\n",
    "\n",
    "                    training_example.append({\n",
    "                        \"role\": \"assistant\",\n",
    "                        \"content\": \"\",\n",
    "                        \"tool_calls\": [\n",
    "                            {\n",
    "                                'name': step[\"action\"][\"action\"],\n",
    "                                'arguments': json.dumps(action_arg)\n",
    "                            }\n",
    "                        ]\n",
    "                    })\n",
    "\n",
    "                    last_function_name = step[\"action\"][\"action\"]\n",
    "\n",
    "                examples.append(training_example)\n",
    "\n",
    "            # BUGFIX: write one output file per input file. The old constant\n",
    "            # f-string path (no placeholder) overwrote the same output file on\n",
    "            # every loop iteration, keeping only the last task's examples.\n",
    "            with open(os.path.join(output_directory, filename), 'w') as jsonl_file:\n",
    "                for example in examples:\n",
    "                    jsonl_file.write(json.dumps(example) + '\\n')\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
