{
  "nbformat": 4,
  "nbformat_minor": 1,
  "metadata": {},
  "cells": [
    {
      "metadata": {},
      "source": [
        "<td>\n",
        "   <a target=\"_blank\" href=\"https://labelbox.com\" ><img src=\"https://labelbox.com/blog/content/images/2021/02/logo-v4.svg\" width=256/></a>\n",
        "</td>"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "<td>\n",
        "<a href=\"https://colab.research.google.com/github/Labelbox/labelbox-python/blob/master/examples/annotation_import/pdf.ipynb\" target=\"_blank\"><img\n",
        "src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
        "</td>\n",
        "\n",
        "<td>\n",
        "<a href=\"https://github.com/Labelbox/labelbox-python/tree/master/examples/annotation_import/pdf.ipynb\" target=\"_blank\"><img\n",
        "src=\"https://img.shields.io/badge/GitHub-100000?logo=github&logoColor=white\" alt=\"GitHub\"></a>\n",
        "</td>"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# PDF Annotation Import"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "\n",
        "Supported annotations for PDF assets \n",
        "\n",
        "*Annotation types*\n",
        "- Checklist classification (including nested classifications)\n",
        "- Radio classifications (including nested classifications)\n",
        "- Free text classifications\n",
        "- Bounding box\n",
        "- Entities\n",
        "- Relationships (only supported for MAL imports)\n",
        "\n",
        "\n",
        "*NDJson*\n",
        "- Checklist classification (including nested classifications)\n",
        "- Radio classifications (including nested classifications)\n",
        "- Free text classifications\n",
        "- Bounding box \n",
        "- Entities \n",
        "- Relationships (only supported for MAL imports)"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "### Setup"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "!pip install -q \"labelbox[data]\""
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "import uuid\n",
        "import json\n",
        "import requests\n",
        "import labelbox as lb\n",
        "import labelbox.types as lb_types\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Replace with your API key\n",
        "Guides on https://docs.labelbox.com/docs/create-an-api-key"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# Add your api key\n",
        "API_KEY = \"\"\n",
        "client = lb.Client(api_key=API_KEY)"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Supported Annotations"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "########## Entity ##########\n",
        "\n",
        "# Annotation Types\n",
        "entities_annotations = lb_types.ObjectAnnotation(\n",
        "    name=\"named_entity\",\n",
        "    value= lb_types.DocumentEntity(\n",
        "        name=\"named_entity\",\n",
        "        textSelections=[\n",
        "            lb_types.DocumentTextSelection(\n",
        "                token_ids=[],\n",
        "                group_id=\"\",\n",
        "                page=1\n",
        "            )\n",
        "        ]\n",
        "    )\n",
        ")\n",
        "\n",
        "# NDJSON\n",
        "entities_annotations_ndjson = {\n",
        "    \"name\": \"named_entity\",\n",
        "    \"textSelections\": [\n",
        "        {\n",
        "            \"tokenIds\": [\n",
        "                \"<UUID>\",\n",
        "            ],\n",
        "            \"groupId\": \"<UUID>\",\n",
        "            \"page\": 1,\n",
        "        }\n",
        "    ]\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "########### Radio Classification #########\n",
        "\n",
        "# Annotation types\n",
        "radio_annotation = lb_types.ClassificationAnnotation(\n",
        "    name=\"radio_question\",\n",
        "    value=lb_types.Radio(answer =\n",
        "        lb_types.ClassificationAnswer(name = \"first_radio_answer\")\n",
        "    )\n",
        ")\n",
        "# NDJSON\n",
        "radio_annotation_ndjson = {\n",
        "  \"name\": \"radio_question\",\n",
        "  \"answer\": {\"name\": \"first_radio_answer\"}\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "############ Checklist Classification ###########\n",
        "\n",
        "# Annotation types\n",
        "checklist_annotation = lb_types.ClassificationAnnotation(\n",
        "    name=\"checklist_question\",\n",
        "    value=lb_types.Checklist(answer = [\n",
        "        lb_types.ClassificationAnswer(name = \"first_checklist_answer\"),\n",
        "        lb_types.ClassificationAnswer(name = \"second_checklist_answer\")\n",
        "    ])\n",
        "  )\n",
        "\n",
        "\n",
        "# NDJSON\n",
        "checklist_annotation_ndjson = {\n",
        "  \"name\": \"checklist_question\",\n",
        "  \"answer\": [\n",
        "    {\"name\": \"first_checklist_answer\"},\n",
        "    {\"name\": \"second_checklist_answer\"}\n",
        "  ]\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "############ Bounding Box ###########\n",
        "\n",
        "bbox_annotation = lb_types.ObjectAnnotation(\n",
        "    name=\"bounding_box\",  # must match your ontology feature\"s name\n",
        "    value=lb_types.DocumentRectangle(\n",
        "        start=lb_types.Point(x=102.771, y=135.3),  # x = left, y = top\n",
        "        end=lb_types.Point(x=518.571, y=245.143),  # x= left + width , y = top + height\n",
        "        page=0,\n",
        "        unit=lb_types.RectangleUnit.POINTS\n",
        "        )\n",
        "    )\n",
        "\n",
        "bbox_annotation_ndjson = {\n",
        "  \"name\": \"bounding_box\",\n",
        "  \"bbox\": {\n",
        "            \"top\": 135.3,\n",
        "            \"left\": 102.771,\n",
        "            \"height\": 109.843,\n",
        "            \"width\": 415.8\n",
        "      },\n",
        "  \"page\": 0,\n",
        "  \"unit\": \"POINTS\"\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "# ############ global nested classifications ###########\n",
        "\n",
        "nested_checklist_annotation = lb_types.ClassificationAnnotation(\n",
        "  name=\"nested_checklist_question\",\n",
        "  value=lb_types.Checklist(\n",
        "    answer=[lb_types.ClassificationAnswer(\n",
        "      name=\"first_checklist_answer\",\n",
        "      classifications=[\n",
        "        lb_types.ClassificationAnnotation(\n",
        "          name=\"sub_checklist_question\",\n",
        "          value=lb_types.Checklist(\n",
        "            answer=[lb_types.ClassificationAnswer(\n",
        "            name=\"first_sub_checklist_answer\"\n",
        "          )]\n",
        "        ))\n",
        "      ]\n",
        "    )]\n",
        "  )\n",
        ")\n",
        "\n",
        "nested_checklist_annotation_ndjson = {\n",
        "  \"name\": \"nested_checklist_question\",\n",
        "  \"answer\": [{\n",
        "      \"name\": \"first_checklist_answer\",\n",
        "      \"classifications\" : [\n",
        "        {\n",
        "          \"name\": \"sub_checklist_question\",\n",
        "          \"answer\": {\"name\": \"first_sub_checklist_answer\"}\n",
        "        }\n",
        "      ]\n",
        "  }]\n",
        "}\n",
        "\n",
        "nested_radio_annotation = lb_types.ClassificationAnnotation(\n",
        "  name=\"nested_radio_question\",\n",
        "  value=lb_types.Radio(\n",
        "    answer=lb_types.ClassificationAnswer(\n",
        "      name=\"first_radio_answer\",\n",
        "      classifications=[\n",
        "        lb_types.ClassificationAnnotation(\n",
        "          name=\"sub_radio_question\",\n",
        "          value=lb_types.Radio(\n",
        "            answer=lb_types.ClassificationAnswer(\n",
        "              name=\"first_sub_radio_answer\"\n",
        "            )\n",
        "          )\n",
        "        )\n",
        "      ]\n",
        "    )\n",
        "  )\n",
        ")\n",
        "\n",
        "\n",
        "nested_radio_annotation_ndjson = {\n",
        "  \"name\": \"nested_radio_question\",\n",
        "  \"answer\": {\n",
        "      \"name\": \"first_radio_answer\",\n",
        "      \"classifications\": [{\n",
        "          \"name\":\"sub_radio_question\",\n",
        "          \"answer\": { \"name\" : \"first_sub_radio_answer\"}\n",
        "        }]\n",
        "    }\n",
        "}\n",
        "\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "############## Classification Free-form text ##############\n",
        "\n",
        "text_annotation = lb_types.ClassificationAnnotation(\n",
        "  name=\"free_text\",  # must match your ontology feature\"s name\n",
        "  value=lb_types.Text(answer=\"sample text\")\n",
        ")\n",
        "\n",
        "\n",
        "text_annotation_ndjson = {\n",
        "  \"name\": \"free_text\",\n",
        "  \"answer\": \"sample text\"\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "######### BBOX with nested classifications #########\n",
        "\n",
        "bbox_with_radio_subclass_annotation = lb_types.ObjectAnnotation(\n",
        "    name=\"bbox_with_radio_subclass\",\n",
        "    value=lb_types.DocumentRectangle(\n",
        "        start=lb_types.Point(x=317.271, y=226.757), # x = left, y = top\n",
        "        end=lb_types.Point(x=566.657, y=420.986), # x= left + width , y = top + height\n",
        "        unit=lb_types.RectangleUnit.POINTS,\n",
        "        page=1\n",
        "    ),\n",
        "    classifications=[\n",
        "    \tlb_types.ClassificationAnnotation(\n",
        "        \tname=\"sub_radio_question\",\n",
        "      \t\tvalue=lb_types.Radio(\n",
        "          answer=lb_types.ClassificationAnswer(\n",
        "            name=\"first_sub_radio_answer\",\n",
        "            classifications=[\n",
        "              lb_types.ClassificationAnnotation(\n",
        "                name=\"second_sub_radio_question\",\n",
        "                value=lb_types.Radio(\n",
        "                  answer=lb_types.ClassificationAnswer(\n",
        "                    name=\"second_sub_radio_answer\"\n",
        "                  )\n",
        "                )\n",
        "              )\n",
        "            ]\n",
        "          )\n",
        "          )\n",
        "        )\n",
        "    ]\n",
        ")\n",
        "\n",
        "bbox_with_radio_subclass_annotation_ndjson = {\n",
        "  \"name\": \"bbox_with_radio_subclass\",\n",
        "  \"classifications\": [\n",
        "    {\n",
        "      \"name\": \"sub_radio_question\",\n",
        "      \"answer\": {\n",
        "          \"name\": \"first_sub_radio_answer\",\n",
        "          \"classifications\": [\n",
        "              {\n",
        "                  \"name\": \"second_sub_radio_question\",\n",
        "                  \"answer\": {\n",
        "                      \"name\": \"second_sub_radio_answer\"}\n",
        "               }\n",
        "            ]\n",
        "        }\n",
        "    }\n",
        "  ],\n",
        "  \"bbox\": {\n",
        "        \"top\": 226.757,\n",
        "        \"left\": 317.271,\n",
        "        \"height\": 194.229,\n",
        "        \"width\": 249.386\n",
        "    },\n",
        "  \"page\": 1,\n",
        "  \"unit\": \"POINTS\"\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "############ NER with nested classifications ########\n",
        "\n",
        "\n",
        "ner_with_checklist_subclass_annotation = lb_types.ObjectAnnotation(\n",
        "  name=\"ner_with_checklist_subclass\",\n",
        "  value=lb_types.DocumentEntity(\n",
        "    name=\"ner_with_checklist_subclass\",\n",
        "    text_selections=[\n",
        "      lb_types.DocumentTextSelection(\n",
        "        token_ids=[],\n",
        "        group_id=\"\",\n",
        "        page=1\n",
        "      )\n",
        "    ]\n",
        "  ),\n",
        "  classifications=[\n",
        "    lb_types.ClassificationAnnotation(\n",
        "      name=\"sub_checklist_question\",\n",
        "      value=lb_types.Checklist(\n",
        "      answer=[lb_types.ClassificationAnswer(name=\"first_sub_checklist_answer\")]\n",
        "      )\n",
        "    )\n",
        "  ]\n",
        ")\n",
        "\n",
        "\n",
        "ner_with_checklist_subclass_annotation_ndjson = {\n",
        "  \"name\": \"ner_with_checklist_subclass\",\n",
        "  \"classifications\":[\n",
        "    {\n",
        "      \"name\": \"sub_checklist_question\",\n",
        "      \"answer\": [{\"name\": \"first_sub_checklist_answer\"}]\n",
        "    }\n",
        "  ],\n",
        "  \"textSelections\": [\n",
        "      {\n",
        "          \"tokenIds\": [\n",
        "              \"<UUID>\"\n",
        "          ],\n",
        "          \"groupId\": \"<UUID>\",\n",
        "          \"page\": 1\n",
        "      }\n",
        "  ]\n",
        "}\n",
        "\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "######### Relationships ##########\n",
        "entity_source = lb_types.ObjectAnnotation(\n",
        "    name=\"named_entity\",\n",
        "    value= lb_types.DocumentEntity(\n",
        "        name=\"named_entity\",\n",
        "        textSelections=[\n",
        "            lb_types.DocumentTextSelection(\n",
        "                token_ids=[],\n",
        "                group_id=\"\",\n",
        "                page=1\n",
        "            )\n",
        "        ]\n",
        "    )\n",
        ")\n",
        "\n",
        "entity_target = lb_types.ObjectAnnotation(\n",
        "    name=\"named_entity\",\n",
        "    value=lb_types.DocumentEntity(\n",
        "      name=\"named_entity\",\n",
        "      textSelections=[\n",
        "        lb_types.DocumentTextSelection(\n",
        "          token_ids=[],\n",
        "          group_id=\"\",\n",
        "          page=1\n",
        "        )\n",
        "      ]\n",
        "    )\n",
        ")\n",
        "\n",
        "entity_relationship = lb_types.RelationshipAnnotation(\n",
        "    name=\"relationship\",\n",
        "    value=lb_types.Relationship(\n",
        "        source=entity_source,\n",
        "        target=entity_target,\n",
        "        type=lb_types.Relationship.Type.UNIDIRECTIONAL,\n",
        "    ))\n",
        "\n",
        "## Only supported for MAL imports\n",
        "uuid_source = str(uuid.uuid4())\n",
        "uuid_target = str(uuid.uuid4())\n",
        "\n",
        "entity_source_ndjson = {\n",
        "  \"name\": \"named_entity\",\n",
        "  \"uuid\": uuid_source,\n",
        "  \"textSelections\": [\n",
        "    {\n",
        "      \"tokenIds\": [\n",
        "        \"<UUID>\"\n",
        "      ],\n",
        "      \"groupId\": \"<UUID>\",\n",
        "      \"page\": 1\n",
        "    }\n",
        "  ]\n",
        "\n",
        "}\n",
        "\n",
        "entity_target_ndjson = {\n",
        "  \"name\": \"named_entity\",\n",
        "  \"uuid\": uuid_target,\n",
        "  \"textSelections\": [\n",
        "    {\n",
        "      \"tokenIds\": [\n",
        "        \"<UUID>\"\n",
        "      ],\n",
        "      \"groupId\": \"<UUID>\",\n",
        "      \"page\": 1\n",
        "    }\n",
        "  ]\n",
        "}\n",
        "ner_relationship_annotation_ndjson = {\n",
        "    \"name\": \"relationship\",\n",
        "    \"relationship\": {\n",
        "      \"source\": uuid_source,\n",
        "      \"target\": uuid_target,\n",
        "      \"type\": \"unidirectional\"\n",
        "    }\n",
        "}\n",
        "\n",
        "\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "######### BBOX with relationships #############\n",
        "# Python Annotation\n",
        "bbox_source = lb_types.ObjectAnnotation(\n",
        "    name=\"bounding_box\",\n",
        "    value=lb_types.DocumentRectangle(\n",
        "        start=lb_types.Point(x=188.257, y=68.875), # x = left, y = top\n",
        "        end=lb_types.Point(x=270.907, y=149.556), # x = left + width , y = top + height\n",
        "        unit=lb_types.RectangleUnit.POINTS,\n",
        "        page=1\n",
        "    ),\n",
        ")\n",
        "\n",
        "bbox_target = lb_types.ObjectAnnotation(\n",
        "    name=\"bounding_box\",\n",
        "    value=lb_types.DocumentRectangle(\n",
        "        start=lb_types.Point(x=96.424, y=66.251),\n",
        "        end=lb_types.Point(x=179.074, y=146.932),\n",
        "        unit=lb_types.RectangleUnit.POINTS,\n",
        "        page=1\n",
        "    ),\n",
        ")\n",
        "\n",
        "bbox_relationship = lb_types.RelationshipAnnotation(\n",
        "    name=\"relationship\",\n",
        "    value=lb_types.Relationship(\n",
        "        source=bbox_source,\n",
        "        target=bbox_target,\n",
        "        type=lb_types.Relationship.Type.UNIDIRECTIONAL,\n",
        "    ))\n",
        "\n",
        "\n",
        "## Only supported for MAL imports\n",
        "uuid_source_2 = str(uuid.uuid4())\n",
        "uuid_target_2 = str(uuid.uuid4())\n",
        "\n",
        "bbox_source_ndjson = {\n",
        "  \"name\": \"bounding_box\",\n",
        "  \"uuid\": uuid_source_2,\n",
        "  \"bbox\":  {\n",
        "            \"top\": 68.875,\n",
        "            \"left\": 188.257,\n",
        "            \"height\": 80.681,\n",
        "            \"width\": 82.65\n",
        "        },\n",
        "  \"page\": 1,\n",
        "  \"unit\": \"POINTS\"\n",
        "}\n",
        "\n",
        "bbox_target_ndjson = {\n",
        "  \"name\": \"bounding_box\",\n",
        "  \"uuid\": uuid_target_2,\n",
        "  \"bbox\":  {\n",
        "            \"top\": 66.251,\n",
        "            \"left\": 96.424,\n",
        "            \"height\": 80.681,\n",
        "            \"width\": 82.65\n",
        "        },\n",
        "  \"page\": 1,\n",
        "  \"unit\": \"POINTS\"\n",
        "}\n",
        "\n",
        "bbox_relationship_annotation_ndjson = {\n",
        "    \"name\": \"relationship\",\n",
        "    \"relationship\": {\n",
        "      \"source\": uuid_source_2,\n",
        "      \"target\": uuid_target_2,\n",
        "      \"type\": \"unidirectional\"\n",
        "    }\n",
        "}"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "## Upload Annotations - putting it all together "
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "### Step 1: Import data rows into Catalog "
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "Passing a `text_layer_url` is not longer required. Labelbox automatically generates a text layer using Google Document AI and its OCR engine to detect tokens. \n",
        "\n",
        "However, it's important to note that Google Document AI imposes specific restrictions on document size:\n",
        "- The document must have no more than 15 pages.\n",
        "- The file size should not exceed 20 MB.\n",
        "\n",
        "Furthermore, Google Document AI optimizes documents before OCR processing. This optimization might include rotating images or pages to ensure that text appears horizontally. Consequently, token coordinates are calculated based on the rotated/optimized images, resulting in potential discrepancies with the original PDF document.\n",
        "\n",
        "For example, in a landscape-oriented PDF, the document is rotated by 90 degrees before processing. As a result, all tokens in the text layer are also rotated by 90 degrees.\n",
        "\n",
        "You may still pass a `text_layer_url` if you wish to bypass the automatic text layer generation\n"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "global_key = \"0801.3483_doc.pdf\"\n",
        "img_url = {\n",
        "    \"row_data\": {\n",
        "      \"pdf_url\": \"https://storage.googleapis.com/labelbox-datasets/arxiv-pdf/data/99-word-token-pdfs/0801.3483.pdf\"\n",
        "    },\n",
        "    \"global_key\": global_key\n",
        "}\n",
        "\n",
        "\n",
        "dataset = client.create_dataset(name=\"pdf_demo_dataset\")\n",
        "task = dataset.create_data_rows([img_url])\n",
        "task.wait_till_done()\n",
        "print(f\"Failed data rows: {task.failed_data_rows}\")\n",
        "print(f\"Errors: {task.errors}\")\n",
        "\n",
        "if task.errors:\n",
        "    for error in task.errors:\n",
        "        if 'Duplicate global key' in error['message'] and dataset.row_count == 0:\n",
        "            # If the global key already  exists in the workspace the dataset will be created empty, so we can delete it.\n",
        "            print(f\"Deleting empty dataset: {dataset}\")\n",
        "            dataset.delete()"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Step 2: Create/select an Ontology for your project\n",
        "\n"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "## Setup the ontology and link the tools created above.\n",
        "\n",
        "ontology_builder = lb.OntologyBuilder(\n",
        "  classifications=[ # List of Classification objects\n",
        "    lb.Classification(\n",
        "      class_type=lb.Classification.Type.RADIO,\n",
        "      name=\"radio_question\",\n",
        "      scope = lb.Classification.Scope.GLOBAL,\n",
        "      options=[\n",
        "        lb.Option(value=\"first_radio_answer\"),\n",
        "        lb.Option(value=\"second_radio_answer\")\n",
        "      ]\n",
        "    ),\n",
        "    lb.Classification(\n",
        "      class_type=lb.Classification.Type.CHECKLIST,\n",
        "      name=\"checklist_question\",\n",
        "      scope = lb.Classification.Scope.GLOBAL,\n",
        "      options=[\n",
        "        lb.Option(value=\"first_checklist_answer\"),\n",
        "        lb.Option(value=\"second_checklist_answer\")\n",
        "      ]\n",
        "    ),\n",
        "    lb.Classification(\n",
        "      class_type=lb.Classification.Type.TEXT,\n",
        "      name=\"free_text\",\n",
        "      scope = lb.Classification.Scope.GLOBAL\n",
        "    ),\n",
        "    lb.Classification(\n",
        "        class_type=lb.Classification.Type.RADIO,\n",
        "        name=\"nested_radio_question\",\n",
        "        scope = lb.Classification.Scope.GLOBAL,\n",
        "        options=[\n",
        "            lb.Option(\"first_radio_answer\",\n",
        "                options=[\n",
        "                    lb.Classification(\n",
        "                        class_type=lb.Classification.Type.RADIO,\n",
        "                        name=\"sub_radio_question\",\n",
        "                        options=[lb.Option(\"first_sub_radio_answer\")]\n",
        "                    )\n",
        "                ])\n",
        "          ]\n",
        "    ),\n",
        "    lb.Classification(\n",
        "      class_type=lb.Classification.Type.CHECKLIST,\n",
        "      name=\"nested_checklist_question\",\n",
        "      scope = lb.Classification.Scope.GLOBAL,\n",
        "      options=[\n",
        "          lb.Option(\"first_checklist_answer\",\n",
        "            options=[\n",
        "              lb.Classification(\n",
        "                  class_type=lb.Classification.Type.CHECKLIST,\n",
        "                  name=\"sub_checklist_question\",\n",
        "                  options=[lb.Option(\"first_sub_checklist_answer\")]\n",
        "              )\n",
        "          ])\n",
        "      ]\n",
        "    ),\n",
        "  ],\n",
        "  tools=[ # List of Tool objects\n",
        "    lb.Tool( tool=lb.Tool.Type.BBOX,name=\"bounding_box\"),\n",
        "    lb.Tool(tool=lb.Tool.Type.NER, name=\"named_entity\"),\n",
        "    lb.Tool(tool=lb.Tool.Type.RELATIONSHIP,name=\"relationship\"),\n",
        "    lb.Tool(tool=lb.Tool.Type.NER,\n",
        "            name=\"ner_with_checklist_subclass\",\n",
        "            classifications=[\n",
        "              lb.Classification(\n",
        "                class_type=lb.Classification.Type.CHECKLIST,\n",
        "                name=\"sub_checklist_question\",\n",
        "                options=[\n",
        "                  lb.Option(value=\"first_sub_checklist_answer\")\n",
        "                ]\n",
        "              )\n",
        "          ]),\n",
        "    lb.Tool( tool=lb.Tool.Type.BBOX,\n",
        "            name=\"bbox_with_radio_subclass\",\n",
        "            classifications=[\n",
        "              lb.Classification(\n",
        "                  class_type=lb.Classification.Type.RADIO,\n",
        "                  name=\"sub_radio_question\",\n",
        "                  options=[\n",
        "                    lb.Option(\n",
        "                      value=\"first_sub_radio_answer\" ,\n",
        "                      options=[\n",
        "                        lb.Classification(\n",
        "                          class_type=lb.Classification.Type.RADIO,\n",
        "                          name=\"second_sub_radio_question\",\n",
        "                          options=[lb.Option(\"second_sub_radio_answer\")]\n",
        "                        )]\n",
        "                    )]\n",
        "                )]\n",
        "      )]\n",
        ")\n",
        "\n",
        "ontology = client.create_ontology(\"Document Annotation Import Demo\",\n",
        "                                  ontology_builder.asdict(),\n",
        "                                  media_type=lb.MediaType.Document)"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Step 3: Creating a labeling project"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# Create a Labelbox project\n",
        "project = client.create_project(name=\"PDF_annotation_demo\",\n",
        "                                    media_type=lb.MediaType.Document)\n",
        "project.setup_editor(ontology)"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Step 4: Send a batch of data rows to the project"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "project.create_batch(\n",
        "  \"PDF_annotation_batch\", # Each batch in a project must have a unique name\n",
        "  global_keys=[global_key], # Paginated collection of data row objects, list of data row ids or global keys\n",
        "  priority=5 # priority between 1(Highest) - 5(lowest)\n",
        ")"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Step 5. Create the annotation payload\n",
        "Create the annotations payload using the snippets of code in Supported predictions section.\n",
        "\n",
        "Labelbox support NDJSON only for this data type.\n",
        "\n",
        "The resulting label should have exactly the same content for annotations that are supported by both (with exception of the uuid strings that are generated)"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "##### Step 5.1: First, we need to populate the text selections for Entity annotations\n",
        "To import ner annotations, you must pass a `text_layer_url`, Labelbox automatically generates a `text_layer_url` after importing a pdf asset that doesn't include a `text_layer_url`\n"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "To extract the generated text layer url we first need to export the data row"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "\n",
        "client.enable_experimental = True\n",
        "task = lb.DataRow.export(client=client,global_keys=[global_key])\n",
        "task.wait_till_done()\n",
        "stream = task.get_stream()\n",
        "\n",
        "text_layer = \"\"\n",
        "for output in stream:\n",
        "    output_json = json.loads(output.json_str)\n",
        "    text_layer = output_json['media_attributes']['text_layer_url']\n",
        "print(text_layer)"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "\n",
        "# Helper method\n",
        "def update_text_selections(annotation, group_id, list_tokens, page):\n",
        "  \"\"\"Attach a textSelections payload to an NDJson annotation dict (mutates it in place; dict.update returns None).\"\"\"\n",
        "  return annotation.update({\n",
        "    \"textSelections\": [\n",
        "      {\n",
        "        \"groupId\": group_id,\n",
        "        \"tokenIds\": list_tokens,\n",
        "        \"page\": page\n",
        "      }\n",
        "    ]\n",
        "  })\n",
        "\n",
        "\n",
        "# Fetch the content of the text layer\n",
        "res = requests.get(text_layer)\n",
        "\n",
        "# Phrases that we want to annotate, obtained from the text layer url\n",
        "content_phrases = [\"Metal-insulator (MI) transitions have been one of the\" ,\n",
        "                   \"T. Sasaki, N. Yoneyama, and N. Kobayashi\",\n",
        "                   \"Organic charge transfer salts based on the donor\",\n",
        "                   \"the experimental investigations on this issue have not\"]\n",
        "\n",
        "# Parse the text layer\n",
        "text_selections = []\n",
        "text_selections_ner = []\n",
        "text_selections_source = []\n",
        "text_selections_target = []\n",
        "\n",
        "for obj in json.loads(res.text):\n",
        "  for group in obj[\"groups\"]:\n",
        "    if group[\"content\"] == content_phrases[0]:\n",
        "      list_tokens = [x[\"id\"] for x in group[\"tokens\"]]\n",
        "      # build text selections for Python Annotation Types\n",
        "      document_text_selection = lb_types.DocumentTextSelection(groupId=group[\"id\"], tokenIds=list_tokens, page=1)\n",
        "      text_selections.append(document_text_selection)\n",
        "      # build text selection for the NDJson annotations\n",
        "      update_text_selections(annotation=entities_annotations_ndjson,\n",
        "                             group_id=group[\"id\"], # id representing group of words\n",
        "                             list_tokens=list_tokens, # ids representing individual words from the group\n",
        "                             page=1)\n",
        "    if group[\"content\"] == content_phrases[1]:\n",
        "      list_tokens_2 = [x[\"id\"] for x in group[\"tokens\"]]\n",
        "      # build text selections for Python Annotation Types\n",
        "      ner_text_selection = lb_types.DocumentTextSelection(groupId=group[\"id\"], tokenIds=list_tokens_2, page=1)\n",
        "      text_selections_ner.append(ner_text_selection)\n",
        "      # build text selection for the NDJson annotations\n",
        "      update_text_selections(annotation=ner_with_checklist_subclass_annotation_ndjson,\n",
        "                             group_id=group[\"id\"], # id representing group of words\n",
        "                             list_tokens=list_tokens_2, # ids representing individual words from the group\n",
        "                             page=1)\n",
        "    if group[\"content\"] == content_phrases[2]:\n",
        "      relationship_source = [x[\"id\"] for x in group[\"tokens\"]]\n",
        "      # build text selections for Python Annotation Types\n",
        "      text_selection_entity_source = lb_types.DocumentTextSelection(groupId=group[\"id\"], tokenIds=relationship_source, page=1)\n",
        "      text_selections_source.append(text_selection_entity_source)\n",
        "      # build text selection for the NDJson annotations\n",
        "      update_text_selections(annotation=entity_source_ndjson,\n",
        "                             group_id=group[\"id\"], # id representing group of words\n",
        "                             list_tokens=relationship_source, # ids representing individual words from the group\n",
        "                             page=1)\n",
        "    if group[\"content\"] == content_phrases[3]:\n",
        "      relationship_target = [x[\"id\"] for x in group[\"tokens\"]]\n",
        "      # build text selections for Python Annotation Types\n",
        "      text_selection_entity_target = lb_types.DocumentTextSelection(groupId=group[\"id\"], tokenIds=relationship_target, page=1)\n",
        "      text_selections_target.append(text_selection_entity_target)\n",
        "      # build text selections for the NDJson annotations\n",
        "      update_text_selections(annotation=entity_target_ndjson,\n",
        "                             group_id=group[\"id\"], # id representing group of words\n",
        "                             list_tokens=relationship_target, # ids representing individual words from the group\n",
        "                             page=1)\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "Re-write the python annotations to include text selections (only required for python annotation types)"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# re-write the entity annotation with text selections\n",
        "entities_annotation_document_entity = lb_types.DocumentEntity(name=\"named_entity\", textSelections = text_selections)\n",
        "entities_annotation = lb_types.ObjectAnnotation(name=\"named_entity\",value=entities_annotation_document_entity)\n",
        "\n",
        "# re-write the entity annotation + subclassification with text selections\n",
        "classifications = [\n",
        "    lb_types.ClassificationAnnotation(\n",
        "      name=\"sub_checklist_question\",\n",
        "      value=lb_types.Checklist(\n",
        "      answer=[lb_types.ClassificationAnswer(name=\"first_sub_checklist_answer\")]\n",
        "      )\n",
        "    )\n",
        "  ]\n",
        "ner_annotation_with_subclass = lb_types.DocumentEntity(name=\"ner_with_checklist_subclass\", textSelections= text_selections_ner)\n",
        "ner_with_checklist_subclass_annotation = lb_types.ObjectAnnotation(name=\"ner_with_checklist_subclass\",\n",
        "                                                                   value=ner_annotation_with_subclass,\n",
        "                                                                   classifications=classifications)\n",
        "\n",
        "# re-write the entity source and target annotations with text selections\n",
        "entity_source_doc = lb_types.DocumentEntity(name=\"named_entity\", textSelections= text_selections_source)\n",
        "entity_source = lb_types.ObjectAnnotation(name=\"named_entity\", value=entity_source_doc)\n",
        "\n",
        "entity_target_doc = lb_types.DocumentEntity(name=\"named_entity\", textSelections=text_selections_target)\n",
        "entity_target = lb_types.ObjectAnnotation(name=\"named_entity\", value=entity_target_doc)\n",
        "\n",
        "# re-write the entity relationship with the re-created entities\n",
        "entity_relationship = lb_types.RelationshipAnnotation(\n",
        "    name=\"relationship\",\n",
        "    value=lb_types.Relationship(\n",
        "        source=entity_source,\n",
        "        target=entity_target,\n",
        "        type=lb_types.Relationship.Type.UNIDIRECTIONAL,\n",
        "    ))\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "# Final NDJSON and python annotations\n",
        "# Preview both payload styles after the text selections were attached above\n",
        "print(f\"entities_annotations_ndjson={entities_annotations_ndjson}\")\n",
        "print(f\"entities_annotation={entities_annotation}\")\n",
        "print(f\"nested_entities_annotation_ndjson={ner_with_checklist_subclass_annotation_ndjson}\")\n",
        "print(f\"nested_entities_annotation={ner_with_checklist_subclass_annotation}\")\n",
        "print(f\"entity_source_ndjson={entity_source_ndjson}\")\n",
        "print(f\"entity_target_ndjson={entity_target_ndjson}\")\n",
        "print(f\"entity_source={entity_source}\")\n",
        "print(f\"entity_target={entity_target}\")"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "#### Python annotation\n",
        "Here we create the complete labels ndjson payload of annotations only using python annotation format. There is one annotation for each reference to an annotation that we created. Note that only a handful of python annotation types are supported for PDF documents."
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# Bundle every Python-annotation-type object for this data row into a single Label\n",
        "labels = []\n",
        "\n",
        "labels.append(\n",
        "    lb_types.Label(\n",
        "        data=lb_types.DocumentData(\n",
        "            global_key=global_key),\n",
        "        annotations = [\n",
        "            entities_annotation,\n",
        "            checklist_annotation,\n",
        "            nested_checklist_annotation,\n",
        "            text_annotation,\n",
        "            radio_annotation,\n",
        "            nested_radio_annotation,\n",
        "            bbox_annotation,\n",
        "            bbox_with_radio_subclass_annotation,\n",
        "            ner_with_checklist_subclass_annotation,\n",
        "            entity_source,\n",
        "            entity_target,\n",
        "            entity_relationship,# Only supported for MAL imports\n",
        "            bbox_source,\n",
        "            bbox_target,\n",
        "            bbox_relationship  # Only supported for MAL imports\n",
        "        ]\n",
        "  )\n",
        ")"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "#### NDJson annotations\n",
        "Here we create the complete labels ndjson payload of annotations only using NDJSON format. There is one annotation for each reference to an annotation that we created above."
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "\n",
        "label_ndjson = []\n",
        "# Each NDJson annotation must reference its data row via the globalKey\n",
        "for annot in [\n",
        "    entities_annotations_ndjson,\n",
        "    checklist_annotation_ndjson,\n",
        "    nested_checklist_annotation_ndjson,\n",
        "    text_annotation_ndjson,\n",
        "    radio_annotation_ndjson,\n",
        "    nested_radio_annotation_ndjson,\n",
        "    bbox_annotation_ndjson,\n",
        "    bbox_with_radio_subclass_annotation_ndjson,\n",
        "    ner_with_checklist_subclass_annotation_ndjson,\n",
        "    entity_source_ndjson,\n",
        "    entity_target_ndjson,\n",
        "    ner_relationship_annotation_ndjson, # Only supported for MAL imports\n",
        "    bbox_source_ndjson,\n",
        "    bbox_target_ndjson,\n",
        "    bbox_relationship_annotation_ndjson # Only supported for MAL imports\n",
        "  ]:\n",
        "  annot.update({\n",
        "      \"dataRow\": {\"globalKey\": global_key},\n",
        "  })\n",
        "  label_ndjson.append(annot)\n",
        "\n"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "### Step 6: Import the annotation payload\n",
        "For the purpose of this tutorial, only import one of the annotation payloads at a time (NDJSON or Python annotation types)."
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "Option A: Upload to a labeling project as pre-labels (MAL)"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# MAL import: uploads the annotations as pre-labels for the labeling project\n",
        "upload_job = lb.MALPredictionImport.create_from_objects(\n",
        "    client = client,\n",
        "    project_id = project.uid,\n",
        "    name=\"pdf_annotation_upload\" + str(uuid.uuid4()),\n",
        "    predictions=labels)\n",
        "\n",
        "upload_job.wait_until_done()\n",
        "# Errors will appear for annotation uploads that failed.\n",
        "print(\"Errors:\", upload_job.errors)\n",
        "print(\"Status of uploads: \", upload_job.statuses)"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    },
    {
      "metadata": {},
      "source": [
        "Option B: Upload to a labeling project using ground truth"
      ],
      "cell_type": "markdown"
    },
    {
      "metadata": {},
      "source": [
        "# Uncomment this code when excluding relationships from label import\n",
        "## Relationships are not currently supported for label import\n",
        "\n",
        "# upload_job = lb.LabelImport.create_from_objects(\n",
        "#     client = client,\n",
        "#     project_id = project.uid,\n",
        "#     name=\"label_import_job\"+str(uuid.uuid4()),\n",
        "#     labels=labels) ## Remove unsupported relationships from the labels list\n",
        "\n",
        "# # Wait for the import to finish before reading errors/statuses (matches the MAL cell above)\n",
        "# upload_job.wait_until_done()\n",
        "# print(\"Errors:\", upload_job.errors)\n",
        "# print(\"Status of uploads: \", upload_job.statuses)"
      ],
      "cell_type": "code",
      "outputs": [],
      "execution_count": null
    }
  ]
}