{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JNSdeDvgpdng"
      },
      "source": [
        "# Notebook Setup\n",
        "\n",
        "<a target=\"_blank\" href=\"https://colab.research.google.com/github/PacktPublishing/Generative-AI-Integration-Patterns-1E/blob/main/Chapter07/Integration_pattern_Real_time_intent_classification.ipynb\">\n",
        "  <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
        "</a>"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 921
        },
        "id": "nZzaJzUUoRbW",
        "outputId": "072bd177-45dd-43ad-e2d0-ea4d19bdf7fa"
      },
      "outputs": [],
      "source": [
        "# Install dependencies.\n",
        "# Use %pip (not !pip) so the package is installed into the environment of the\n",
        "# kernel that is actually running this notebook.\n",
        "%pip install --upgrade google-cloud-aiplatform"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "euBE2B3DpHSQ",
        "outputId": "034040b2-f17b-4fde-b12a-eef4a4d08781"
      },
      "outputs": [],
      "source": [
        "# Authenticate to Google Cloud using Application Default Credentials.\n",
        "# This starts an interactive login flow and must complete successfully\n",
        "# before the Vertex AI calls later in the notebook will work.\n",
        "!gcloud auth application-default login"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "IHRIcyzsoT6J"
      },
      "outputs": [],
      "source": [
        "# Standard-library helpers.\n",
        "import base64\n",
        "import json\n",
        "\n",
        "# Vertex AI SDK for calling Gemini models.\n",
        "import vertexai\n",
        "from vertexai.generative_models import GenerativeModel, Part, FinishReason\n",
        "import vertexai.preview.generative_models as generative_models\n",
        "\n",
        "# NOTE(review): base64, Part, and FinishReason appear unused in this\n",
        "# notebook -- confirm before removing.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qgNuihpJqgkJ"
      },
      "outputs": [],
      "source": [
        "# Colab form parameters (#@param renders editable fields in Colab):\n",
        "# the GCP project, region, and Gemini model used for all calls below.\n",
        "# NOTE(review): replace the sample project ID with your own GCP project.\n",
        "PROJECT = \"testproject-410220\"#@param {type:\"string\"}\n",
        "LOCATION = \"us-central1\"#@param {type:\"string\"}\n",
        "MODEL = \"gemini-1.5-flash-001\"#@param {type:\"string\"}"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "91JnL54jpkkE"
      },
      "source": [
        "# Function Definitions"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Ioi8N6nNp4xE"
      },
      "outputs": [],
      "source": [
        "# Generation parameters: temperature 0 for near-deterministic classification,\n",
        "# with a generous output-token budget.\n",
        "generation_config = {\n",
        "    \"max_output_tokens\": 8192,\n",
        "    \"temperature\": 0,\n",
        "    \"top_p\": 0.95,\n",
        "}\n",
        "\n",
        "# Only block content that the safety filters rate as high severity.\n",
        "safety_settings = {\n",
        "    generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
        "    generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
        "    generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
        "    generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
        "}\n",
        "\n",
        "def generate(prompt):\n",
        "  \"\"\"Sends a single prompt to the configured Gemini model.\n",
        "\n",
        "  Args:\n",
        "    prompt: The fully formatted prompt string.\n",
        "\n",
        "  Returns:\n",
        "    The non-streaming response object returned by the model.\n",
        "  \"\"\"\n",
        "  vertexai.init(project=PROJECT, location=LOCATION)\n",
        "  model = GenerativeModel(MODEL)\n",
        "  response = model.generate_content(\n",
        "      [prompt],\n",
        "      generation_config=generation_config,\n",
        "      safety_settings=safety_settings,\n",
        "      stream=False,\n",
        "  )\n",
        "  return response\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "B7rAfCnbs22H"
      },
      "source": [
        "# Entry Point"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "pg1e3tGBwgJ1"
      },
      "outputs": [],
      "source": [
        "# Simulate the input that would normally arrive from a chat interface.\n",
        "\n",
        "message = \"I want to open an account\""
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ihAHrCBas5eW"
      },
      "source": [
        "# Prompt Preprocessing"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YkAR047_r6jL"
      },
      "outputs": [],
      "source": [
        "# Define the prompt template. The task is intent classification, so the\n",
        "# template exposes the full set of possible intents to the LLM and asks it\n",
        "# to answer with a JSON object only. {query} is filled in at inference time.\n",
        "prompt_template = \"\"\"\n",
        "You are a helpful assistant for an online financial services company that allows users to check their balances, invest in certificates of deposit (CDs), and perform other financial transactions.\n",
        "\n",
        "Your task is to identify what your customers are trying to do and return a well formed JSON object.\n",
        "\n",
        "1. Carefully analyze the content of the message.\n",
        "2. Classify what the user is trying to do within these options:\n",
        "    * New Account: The user is trying to sign up. Return {{\"intent\": \"signup\", \"content\":\"null\"}}\n",
        "    * Change Password: The user needs to reset their password. Return {{\"intent\":\"change_password\", \"content\":\"null\"}}\n",
        "    * Check Balance: The user needs to check their balance. Return {{\"intent\": \"check_balance\", \"content\":\"null\"}}\n",
        "    * Invest in CD: The user wants to invest in a certificate of deposit. Return {{\"intent\": \"invest_cd\", \"content\": \"Extract relevant information such as investment amount and term\"}}\n",
        "    * Withdraw Funds: The user wants to withdraw money. Return {{\"intent\": \"withdraw_funds\", \"content\": \"Extract information like amount and withdrawal method\"}}\n",
        "    * Transfer Funds: The user wants to transfer money between accounts. Return {{\"intent\": \"transfer_funds\", \"content\": \"Extract information like amount, source account, and destination account\"}}\n",
        "    * Account Information: The user wants to access or update their account information. Return {{\"intent\": \"account_info\", \"content\": \"Identify the specific information the user needs\"}}\n",
        "    * Lost/Stolen Card: The user wants to report a lost or stolen card. Return {{\"intent\": \"lost_card\", \"content\": \"null\"}}\n",
        "    * Support: The user needs help and is not sure what to do. Return {{\"intent\": \"support\", \"content\": \"null\"}}\n",
        "    * Other: For other queries, politely decline to answer and clarify what you can help with.\n",
        "3. Only return the proper JSON result from your classification.\n",
        "4. Always think step by step.\n",
        "\n",
        "User question: {query}\n",
        "JSON:\n",
        "\"\"\"\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "LuJusiKDs7Au"
      },
      "source": [
        "# Inference"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "N_YWvX5lhHZV",
        "outputId": "389b44e1-e605-444e-8f99-403bd9e3f7f9"
      },
      "outputs": [],
      "source": [
        "# Fill the template with the user's message and run a single model call.\n",
        "result = generate(prompt_template.format(query=message))"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "fnSrhoN1tACl"
      },
      "source": [
        "# Result Postprocessing"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "ggiDKZp3292w",
        "outputId": "592aea11-58bb-4c1a-ab54-1e128ba18168"
      },
      "outputs": [],
      "source": [
        "### The model sometimes wraps its JSON answer in markdown code fences,\n",
        "### so we strip those before parsing.\n",
        "\n",
        "def extract_json(text):\n",
        "  \"\"\"\n",
        "  Extracts the JSON portion from a string that may contain markdown fences.\n",
        "\n",
        "  Args:\n",
        "    text: The model output, possibly wrapped in ```json (or bare ```) fences.\n",
        "\n",
        "  Returns:\n",
        "    A dictionary representing the extracted JSON, or None if no valid JSON is found.\n",
        "  \"\"\"\n",
        "  fence = \"```json\"\n",
        "  start_index = text.find(fence)\n",
        "  if start_index == -1:\n",
        "    # Generalize: also accept a bare ``` fence with no language tag.\n",
        "    fence = \"```\"\n",
        "    start_index = text.find(fence)\n",
        "  end_index = text.find(\"```\", start_index + len(fence)) if start_index != -1 else -1\n",
        "\n",
        "  if start_index != -1 and end_index != -1:\n",
        "    json_string = text[start_index + len(fence): end_index]\n",
        "  else:\n",
        "    # No (complete) fence found: try to parse the whole text.\n",
        "    json_string = text\n",
        "  try:\n",
        "    return json.loads(json_string)\n",
        "  except json.JSONDecodeError:\n",
        "    return None\n",
        "\n",
        "# Map each classified intent to the action the application would take.\n",
        "# In a real system each entry would trigger the corresponding workflow\n",
        "# (redirect them to a sign-up page, query the balance service, and so on).\n",
        "INTENT_ACTIONS = {\n",
        "    \"signup\": \"Sign up process\",\n",
        "    \"change_password\": \"Change password\",\n",
        "    \"check_balance\": \"Check account balance\",\n",
        "    \"invest_cd\": \"Invest in a CD\",\n",
        "    \"withdraw_funds\": \"Withdraw funds\",\n",
        "    \"transfer_funds\": \"Transfer funds\",\n",
        "    \"account_info\": \"Account information\",\n",
        "    \"lost_card\": \"Report lost card\",\n",
        "    \"support\": \"Contact support\",\n",
        "    \"other\": \"Other kind of intent\",\n",
        "}\n",
        "\n",
        "def process_intent(intent):\n",
        "  \"\"\"\n",
        "  Maps a parsed intent dictionary to a human-readable action description.\n",
        "\n",
        "  Args:\n",
        "    intent: The dictionary produced by extract_json, or None if parsing failed.\n",
        "\n",
        "  Returns:\n",
        "    A description of the action the application should take.\n",
        "  \"\"\"\n",
        "  if not intent or \"intent\" not in intent:\n",
        "    # extract_json returns None when the model output is not valid JSON;\n",
        "    # handle that gracefully instead of raising a TypeError here.\n",
        "    return \"Could not extract a valid intent from the model response.\"\n",
        "  return INTENT_ACTIONS.get(\n",
        "      intent[\"intent\"],\n",
        "      \"If an intent was classified as something else you should investigate what is going on.\")\n",
        "\n",
        "intent = process_intent(extract_json(result.text))\n",
        "\n",
        "print(intent)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9Vy0H0pptCCF"
      },
      "source": [
        "# Result Presentation"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "_vsqw1SAYRtc",
        "outputId": "1feb84bc-e4d4-45e4-918f-c450940207e1"
      },
      "outputs": [],
      "source": [
        "# In this case we will use a Gradio interface to interact with the system.\n",
        "\n",
        "# Install Gradio. Use %pip (not !pip) so the package is installed into the\n",
        "# environment of the kernel that is running this notebook.\n",
        "%pip install --upgrade gradio"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true,
          "base_uri": "https://localhost:8080/",
          "height": 648
        },
        "id": "P2JOzLS6cgeq",
        "outputId": "61a901a2-7eb3-4879-a117-e5d1a135be1f"
      },
      "outputs": [],
      "source": [
        "import gradio as gr\n",
        "\n",
        "def chat(message, history):\n",
        "    \"\"\"Classifies one chat message and appends the resulting action to the history.\"\"\"\n",
        "    response = generate(prompt_template.format(query=message))\n",
        "    intent_action = process_intent(extract_json(response.text))\n",
        "    # NOTE(review): the (user, bot) tuple history format is deprecated in\n",
        "    # Gradio 4.x and removed in 5.x in favor of type=\"messages\"; the install\n",
        "    # cell runs an unpinned --upgrade, so confirm the installed version.\n",
        "    history.append((message, intent_action))\n",
        "    # Return an empty string to clear the textbox, plus the updated history.\n",
        "    return \"\", history\n",
        "\n",
        "\n",
        "# Minimal chat UI: a chatbot pane and a textbox wired to chat() on submit.\n",
        "with gr.Blocks() as demo:\n",
        "  gr.Markdown(\"Fintech Assistant\")\n",
        "  chatbot = gr.Chatbot(show_label=False)\n",
        "  message = gr.Textbox(placeholder=\"Enter your question\")\n",
        "  message.submit(chat, [message, chatbot],[message, chatbot]  )\n",
        "\n",
        "# debug=True keeps the cell running and streams server logs into the output.\n",
        "demo.launch(debug=True)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
