{
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "# Mount Google Drive so the notebook can read/write under /content/drive.\n",
        "from google.colab import drive\n",
        "\n",
        "drive.mount('/content/drive')"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "eQ9aqjHjt9a7",
        "outputId": "ccaccfb7-481b-4d94-d0b5-6154ae3349a6"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Mounted at /content/drive\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Journalism Guidance + LM"
      ],
      "metadata": {
        "id": "tY2z9FLtU8rU"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Requirements"
      ],
      "metadata": {
        "id": "GHlVDq4EdrMO"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# %pip (not !pip) installs into the running kernel's environment. The extras\n",
        "# spec is quoted so shell glob characters ([...]) can't mangle it, and the\n",
        "# version is pinned to the one this cell's saved output shows was installed.\n",
        "%pip install \"transformers[sentencepiece]==4.28.1\""
      ],
      "metadata": {
        "id": "YVhVyuAbFk0F",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "8e53d3ec-5ac5-4dad-9145-2d3f78d1a476"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting transformers[sentencepiece]\n",
            "  Downloading transformers-4.28.1-py3-none-any.whl (7.0 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.0/7.0 MB\u001b[0m \u001b[31m95.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (2.27.1)\n",
            "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (6.0)\n",
            "Collecting tokenizers!=0.11.3,<0.14,>=0.11.1\n",
            "  Downloading tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (7.8 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.8/7.8 MB\u001b[0m \u001b[31m78.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (3.12.0)\n",
            "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (1.22.4)\n",
            "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (23.1)\n",
            "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (4.65.0)\n",
            "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers[sentencepiece]) (2022.10.31)\n",
            "Collecting huggingface-hub<1.0,>=0.11.0\n",
            "  Downloading huggingface_hub-0.14.1-py3-none-any.whl (224 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m224.5/224.5 kB\u001b[0m \u001b[31m25.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting protobuf<=3.20.2\n",
            "  Downloading protobuf-3.20.2-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m59.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting sentencepiece!=0.1.92,>=0.1.91\n",
            "  Downloading sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m87.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.11.0->transformers[sentencepiece]) (2023.4.0)\n",
            "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.11.0->transformers[sentencepiece]) (4.5.0)\n",
            "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[sentencepiece]) (2.0.12)\n",
            "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[sentencepiece]) (1.26.15)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[sentencepiece]) (3.4)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[sentencepiece]) (2022.12.7)\n",
            "Installing collected packages: tokenizers, sentencepiece, protobuf, huggingface-hub, transformers\n",
            "  Attempting uninstall: protobuf\n",
            "    Found existing installation: protobuf 3.20.3\n",
            "    Uninstalling protobuf-3.20.3:\n",
            "      Successfully uninstalled protobuf-3.20.3\n",
            "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
            "tensorflow 2.12.0 requires protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3, but you have protobuf 3.20.2 which is incompatible.\n",
            "tensorflow-metadata 1.13.1 requires protobuf<5,>=3.20.3, but you have protobuf 3.20.2 which is incompatible.\u001b[0m\u001b[31m\n",
            "\u001b[0mSuccessfully installed huggingface-hub-0.14.1 protobuf-3.20.2 sentencepiece-0.1.99 tokenizers-0.13.3 transformers-4.28.1\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "%pip install nltk\n",
        "\n",
        "%pip install py-readability-metrics\n",
        "\n",
        "# Calling nltk.download() directly avoids the RuntimeWarning that\n",
        "# `python -m nltk.downloader` emitted (see this cell's saved output).\n",
        "import nltk\n",
        "nltk.download(\"punkt\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "crE0Q4zMGTeh",
        "outputId": "15fd55cd-e388-4f4b-a7e9-0e349a9244f2"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (3.8.1)\n",
            "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk) (2022.10.31)\n",
            "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk) (1.2.0)\n",
            "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk) (8.1.3)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from nltk) (4.65.0)\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting py-readability-metrics\n",
            "  Downloading py_readability_metrics-1.4.5-py3-none-any.whl (26 kB)\n",
            "Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (from py-readability-metrics) (3.8.1)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from nltk->py-readability-metrics) (4.65.0)\n",
            "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk->py-readability-metrics) (8.1.3)\n",
            "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk->py-readability-metrics) (2022.10.31)\n",
            "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk->py-readability-metrics) (1.2.0)\n",
            "Installing collected packages: py-readability-metrics\n",
            "Successfully installed py-readability-metrics-1.4.5\n",
            "/usr/lib/python3.10/runpy.py:126: RuntimeWarning: 'nltk.downloader' found in sys.modules after import of package 'nltk', but prior to execution of 'nltk.downloader'; this may result in unpredictable behaviour\n",
            "  warn(RuntimeWarning(msg))\n",
            "[nltk_data] Downloading package punkt to /root/nltk_data...\n",
            "[nltk_data]   Unzipping tokenizers/punkt.zip.\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Pinned to the version this cell's saved output shows was installed,\n",
        "# so re-runs are reproducible; %pip targets the kernel environment.\n",
        "%pip install lexicalrichness==0.5.0"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "nAbqcuIToHq0",
        "outputId": "f8c4e7db-12a1-4a5f-d14b-bbce695a7485"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting lexicalrichness\n",
            "  Downloading lexicalrichness-0.5.0.tar.gz (96 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m96.5/96.5 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from lexicalrichness) (1.10.1)\n",
            "Requirement already satisfied: textblob>=0.15.3 in /usr/local/lib/python3.10/dist-packages (from lexicalrichness) (0.17.1)\n",
            "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from lexicalrichness) (1.1.5)\n",
            "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from lexicalrichness) (3.7.1)\n",
            "Requirement already satisfied: numpy<1.27.0,>=1.19.5 in /usr/local/lib/python3.10/dist-packages (from scipy>=1.0.0->lexicalrichness) (1.22.4)\n",
            "Requirement already satisfied: nltk>=3.1 in /usr/local/lib/python3.10/dist-packages (from textblob>=0.15.3->lexicalrichness) (3.8.1)\n",
            "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (8.4.0)\n",
            "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (23.1)\n",
            "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (0.11.0)\n",
            "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (3.0.9)\n",
            "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (2.8.2)\n",
            "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (1.4.4)\n",
            "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (1.0.7)\n",
            "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->lexicalrichness) (4.39.3)\n",
            "Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.10/dist-packages (from pandas->lexicalrichness) (2022.7.1)\n",
            "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk>=3.1->textblob>=0.15.3->lexicalrichness) (8.1.3)\n",
            "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk>=3.1->textblob>=0.15.3->lexicalrichness) (2022.10.31)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from nltk>=3.1->textblob>=0.15.3->lexicalrichness) (4.53.0)\n",
            "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk>=3.1->textblob>=0.15.3->lexicalrichness) (1.2.0)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->lexicalrichness) (1.16.0)\n",
            "Building wheels for collected packages: lexicalrichness\n",
            "  Building wheel for lexicalrichness (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for lexicalrichness: filename=lexicalrichness-0.5.0-py3-none-any.whl size=15534 sha256=c90cd77b709de7454444726f9c59e6192398845e6b8f0fc9f29b127c687dd910\n",
            "  Stored in directory: /root/.cache/pip/wheels/42/68/59/1edd70c2b91dc172fa208eb34799e90bc6c093bfbb862ff017\n",
            "Successfully built lexicalrichness\n",
            "Installing collected packages: lexicalrichness\n",
            "Successfully installed lexicalrichness-0.5.0\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# NOTE: the PyYAML and nltk specs below MUST be quoted — unquoted, the shell\n",
        "# treats `>` as output redirection (writing to a file named `=5.4`) and pip\n",
        "# installs the latest version with no constraint. The saved output of this\n",
        "# cell confirms it: the PyYAML step's log is missing entirely (the output\n",
        "# jumps from the numpy step straight to spacy) because it went to that file.\n",
        "%pip install numpy==1.19.4\n",
        "%pip install \"PyYAML>=5.4\"\n",
        "%pip install spacy==2.2.4\n",
        "%pip install torch==1.7.0\n",
        "%pip install torchtext==0.3.1\n",
        "%pip install tqdm==4.53.0\n",
        "%pip install pandas==1.1.5\n",
        "%pip install transformers==4.3.2\n",
        "%pip install fire==0.4.0\n",
        "%pip install requests==2.23.0\n",
        "%pip install tensorboard==2.4.1\n",
        "%pip install download==0.3.5\n",
        "%pip install \"nltk>=3.6.6\"\n",
        "\n",
        "# !pip install py-readability-metrics\n",
        "# nltk.download() avoids the RuntimeWarning that `python -m nltk.downloader`\n",
        "# emits when nltk is already imported.\n",
        "import nltk\n",
        "nltk.download(\"punkt\")\n",
        "# !pip install lexicalrichness"
      ],
      "metadata": {
        "id": "VnUbCQLHdv8D",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "e6d6ab39-9b45-4036-96aa-295c2316d8ae"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting numpy==1.19.4\n",
            "  Downloading numpy-1.19.4.zip (7.3 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.3/7.3 MB\u001b[0m \u001b[31m20.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
            "  Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
            "  Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "Building wheels for collected packages: numpy\n",
            "  \u001b[1;31merror\u001b[0m: \u001b[1msubprocess-exited-with-error\u001b[0m\n",
            "  \n",
            "  \u001b[31m×\u001b[0m \u001b[32mBuilding wheel for numpy \u001b[0m\u001b[1;32m(\u001b[0m\u001b[32mpyproject.toml\u001b[0m\u001b[1;32m)\u001b[0m did not run successfully.\n",
            "  \u001b[31m│\u001b[0m exit code: \u001b[1;36m1\u001b[0m\n",
            "  \u001b[31m╰─>\u001b[0m See above for output.\n",
            "  \n",
            "  \u001b[1;35mnote\u001b[0m: This error originates from a subprocess, and is likely not a problem with pip.\n",
            "  Building wheel for numpy (pyproject.toml) ... \u001b[?25l\u001b[?25herror\n",
            "\u001b[31m  ERROR: Failed building wheel for numpy\u001b[0m\u001b[31m\n",
            "\u001b[0mFailed to build numpy\n",
            "\u001b[31mERROR: Could not build wheels for numpy, which is required to install pyproject.toml-based projects\u001b[0m\u001b[31m\n",
            "\u001b[0mLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting spacy==2.2.4\n",
            "  Downloading spacy-2.2.4.tar.gz (6.1 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.1/6.1 MB\u001b[0m \u001b[31m52.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  \u001b[1;31merror\u001b[0m: \u001b[1msubprocess-exited-with-error\u001b[0m\n",
            "  \n",
            "  \u001b[31m×\u001b[0m \u001b[32mpip subprocess to install build dependencies\u001b[0m did not run successfully.\n",
            "  \u001b[31m│\u001b[0m exit code: \u001b[1;36m1\u001b[0m\n",
            "  \u001b[31m╰─>\u001b[0m See above for output.\n",
            "  \n",
            "  \u001b[1;35mnote\u001b[0m: This error originates from a subprocess, and is likely not a problem with pip.\n",
            "  Installing build dependencies ... \u001b[?25l\u001b[?25herror\n",
            "\u001b[1;31merror\u001b[0m: \u001b[1msubprocess-exited-with-error\u001b[0m\n",
            "\n",
            "\u001b[31m×\u001b[0m \u001b[32mpip subprocess to install build dependencies\u001b[0m did not run successfully.\n",
            "\u001b[31m│\u001b[0m exit code: \u001b[1;36m1\u001b[0m\n",
            "\u001b[31m╰─>\u001b[0m See above for output.\n",
            "\n",
            "\u001b[1;35mnote\u001b[0m: This error originates from a subprocess, and is likely not a problem with pip.\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "\u001b[31mERROR: Could not find a version that satisfies the requirement torch==1.7.0 (from versions: 1.11.0, 1.12.0, 1.12.1, 1.13.0, 1.13.1, 2.0.0)\u001b[0m\u001b[31m\n",
            "\u001b[0m\u001b[31mERROR: No matching distribution found for torch==1.7.0\u001b[0m\u001b[31m\n",
            "\u001b[0mLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting torchtext==0.3.1\n",
            "  Downloading torchtext-0.3.1-py3-none-any.whl (62 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.4/62.4 kB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchtext==0.3.1) (2.27.1)\n",
            "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from torchtext==0.3.1) (2.0.0+cu118)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from torchtext==0.3.1) (4.65.0)\n",
            "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchtext==0.3.1) (1.22.4)\n",
            "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchtext==0.3.1) (1.26.15)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchtext==0.3.1) (2022.12.7)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchtext==0.3.1) (3.4)\n",
            "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->torchtext==0.3.1) (2.0.12)\n",
            "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch->torchtext==0.3.1) (4.5.0)\n",
            "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->torchtext==0.3.1) (3.1.2)\n",
            "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->torchtext==0.3.1) (3.12.0)\n",
            "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch->torchtext==0.3.1) (2.0.0)\n",
            "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->torchtext==0.3.1) (1.11.1)\n",
            "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->torchtext==0.3.1) (3.1)\n",
            "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch->torchtext==0.3.1) (3.25.2)\n",
            "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch->torchtext==0.3.1) (16.0.2)\n",
            "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->torchtext==0.3.1) (2.1.2)\n",
            "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->torchtext==0.3.1) (1.3.0)\n",
            "Installing collected packages: torchtext\n",
            "  Attempting uninstall: torchtext\n",
            "    Found existing installation: torchtext 0.15.1\n",
            "    Uninstalling torchtext-0.15.1:\n",
            "      Successfully uninstalled torchtext-0.15.1\n",
            "Successfully installed torchtext-0.3.1\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting tqdm==4.53.0\n",
            "  Downloading tqdm-4.53.0-py2.py3-none-any.whl (70 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m70.3/70.3 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hInstalling collected packages: tqdm\n",
            "  Attempting uninstall: tqdm\n",
            "    Found existing installation: tqdm 4.65.0\n",
            "    Uninstalling tqdm-4.65.0:\n",
            "      Successfully uninstalled tqdm-4.65.0\n",
            "Successfully installed tqdm-4.53.0\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting pandas==1.1.5\n",
            "  Downloading pandas-1.1.5.tar.gz (5.2 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.2/5.2 MB\u001b[0m \u001b[31m60.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
            "  Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
            "  Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.10/dist-packages (from pandas==1.1.5) (2.8.2)\n",
            "Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.10/dist-packages (from pandas==1.1.5) (2022.7.1)\n",
            "Requirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.10/dist-packages (from pandas==1.1.5) (1.22.4)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7.3->pandas==1.1.5) (1.16.0)\n",
            "Building wheels for collected packages: pandas\n",
            "  Building wheel for pandas (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for pandas: filename=pandas-1.1.5-cp310-cp310-linux_x86_64.whl size=39767736 sha256=96798b35f581d637c4567d3109f791a3ec4a041f3dcc7c06babf1ea14ae60673\n",
            "  Stored in directory: /root/.cache/pip/wheels/92/41/92/769e05cd303040fdae625c9f5a8d38f1d37c423aa2a30b3d0d\n",
            "Successfully built pandas\n",
            "Installing collected packages: pandas\n",
            "  Attempting uninstall: pandas\n",
            "    Found existing installation: pandas 1.5.3\n",
            "    Uninstalling pandas-1.5.3:\n",
            "      Successfully uninstalled pandas-1.5.3\n",
            "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
            "yfinance 0.2.18 requires pandas>=1.3.0, but you have pandas 1.1.5 which is incompatible.\n",
            "xarray 2022.12.0 requires pandas>=1.3, but you have pandas 1.1.5 which is incompatible.\n",
            "plotnine 0.10.1 requires pandas>=1.3.5, but you have pandas 1.1.5 which is incompatible.\n",
            "mizani 0.8.1 requires pandas>=1.3.5, but you have pandas 1.1.5 which is incompatible.\n",
            "google-colab 1.0.0 requires pandas~=1.5.3, but you have pandas 1.1.5 which is incompatible.\n",
            "arviz 0.15.1 requires pandas>=1.3.0, but you have pandas 1.1.5 which is incompatible.\u001b[0m\u001b[31m\n",
            "\u001b[0mSuccessfully installed pandas-1.1.5\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting transformers==4.3.2\n",
            "  Downloading transformers-4.3.2-py3-none-any.whl (1.8 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m32.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting tokenizers<0.11,>=0.10.1\n",
            "  Downloading tokenizers-0.10.3.tar.gz (212 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m212.7/212.7 kB\u001b[0m \u001b[31m25.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
            "  Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
            "  Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.3.2) (1.22.4)\n",
            "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers==4.3.2) (2022.10.31)\n",
            "Collecting sacremoses\n",
            "  Downloading sacremoses-0.0.53.tar.gz (880 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m880.6/880.6 kB\u001b[0m \u001b[31m67.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers==4.3.2) (4.53.0)\n",
            "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers==4.3.2) (3.12.0)\n",
            "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from transformers==4.3.2) (23.1)\n",
            "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers==4.3.2) (2.27.1)\n",
            "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.3.2) (2.0.12)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.3.2) (2022.12.7)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.3.2) (3.4)\n",
            "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers==4.3.2) (1.26.15)\n",
            "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from sacremoses->transformers==4.3.2) (1.16.0)\n",
            "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from sacremoses->transformers==4.3.2) (8.1.3)\n",
            "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from sacremoses->transformers==4.3.2) (1.2.0)\n",
            "Building wheels for collected packages: tokenizers, sacremoses\n",
            "  \u001b[1;31merror\u001b[0m: \u001b[1msubprocess-exited-with-error\u001b[0m\n",
            "  \n",
            "  \u001b[31m×\u001b[0m \u001b[32mBuilding wheel for tokenizers \u001b[0m\u001b[1;32m(\u001b[0m\u001b[32mpyproject.toml\u001b[0m\u001b[1;32m)\u001b[0m did not run successfully.\n",
            "  \u001b[31m│\u001b[0m exit code: \u001b[1;36m1\u001b[0m\n",
            "  \u001b[31m╰─>\u001b[0m See above for output.\n",
            "  \n",
            "  \u001b[1;35mnote\u001b[0m: This error originates from a subprocess, and is likely not a problem with pip.\n",
            "  Building wheel for tokenizers (pyproject.toml) ... \u001b[?25l\u001b[?25herror\n",
            "\u001b[31m  ERROR: Failed building wheel for tokenizers\u001b[0m\u001b[31m\n",
            "\u001b[0m  Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for sacremoses: filename=sacremoses-0.0.53-py3-none-any.whl size=895259 sha256=90f33710ef63f693919e9811672864dae8a361905a94f65aa6f36814d8092806\n",
            "  Stored in directory: /root/.cache/pip/wheels/00/24/97/a2ea5324f36bc626e1ea0267f33db6aa80d157ee977e9e42fb\n",
            "Successfully built sacremoses\n",
            "Failed to build tokenizers\n",
            "\u001b[31mERROR: Could not build wheels for tokenizers, which is required to install pyproject.toml-based projects\u001b[0m\u001b[31m\n",
            "\u001b[0mLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting fire==0.4.0\n",
            "  Downloading fire-0.4.0.tar.gz (87 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m87.7/87.7 kB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from fire==0.4.0) (1.16.0)\n",
            "Requirement already satisfied: termcolor in /usr/local/lib/python3.10/dist-packages (from fire==0.4.0) (2.3.0)\n",
            "Building wheels for collected packages: fire\n",
            "  Building wheel for fire (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for fire: filename=fire-0.4.0-py2.py3-none-any.whl size=115941 sha256=c077f8592f0f2f68e8d69dd971ca4e3d06a33b7b2dd6419dd453f430d0f47d8b\n",
            "  Stored in directory: /root/.cache/pip/wheels/26/9a/dd/2818b1b023daf077ec3e625c47ae446aca587a5abe48e05212\n",
            "Successfully built fire\n",
            "Installing collected packages: fire\n",
            "Successfully installed fire-0.4.0\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting requests==2.23.0\n",
            "  Downloading requests-2.23.0-py2.py3-none-any.whl (58 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.4/58.4 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting idna<3,>=2.5\n",
            "  Downloading idna-2.10-py2.py3-none-any.whl (58 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.8/58.8 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hCollecting urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1\n",
            "  Downloading urllib3-1.25.11-py2.py3-none-any.whl (127 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m128.0/128.0 kB\u001b[0m \u001b[31m10.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests==2.23.0) (2022.12.7)\n",
            "Collecting chardet<4,>=3.0.2\n",
            "  Downloading chardet-3.0.4-py2.py3-none-any.whl (133 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m133.4/133.4 kB\u001b[0m \u001b[31m18.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hInstalling collected packages: chardet, urllib3, idna, requests\n",
            "  Attempting uninstall: chardet\n",
            "    Found existing installation: chardet 4.0.0\n",
            "    Uninstalling chardet-4.0.0:\n",
            "      Successfully uninstalled chardet-4.0.0\n",
            "  Attempting uninstall: urllib3\n",
            "    Found existing installation: urllib3 1.26.15\n",
            "    Uninstalling urllib3-1.26.15:\n",
            "      Successfully uninstalled urllib3-1.26.15\n",
            "  Attempting uninstall: idna\n",
            "    Found existing installation: idna 3.4\n",
            "    Uninstalling idna-3.4:\n",
            "      Successfully uninstalled idna-3.4\n",
            "  Attempting uninstall: requests\n",
            "    Found existing installation: requests 2.27.1\n",
            "    Uninstalling requests-2.27.1:\n",
            "      Successfully uninstalled requests-2.27.1\n",
            "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
            "yfinance 0.2.18 requires pandas>=1.3.0, but you have pandas 1.1.5 which is incompatible.\n",
            "yfinance 0.2.18 requires requests>=2.26, but you have requests 2.23.0 which is incompatible.\n",
            "tweepy 4.13.0 requires requests<3,>=2.27.0, but you have requests 2.23.0 which is incompatible.\n",
            "google-colab 1.0.0 requires pandas~=1.5.3, but you have pandas 1.1.5 which is incompatible.\n",
            "google-colab 1.0.0 requires requests>=2.27.0, but you have requests 2.23.0 which is incompatible.\u001b[0m\u001b[31m\n",
            "\u001b[0mSuccessfully installed chardet-3.0.4 idna-2.10 requests-2.23.0 urllib3-1.25.11\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting tensorboard==2.4.1\n",
            "  Downloading tensorboard-2.4.1-py3-none-any.whl (10.6 MB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.6/10.6 MB\u001b[0m \u001b[31m94.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (3.20.2)\n",
            "Collecting google-auth-oauthlib<0.5,>=0.4.1\n",
            "  Downloading google_auth_oauthlib-0.4.6-py2.py3-none-any.whl (18 kB)\n",
            "Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (2.23.0)\n",
            "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (0.40.0)\n",
            "Requirement already satisfied: numpy>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (1.22.4)\n",
            "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (1.4.0)\n",
            "Collecting google-auth<2,>=1.6.3\n",
            "  Downloading google_auth-1.35.0-py2.py3-none-any.whl (152 kB)\n",
            "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m152.9/152.9 kB\u001b[0m \u001b[31m19.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25hRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (3.4.3)\n",
            "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (1.8.1)\n",
            "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (67.7.2)\n",
            "Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (1.16.0)\n",
            "Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (1.54.0)\n",
            "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.10/dist-packages (from tensorboard==2.4.1) (2.3.0)\n",
            "Collecting cachetools<5.0,>=2.0.0\n",
            "  Downloading cachetools-4.2.4-py3-none-any.whl (10 kB)\n",
            "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth<2,>=1.6.3->tensorboard==2.4.1) (0.3.0)\n",
            "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth<2,>=1.6.3->tensorboard==2.4.1) (4.9)\n",
            "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard==2.4.1) (1.3.1)\n",
            "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard==2.4.1) (1.25.11)\n",
            "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard==2.4.1) (2.10)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard==2.4.1) (2022.12.7)\n",
            "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorboard==2.4.1) (3.0.4)\n",
            "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=0.11.15->tensorboard==2.4.1) (2.1.2)\n",
            "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard==2.4.1) (0.5.0)\n",
            "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard==2.4.1) (3.2.2)\n",
            "Installing collected packages: cachetools, google-auth, google-auth-oauthlib, tensorboard\n",
            "  Attempting uninstall: cachetools\n",
            "    Found existing installation: cachetools 5.3.0\n",
            "    Uninstalling cachetools-5.3.0:\n",
            "      Successfully uninstalled cachetools-5.3.0\n",
            "  Attempting uninstall: google-auth\n",
            "    Found existing installation: google-auth 2.17.3\n",
            "    Uninstalling google-auth-2.17.3:\n",
            "      Successfully uninstalled google-auth-2.17.3\n",
            "  Attempting uninstall: google-auth-oauthlib\n",
            "    Found existing installation: google-auth-oauthlib 1.0.0\n",
            "    Uninstalling google-auth-oauthlib-1.0.0:\n",
            "      Successfully uninstalled google-auth-oauthlib-1.0.0\n",
            "  Attempting uninstall: tensorboard\n",
            "    Found existing installation: tensorboard 2.12.2\n",
            "    Uninstalling tensorboard-2.12.2:\n",
            "      Successfully uninstalled tensorboard-2.12.2\n",
            "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
            "tensorflow 2.12.0 requires protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3, but you have protobuf 3.20.2 which is incompatible.\n",
            "tensorflow 2.12.0 requires tensorboard<2.13,>=2.12, but you have tensorboard 2.4.1 which is incompatible.\n",
            "google-colab 1.0.0 requires pandas~=1.5.3, but you have pandas 1.1.5 which is incompatible.\n",
            "google-colab 1.0.0 requires requests>=2.27.0, but you have requests 2.23.0 which is incompatible.\n",
            "google-api-core 2.11.0 requires google-auth<3.0dev,>=2.14.1, but you have google-auth 1.35.0 which is incompatible.\u001b[0m\u001b[31m\n",
            "\u001b[0mSuccessfully installed cachetools-4.2.4 google-auth-1.35.0 google-auth-oauthlib-0.4.6 tensorboard-2.4.1\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting download==0.3.5\n",
            "  Downloading download-0.3.5-py3-none-any.whl (8.8 kB)\n",
            "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from download==0.3.5) (2.23.0)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from download==0.3.5) (4.53.0)\n",
            "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from download==0.3.5) (1.16.0)\n",
            "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.10/dist-packages (from requests->download==0.3.5) (3.0.4)\n",
            "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->download==0.3.5) (1.25.11)\n",
            "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->download==0.3.5) (2.10)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->download==0.3.5) (2022.12.7)\n",
            "Installing collected packages: download\n",
            "Successfully installed download-0.3.5\n",
            "/usr/lib/python3.10/runpy.py:126: RuntimeWarning: 'nltk.downloader' found in sys.modules after import of package 'nltk', but prior to execution of 'nltk.downloader'; this may result in unpredictable behaviour\n",
            "  warn(RuntimeWarning(msg))\n",
            "[nltk_data] Downloading package punkt to /root/nltk_data...\n",
            "[nltk_data]   Package punkt is already up-to-date!\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Utils"
      ],
      "metadata": {
        "id": "Km_X3ft1e4n2"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import sys\n",
        "from functools import reduce\n",
        "\n",
        "from torch import nn\n",
        "import torch.distributed as dist\n",
        "\n",
        "\n",
        "def summary(model: nn.Module, file=sys.stdout):\n",
        "    \"\"\"Render `model` like nn.Module.__repr__, annotating every module\n",
        "    with its parameter count, print it to `file`, and return the total.\n",
        "\n",
        "    `file` may be None (no printing), a path string, or a writable object.\n",
        "    \"\"\"\n",
        "    def render(module_):\n",
        "        # We treat the extra repr like the sub-module, one item per line\n",
        "        extra_lines = []\n",
        "        extra_repr = module_.extra_repr()\n",
        "        # empty string will be split into list ['']\n",
        "        if extra_repr:\n",
        "            extra_lines = extra_repr.split('\\n')\n",
        "        child_lines = []\n",
        "        total_params = 0\n",
        "        for key, child in module_._modules.items():\n",
        "            mod_str, num_params = render(child)\n",
        "            mod_str = nn.modules.module._addindent(mod_str, 2)\n",
        "            child_lines.append('(' + key + '): ' + mod_str)\n",
        "            total_params += num_params\n",
        "        lines = extra_lines + child_lines\n",
        "\n",
        "        for name, p in module_._parameters.items():\n",
        "            if hasattr(p, 'shape'):\n",
        "                # initializer 1 lets 0-dim (scalar) parameters count as one\n",
        "                # element instead of raising on an empty shape tuple\n",
        "                total_params += reduce(lambda x, y: x * y, p.shape, 1)\n",
        "\n",
        "        main_str = module_._get_name() + '('\n",
        "        if lines:\n",
        "            # simple one-liner info, which most builtin Modules will use\n",
        "            if len(extra_lines) == 1 and not child_lines:\n",
        "                main_str += extra_lines[0]\n",
        "            else:\n",
        "                main_str += '\\n  ' + '\\n  '.join(lines) + '\\n'\n",
        "\n",
        "        main_str += ')'\n",
        "        if file is sys.stdout:\n",
        "            # green ANSI highlight only when writing to interactive stdout\n",
        "            main_str += ', \\033[92m{:,}\\033[0m params'.format(total_params)\n",
        "        else:\n",
        "            main_str += ', {:,} params'.format(total_params)\n",
        "        return main_str, total_params\n",
        "\n",
        "    string, count = render(model)\n",
        "    if file is not None:\n",
        "        if isinstance(file, str):\n",
        "            # close the handle we open ourselves (the original leaked it)\n",
        "            with open(file, 'w') as handle:\n",
        "                print(string, file=handle)\n",
        "        else:\n",
        "            print(string, file=file)\n",
        "            file.flush()\n",
        "\n",
        "    return count\n",
        "\n",
        "\n",
        "def grad_norm(model: nn.Module):\n",
        "    \"\"\"Return the global L2 norm over all parameter gradients.\n",
        "\n",
        "    Parameters whose .grad is None (frozen, or before the first backward\n",
        "    pass) are skipped instead of raising AttributeError.\n",
        "    \"\"\"\n",
        "    total_norm = 0\n",
        "    for p in model.parameters():\n",
        "        if p.grad is None:\n",
        "            continue\n",
        "        param_norm = p.grad.data.norm(2)\n",
        "        total_norm += param_norm.item() ** 2\n",
        "    return total_norm ** 0.5\n",
        "\n",
        "\n",
        "def distributed():\n",
        "    # True only when the torch.distributed backend is compiled in AND a\n",
        "    # process group has actually been initialized for this run.\n",
        "    if not dist.is_available():\n",
        "        return False\n",
        "    return dist.is_initialized()"
      ],
      "metadata": {
        "id": "dX42ci5Le7jn"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Data Loader"
      ],
      "metadata": {
        "id": "tMMwHRMpdc_K"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import json\n",
        "from typing import List\n",
        "\n",
        "import torch\n",
        "from torch.utils.data import Dataset\n",
        "from tqdm import tqdm\n",
        "from transformers import PreTrainedTokenizer\n",
        "\n",
        "import re\n",
        "import unicodedata\n",
        "\n",
        "import nltk\n",
        "from nltk.corpus import stopwords\n",
        "from nltk.tag import pos_tag\n",
        "# from pycontractions import Contractions\n",
        "nltk.download('averaged_perceptron_tagger')\n",
        "nltk.download('stopwords')\n",
        "nltk.download('wordnet')\n",
        "\n",
        "\n",
        "# Contraction -> expansion lookup table; keys are matched\n",
        "# case-insensitively by PreProcess.expand_contractions.\n",
        "CONTRACTION_MAP = { \"ain't\": \"is not\",\n",
        "                    \"aren't\": \"are not\",\n",
        "                    \"can't\": \"cannot\",\n",
        "                    \"can't've\": \"cannot have\",\n",
        "                    \"'cause\": \"because\",\n",
        "                    \"could've\": \"could have\",\n",
        "                    \"couldn't\": \"could not\",\n",
        "                    \"couldn't've\": \"could not have\",\n",
        "                    \"didn't\": \"did not\",\n",
        "                    \"doesn't\": \"does not\",\n",
        "                    \"don't\": \"do not\",\n",
        "                    \"hadn't\": \"had not\",\n",
        "                    \"hadn't've\": \"had not have\",\n",
        "                    \"hasn't\": \"has not\",\n",
        "                    \"haven't\": \"have not\",\n",
        "                    \"he'd\": \"he would\",\n",
        "                    \"he'd've\": \"he would have\",\n",
        "                    \"he'll\": \"he will\",\n",
        "                    \"he'll've\": \"he will have\",\n",
        "                    \"he's\": \"he is\",\n",
        "                    \"how'd\": \"how did\",\n",
        "                    \"how'd'y\": \"how do you\",\n",
        "                    \"how'll\": \"how will\",\n",
        "                    \"how's\": \"how is\",\n",
        "                    \"I'd\": \"I would\",\n",
        "                    \"I ain't\": \"I am not\",\n",
        "                    \"I'd've\": \"I would have\",\n",
        "                    \"I'll\": \"I will\",\n",
        "                    \"I'll've\": \"I will have\",\n",
        "                    \"I'm\": \"I am\",\n",
        "                    \"I've\": \"I have\",\n",
        "                    \"i'd\": \"i would\",\n",
        "                    \"i'd've\": \"i would have\",\n",
        "                    \"i'll\": \"i will\",\n",
        "                    \"i'll've\": \"i will have\",\n",
        "                    \"i'm\": \"i am\",\n",
        "                    \"i've\": \"i have\",\n",
        "                    \"isn't\": \"is not\",\n",
        "                    \"it'd\": \"it would\",\n",
        "                    \"it'd've\": \"it would have\",\n",
        "                    \"it'll\": \"it will\",\n",
        "                    \"it'll've\": \"it will have\",\n",
        "                    \"it's\": \"it is\",\n",
        "                    \"let's\": \"let us\",\n",
        "                    \"ma'am\": \"madam\",\n",
        "                    \"mayn't\": \"may not\",\n",
        "                    \"might've\": \"might have\",\n",
        "                    \"mightn't\": \"might not\",\n",
        "                    \"mightn't've\": \"might not have\",\n",
        "                    \"must've\": \"must have\",\n",
        "                    \"mustn't\": \"must not\",\n",
        "                    \"mustn't've\": \"must not have\",\n",
        "                    \"needn't\": \"need not\",\n",
        "                    \"needn't've\": \"need not have\",\n",
        "                    \"o'clock\": \"of the clock\",\n",
        "                    \"oughtn't\": \"ought not\",\n",
        "                    \"oughtn't've\": \"ought not have\",\n",
        "                    \"shan't\": \"shall not\",\n",
        "                    \"sha'n't\": \"shall not\",\n",
        "                    \"shan't've\": \"shall not have\",\n",
        "                    \"she'd\": \"she would\",\n",
        "                    \"she'd've\": \"she would have\",\n",
        "                    \"she'll\": \"she will\",\n",
        "                    \"she'll've\": \"she will have\",\n",
        "                    \"she's\": \"she is\",\n",
        "                    \"should've\": \"should have\",\n",
        "                    \"shouldn't\": \"should not\",\n",
        "                    \"shouldn't've\": \"should not have\",\n",
        "                    \"so've\": \"so have\",\n",
        "                    \"so's\": \"so as\",\n",
        "                    \"that'd\": \"that would\",\n",
        "                    \"that'd've\": \"that would have\",\n",
        "                    \"that's\": \"that is\",\n",
        "                    \"there'd\": \"there would\",\n",
        "                    \"there'd've\": \"there would have\",\n",
        "                    \"there's\": \"there is\",\n",
        "                    \"they'd\": \"they would\",\n",
        "                    \"they'd've\": \"they would have\",\n",
        "                    \"they'll\": \"they will\",\n",
        "                    \"they'll've\": \"they will have\",\n",
        "                    \"they're\": \"they are\",\n",
        "                    \"they've\": \"they have\",\n",
        "                    \"to've\": \"to have\",\n",
        "                    \"wasn't\": \"was not\",\n",
        "                    \"we'd\": \"we would\",\n",
        "                    \"we'd've\": \"we would have\",\n",
        "                    \"we'll\": \"we will\",\n",
        "                    \"we'll've\": \"we will have\",\n",
        "                    \"we're\": \"we are\",\n",
        "                    \"we've\": \"we have\",\n",
        "                    \"weren't\": \"were not\",\n",
        "                    \"what'll\": \"what will\",\n",
        "                    \"what'll've\": \"what will have\",\n",
        "                    \"what're\": \"what are\",\n",
        "                    \"what's\": \"what is\",\n",
        "                    \"what've\": \"what have\",\n",
        "                    \"when's\": \"when is\",\n",
        "                    \"when've\": \"when have\",\n",
        "                    \"where'd\": \"where did\",\n",
        "                    \"where's\": \"where is\",\n",
        "                    \"where've\": \"where have\",\n",
        "                    \"who'll\": \"who will\",\n",
        "                    \"who'll've\": \"who will have\",\n",
        "                    \"who's\": \"who is\",\n",
        "                    \"who've\": \"who have\",\n",
        "                    \"why's\": \"why is\",\n",
        "                    \"why've\": \"why have\",\n",
        "                    \"will've\": \"will have\",\n",
        "                    \"won't\": \"will not\",\n",
        "                    \"won't've\": \"will not have\",\n",
        "                    \"would've\": \"would have\",\n",
        "                    \"wouldn't\": \"would not\",\n",
        "                    \"wouldn't've\": \"would not have\",\n",
        "                    \"y'all\": \"you all\",\n",
        "                    \"y'all'd\": \"you all would\",\n",
        "                    \"y'all'd've\": \"you all would have\",\n",
        "                    \"y'all're\": \"you all are\",\n",
        "                    \"y'all've\": \"you all have\",\n",
        "                    \"you'd\": \"you would\",\n",
        "                    \"you'd've\": \"you would have\",\n",
        "                    \"you'll\": \"you will\",\n",
        "                    \"you'll've\": \"you will have\",\n",
        "                    \"you're\": \"you are\",\n",
        "                    \"you've\": \"you have\"\n",
        "                    }\n",
        "\n",
        "\n",
        "class PreProcess:\n",
        "    \"\"\"Configurable text-normalization pipeline.\n",
        "\n",
        "    Each *_norm flag enables one stage; fit() runs the enabled stages in\n",
        "    a fixed order and returns the normalized string.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, lowercase_norm=False, period_norm=False, special_chars_norm=False, accented_norm=False, contractions_norm=False,\n",
        "                 stemming_norm=False, lemma_norm=False, stopword_norm=False, proper_norm=False):\n",
        "\n",
        "        self.lowercase_norm = lowercase_norm\n",
        "        self.period_norm = period_norm\n",
        "        self.special_chars_norm = special_chars_norm\n",
        "        self.accented_norm = accented_norm\n",
        "        self.contractions_norm = contractions_norm\n",
        "        self.stemming_norm = stemming_norm\n",
        "        self.lemma_norm = lemma_norm\n",
        "        self.stopword_norm = stopword_norm\n",
        "        self.proper_norm = proper_norm\n",
        "\n",
        "    def lowercase_normalization(self, data):\n",
        "        # Fold the whole string to lower case.\n",
        "        return data.lower()\n",
        "\n",
        "    def period_remove(self, data):\n",
        "        # Turn periods into spaces so sentence boundaries stay token boundaries.\n",
        "        return data.replace(\".\", \" \")\n",
        "\n",
        "    def rebuild_sentence(self, token):\n",
        "        # Join a token list back into a sentence with no trailing space\n",
        "        # (the original appended one and discarded the rstrip() result).\n",
        "        return \" \".join(token)\n",
        "\n",
        "    def special_char_remove(self, data, remove_digits=False):\n",
        "        \"\"\"Remove HTML tags, non-ASCII bytes, backslashes, hyphens and commas.\n",
        "\n",
        "        remove_digits is now honored (the original accepted but ignored it);\n",
        "        the default False preserves the old behavior.\n",
        "        \"\"\"\n",
        "        tokens = self.tokenization(data)\n",
        "        special_char_norm_data = []\n",
        "        clean_remove = re.compile('<.*?>')  # compiled once, not per sentence\n",
        "\n",
        "        for token in tokens:\n",
        "            sentence = self.rebuild_sentence(token)\n",
        "            norm_sentence = clean_remove.sub('', sentence)\n",
        "            norm_sentence = re.sub(r'[^\\x00-\\x7F]+', '', norm_sentence)\n",
        "            norm_sentence = norm_sentence.replace(\"\\\\\", \"\")\n",
        "            norm_sentence = norm_sentence.replace(\"-\", \" \")\n",
        "            norm_sentence = norm_sentence.replace(\",\", \"\")\n",
        "            if remove_digits:\n",
        "                norm_sentence = re.sub(r'\\d+', '', norm_sentence)\n",
        "            special_char_norm_data.append(norm_sentence)\n",
        "\n",
        "        return special_char_norm_data\n",
        "\n",
        "    def accented_word_normalization(self, data):\n",
        "        \"\"\"Transliterate accented characters to their closest ASCII form.\"\"\"\n",
        "        tokens = self.tokenization(data)\n",
        "        accented_norm_data = []\n",
        "\n",
        "        for token in tokens:\n",
        "            sentence = self.rebuild_sentence(token)\n",
        "            norm_sentence = unicodedata.normalize('NFKD', sentence).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n",
        "            accented_norm_data.append(norm_sentence)\n",
        "\n",
        "        return accented_norm_data\n",
        "\n",
        "    def expand_contractions(self, data, pycontrct=False):\n",
        "        \"\"\"Expand contractions via CONTRACTION_MAP, then drop apostrophes.\"\"\"\n",
        "        # Simple contraction removal based on pre-defined set of contractions\n",
        "        contraction_mapping = CONTRACTION_MAP\n",
        "        contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),\n",
        "                                          flags=re.IGNORECASE | re.DOTALL)\n",
        "\n",
        "        def expand_match(contraction):\n",
        "            match = contraction.group(0)\n",
        "            first_char = match[0]\n",
        "            # exact-case lookup first, then lower-case (IGNORECASE hits both)\n",
        "            expanded_contraction = contraction_mapping.get(match) \\\n",
        "                if contraction_mapping.get(match) \\\n",
        "                else contraction_mapping.get(match.lower())\n",
        "            # keep the original first character to preserve capitalization\n",
        "            expanded_contraction = first_char + expanded_contraction[1:]\n",
        "            return expanded_contraction\n",
        "\n",
        "        tokens = self.tokenization(data)\n",
        "        contraction_norm_data = []\n",
        "\n",
        "        for token in tokens:\n",
        "            sentence = self.rebuild_sentence(token)\n",
        "            expanded_text = contractions_pattern.sub(expand_match, sentence)\n",
        "            expanded_text = re.sub(\"'\", \"\", expanded_text)\n",
        "            contraction_norm_data.append(expanded_text)\n",
        "\n",
        "        return contraction_norm_data\n",
        "\n",
        "    def stemming(self, data):\n",
        "        \"\"\"Reduce every token to its Porter stem.\"\"\"\n",
        "        stemmer = nltk.stem.PorterStemmer()\n",
        "        tokens = self.tokenization(data)\n",
        "        return [\" \".join(stemmer.stem(word) for word in sent) for sent in tokens]\n",
        "\n",
        "    def lemmatization(self, data):\n",
        "        \"\"\"Replace every token with its WordNet lemma.\"\"\"\n",
        "        lemma = nltk.stem.WordNetLemmatizer()\n",
        "        tokens = self.tokenization(data)\n",
        "        return [\" \".join(lemma.lemmatize(word) for word in sent) for sent in tokens]\n",
        "\n",
        "    def stopword_remove(self, data):\n",
        "        \"\"\"Drop English stopwords (case-insensitive).\n",
        "\n",
        "        Output keeps the original format: one leading space per kept token.\n",
        "        \"\"\"\n",
        "        filtered_sentence = []\n",
        "        stop_words = set(stopwords.words('english'))\n",
        "        data = self.tokenization(data)\n",
        "\n",
        "        for sent in data:\n",
        "            res = \"\"\n",
        "            for word in sent:\n",
        "                if word.lower() not in stop_words:\n",
        "                    res = res + \" \" + word\n",
        "            filtered_sentence.append(res)\n",
        "\n",
        "        return filtered_sentence\n",
        "\n",
        "    def remove_proper_nouns(self, data):\n",
        "        \"\"\"Drop tokens whose POS tag is NNP (proper noun).\"\"\"\n",
        "        common_words = []\n",
        "        data = self.tokenization(data)\n",
        "        for sent in data:\n",
        "            tagged_sent = pos_tag(sent)\n",
        "            proper_nouns = [word for word, pos in tagged_sent if pos == 'NNP']\n",
        "            res = \"\"\n",
        "            for word in sent:\n",
        "                if word not in proper_nouns:\n",
        "                    res = res + \" \" + word\n",
        "            common_words.append(res)\n",
        "\n",
        "        return common_words\n",
        "\n",
        "    def tokenization(self, data):\n",
        "        \"\"\"Whitespace-tokenize each sentence; returns a list of token lists.\"\"\"\n",
        "        tokenizer = nltk.tokenize.WhitespaceTokenizer()  # hoisted out of the loop\n",
        "        return [tokenizer.tokenize(sentence) for sentence in data]\n",
        "\n",
        "    def fit(self, data):\n",
        "        \"\"\"Apply every enabled stage to `data` and return the cleaned string.\"\"\"\n",
        "        # List-based stages operate on a one-element list; scalar stages run\n",
        "        # after data is unwrapped back to a plain string.\n",
        "        data = [str(data)]\n",
        "\n",
        "        if self.special_chars_norm:\n",
        "            data = self.special_char_remove(data, remove_digits=False)\n",
        "\n",
        "        # if self.contractions_norm:\n",
        "        #     data = self.expand_contractions(data)\n",
        "\n",
        "        if self.accented_norm:\n",
        "            data = self.accented_word_normalization(data)\n",
        "\n",
        "        if self.stemming_norm:\n",
        "            data = self.stemming(data)\n",
        "\n",
        "        if self.proper_norm:\n",
        "            data = self.remove_proper_nouns(data)\n",
        "\n",
        "        if self.stopword_norm:\n",
        "            data = self.stopword_remove(data)\n",
        "\n",
        "        if self.lemma_norm:\n",
        "            data = self.lemmatization(data)\n",
        "\n",
        "        data = data[0]\n",
        "\n",
        "        if self.lowercase_norm:\n",
        "            data = self.lowercase_normalization(str(data))\n",
        "\n",
        "        if self.period_norm:\n",
        "            data = self.period_remove(str(data))\n",
        "\n",
        "        return data\n",
        "\n",
        "\n",
        "class EncodedDataset(Dataset):\n",
        "    \"\"\"Dataset yielding (input_ids, attention_mask, stylometric features, label).\"\"\"\n",
        "\n",
        "    def __init__(self, texts: List[str], labels: List[int], stylo_features: List[List], tokenizer: PreTrainedTokenizer,\n",
        "                 max_sequence_length: int = None, min_sequence_length: int = None):\n",
        "        self.texts = texts\n",
        "        self.labels = labels\n",
        "        # NOTE(review): __getitem__ calls .tolist() on each element, so these\n",
        "        # are presumably numpy arrays — confirm against the caller.\n",
        "        self.stylo_features = stylo_features\n",
        "        self.tokenizer = tokenizer\n",
        "        self.max_sequence_length = max_sequence_length\n",
        "        self.min_sequence_length = min_sequence_length\n",
        "        # Build the preprocessor once here instead of once per __getitem__.\n",
        "        self.preprocessor = PreProcess(special_chars_norm=True, lowercase_norm=True, period_norm=True, proper_norm=True, accented_norm=True)\n",
        "\n",
        "    def __len__(self):\n",
        "        return len(self.texts)\n",
        "\n",
        "    def __getitem__(self, index):\n",
        "        text = self.preprocessor.fit(self.texts[index])\n",
        "        label = self.labels[index]\n",
        "        style_feat = self.stylo_features[index].tolist()\n",
        "        # Pad/truncate to max_sequence_length so batches have uniform shape.\n",
        "        padded_sequences = self.tokenizer(text, padding='max_length', max_length=self.max_sequence_length, truncation=True)\n",
        "        return torch.tensor(padded_sequences['input_ids']), torch.tensor(padded_sequences['attention_mask']), torch.tensor(style_feat), int(label)\n",
        "\n",
        "\n",
        "class EncodeEvalData(Dataset):\n",
        "    \"\"\"Evaluation dataset: preprocessed, tokenized text plus stylometric features (no labels).\n",
        "\n",
        "    Each item is ``(input_ids, attention_mask, stylo_features)``.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, input_texts: List[str], stylo_features: List[List], tokenizer: PreTrainedTokenizer,\n",
        "                 max_sequence_length: int = None, min_sequence_length: int = None):\n",
        "\n",
        "        self.input_texts = input_texts\n",
        "        self.tokenizer = tokenizer\n",
        "        self.max_sequence_length = max_sequence_length\n",
        "        # NOTE(review): min_sequence_length is stored but not used in this class.\n",
        "        self.min_sequence_length = min_sequence_length\n",
        "        self.stylo_features = stylo_features\n",
        "        # PERF FIX: construct the preprocessor once here rather than per item\n",
        "        # in __getitem__, as the original did.\n",
        "        self.preprocessor = PreProcess(special_chars_norm=True, lowercase_norm=True, period_norm=True, proper_norm=True, accented_norm=True)\n",
        "\n",
        "    def __len__(self):\n",
        "        return len(self.input_texts)\n",
        "\n",
        "    def __getitem__(self, index):\n",
        "        text = self.input_texts[index]\n",
        "\n",
        "        stylo_features = self.stylo_features[index].tolist()\n",
        "\n",
        "        # Preprocessing mirrors the training-time pipeline in EncodedDataset.\n",
        "        text = self.preprocessor.fit(text)\n",
        "\n",
        "        padded_sequences = self.tokenizer(text, padding='max_length', max_length=self.max_sequence_length, truncation=True)\n",
        "\n",
        "        return torch.tensor(padded_sequences['input_ids']), torch.tensor(padded_sequences['attention_mask']), torch.tensor(stylo_features)"
      ],
      "metadata": {
        "id": "1htLTvXkdhpJ",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "cd65ddb2-4616-4fab-ba13-70ec5a382f7d"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "[nltk_data] Downloading package averaged_perceptron_tagger to\n",
            "[nltk_data]     /root/nltk_data...\n",
            "[nltk_data]   Unzipping taggers/averaged_perceptron_tagger.zip.\n",
            "[nltk_data] Downloading package stopwords to /root/nltk_data...\n",
            "[nltk_data]   Unzipping corpora/stopwords.zip.\n",
            "[nltk_data] Downloading package wordnet to /root/nltk_data...\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Model Code"
      ],
      "metadata": {
        "id": "GoPm9R_FeV5j"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Base AI Detector Model"
      ],
      "metadata": {
        "id": "go5H4TfpeZ1u"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "from torch.nn import Softmax\n",
        "from torch.nn import CrossEntropyLoss, MSELoss\n",
        "from typing import Optional, Tuple\n",
        "\n",
        "from transformers import RobertaForSequenceClassification\n",
        "\n",
        "from transformers.modeling_outputs import SequenceClassifierOutput\n",
        "\n",
        "from dataclasses import dataclass\n",
        "\n",
        "from torch.nn.functional import normalize\n",
        "\n",
        "@dataclass\n",
        "class SequenceClassifierOutputWithLastLayer(SequenceClassifierOutput):\n",
        "    \"\"\"SequenceClassifierOutput extended with the encoder's last hidden state,\n",
        "    so downstream fusion code can reuse the token embeddings.\"\"\"\n",
        "\n",
        "    loss: Optional[torch.FloatTensor] = None\n",
        "    logits: torch.FloatTensor = None\n",
        "    # Full token-level encoder output, in addition to the standard fields.\n",
        "    last_hidden_state: torch.FloatTensor = None\n",
        "    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n",
        "    attentions: Optional[Tuple[torch.FloatTensor]] = None\n",
        "\n",
        "\n",
        "class RobertaForFusion(RobertaForSequenceClassification):\n",
        "    \"\"\"RoBERTa sequence classifier that returns softmaxed logits and also exposes\n",
        "    the encoder's last hidden state for downstream fusion.\"\"\"\n",
        "\n",
        "    _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n",
        "\n",
        "    def __init__(self, config):\n",
        "        super().__init__(config)\n",
        "\n",
        "        # Softmax over the class dimension; applied to logits before returning.\n",
        "        self.soft_max = Softmax(dim=1)\n",
        "\n",
        "    def forward(\n",
        "        self,\n",
        "        input_ids=None,\n",
        "        attention_mask=None,\n",
        "        token_type_ids=None,\n",
        "        position_ids=None,\n",
        "        head_mask=None,\n",
        "        inputs_embeds=None,\n",
        "        labels=None,\n",
        "        output_attentions=None,\n",
        "        output_hidden_states=None,\n",
        "        return_dict=None,\n",
        "    ):\n",
        "        r\"\"\"\n",
        "        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n",
        "            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n",
        "            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n",
        "            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n",
        "        \"\"\"\n",
        "        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n",
        "\n",
        "        outputs = self.roberta(\n",
        "            input_ids,\n",
        "            attention_mask=attention_mask,\n",
        "            token_type_ids=token_type_ids,\n",
        "            position_ids=position_ids,\n",
        "            head_mask=head_mask,\n",
        "            inputs_embeds=inputs_embeds,\n",
        "            output_attentions=output_attentions,\n",
        "            output_hidden_states=output_hidden_states,\n",
        "            return_dict=return_dict,\n",
        "        )\n",
        "        sequence_output = outputs[0]\n",
        "        logits = self.classifier(sequence_output)\n",
        "\n",
        "        # BUG FIX: the original duplicated `loss = None` / `if labels is not None:`\n",
        "        # in a redundant nested block; one level is sufficient.\n",
        "        loss = None\n",
        "        if labels is not None:\n",
        "            if self.num_labels == 1:\n",
        "                #  We are doing regression\n",
        "                loss_fct = MSELoss()\n",
        "                loss = loss_fct(logits.view(-1), labels.view(-1))\n",
        "            else:\n",
        "                loss_fct = CrossEntropyLoss()\n",
        "                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n",
        "\n",
        "        # Return class probabilities rather than raw logits.\n",
        "        softmax_logits = self.soft_max(logits)\n",
        "\n",
        "        if not return_dict:\n",
        "            output = (softmax_logits,) + outputs[2:]\n",
        "            return ((loss,) + output) if loss is not None else output\n",
        "\n",
        "        return SequenceClassifierOutputWithLastLayer(\n",
        "            loss=loss,\n",
        "            logits=softmax_logits,\n",
        "            last_hidden_state=sequence_output,\n",
        "            hidden_states=outputs.hidden_states,\n",
        "            attentions=outputs.attentions,\n",
        "        )\n",
        "\n"
      ],
      "metadata": {
        "id": "7bClPgQKeYO-"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "### J-Guard"
      ],
      "metadata": {
        "id": "u2CzReQteeb-"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# BUG FIX: this cell previously relied on `from torch import nn` executed in a\n",
        "# LATER cell, so Restart & Run All raised NameError; import it here.\n",
        "from torch import nn\n",
        "\n",
        "\n",
        "class FusedClassifier(torch.nn.Module):\n",
        "    \"\"\"Fuses frozen LM embeddings with stylometric features for 2-way classification.\n",
        "\n",
        "    The LM's last-token embedding is concatenated with the hand-crafted features,\n",
        "    normalized, passed through a guidance MLP and then a softmax classification head.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, lm, device, FUSED_INPUT_SIZE):\n",
        "        super().__init__()\n",
        "\n",
        "        self.lm = lm\n",
        "\n",
        "        # move to device\n",
        "        self.lm.to(device)\n",
        "\n",
        "        self.guidance_head = nn.Sequential(\n",
        "            nn.Linear(FUSED_INPUT_SIZE, 1024),\n",
        "            nn.ReLU(),\n",
        "            nn.Linear(1024, 256)\n",
        "        ).to(device)\n",
        "\n",
        "        self.classification_head = nn.Sequential(\n",
        "            nn.Linear(256, 32),\n",
        "            nn.ReLU(),\n",
        "            nn.Linear(32, 2),\n",
        "            nn.Softmax(dim=-1)\n",
        "        ).to(device)\n",
        "\n",
        "        # the LM is already pre-trained, no need to calc grads anymore\n",
        "        for param in self.lm.parameters():\n",
        "            param.requires_grad = False\n",
        "\n",
        "    def forward(self, data, custom_features):\n",
        "        # data is [input_ids, attention_mask] or [input_ids, attention_mask, labels].\n",
        "        if len(data) < 3:\n",
        "          output_dic = self.lm(data[0], attention_mask=data[1])\n",
        "\n",
        "        else:\n",
        "          output_dic = self.lm(data[0], attention_mask=data[1], labels=data[2])\n",
        "\n",
        "        # Last token's embedding; detached (the LM is frozen anyway).\n",
        "        lm_emb_output = output_dic[\"last_hidden_state\"][:, -1, :].detach()\n",
        "\n",
        "        # Append manual stylometric features to the RoBERTa features.\n",
        "        x = torch.cat((lm_emb_output, custom_features), axis=-1)\n",
        "        x = normalize(x)\n",
        "        c = self.guidance_head(x)\n",
        "\n",
        "        return self.classification_head(c)"
      ],
      "metadata": {
        "id": "NtRD1A9f55Xw"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Train Code"
      ],
      "metadata": {
        "id": "oo22No0JfGE-"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### J-Guard Training"
      ],
      "metadata": {
        "id": "7z0ryOiCfT0A"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "\"\"\"Training code for the detector model\"\"\"\n",
        "\n",
        "import argparse\n",
        "import pandas as pd\n",
        "import os\n",
        "import subprocess\n",
        "import sys\n",
        "from itertools import count\n",
        "from multiprocessing import Process\n",
        "\n",
        "import torch\n",
        "import torch.distributed as dist\n",
        "from torch import nn\n",
        "from torch.nn.parallel import DistributedDataParallel\n",
        "from torch.optim import Adam\n",
        "from torch.utils.data import DataLoader, DistributedSampler, RandomSampler\n",
        "from tqdm import tqdm\n",
        "# from transformers import *\n",
        "from transformers import RobertaTokenizer\n",
        "\n",
        "from types import SimpleNamespace\n",
        "\n",
        "# Seed PyTorch's global RNG so training runs are repeatable.\n",
        "torch.manual_seed(int(1000))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "oRKctjtwZzta",
        "outputId": "60507770-27ad-4b4e-c0c7-c36bfe4f5fd1"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "<torch._C.Generator at 0x7f6fe0160dd0>"
            ]
          },
          "metadata": {},
          "execution_count": 6
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "def setup_distributed(port=29500):\n",
        "    \"\"\"Initialize torch.distributed (NCCL backend) when multiple GPUs are available.\n",
        "\n",
        "    :param port: TCP port used for the rendezvous when launched under MPI.\n",
        "    :return: ``(rank, world_size)``; ``(0, 1)`` when running single-process.\n",
        "    \"\"\"\n",
        "    # Single-process fallback: no dist support, no CUDA, or only one GPU.\n",
        "    if not dist.is_available() or not torch.cuda.is_available() or torch.cuda.device_count() <= 1:\n",
        "        return 0, 1\n",
        "\n",
        "    # Launched under MPI: derive rank/size from the MPI communicator.\n",
        "    if 'MPIR_CVAR_CH3_INTERFACE_HOSTNAME' in os.environ:\n",
        "        from mpi4py import MPI\n",
        "        mpi_rank = MPI.COMM_WORLD.Get_rank()\n",
        "        mpi_size = MPI.COMM_WORLD.Get_size()\n",
        "\n",
        "        os.environ[\"MASTER_ADDR\"] = '127.0.0.1'\n",
        "        os.environ[\"MASTER_PORT\"] = str(port)\n",
        "\n",
        "        dist.init_process_group(backend=\"nccl\", world_size=mpi_size, rank=mpi_rank)\n",
        "        return mpi_rank, mpi_size\n",
        "\n",
        "    # Otherwise expect torchrun-style env vars (MASTER_ADDR, RANK, WORLD_SIZE, ...).\n",
        "    dist.init_process_group(backend=\"nccl\", init_method=\"env://\")\n",
        "    return dist.get_rank(), dist.get_world_size()\n",
        "\n",
        "\n",
        "def load_datasets(text_dir, stylo_dir, dataset_name, imp_feat, tokenizer, batch_size,\n",
        "                  max_sequence_length, random_sequence_length=True):\n",
        "    \"\"\"Build train/validation DataLoaders pairing dataset text with stylometric features.\n",
        "\n",
        "    :param text_dir: root directory containing ``<dataset_name>/CSV/{train,test}.csv``.\n",
        "    :param stylo_dir: directory containing ``<dataset_name>_{train,test}_feature.csv``.\n",
        "    :param imp_feat: stylometric feature column names to keep.\n",
        "    :return: ``(train_loader, validation_loader)``.\n",
        "    \"\"\"\n",
        "    # ROBUSTNESS FIX: os.path.join works with or without trailing slashes on the\n",
        "    # directory arguments (the original string concatenation required them).\n",
        "    data_path = os.path.join(text_dir, dataset_name, \"CSV\")\n",
        "    data_train = pd.read_csv(os.path.join(data_path, \"train.csv\"))\n",
        "    data_test = pd.read_csv(os.path.join(data_path, \"test.csv\"))\n",
        "\n",
        "    stylo_feat_train = pd.read_csv(os.path.join(stylo_dir, dataset_name + \"_train_feature.csv\"))\n",
        "    stylo_feat_test = pd.read_csv(os.path.join(stylo_dir, dataset_name + \"_test_feature.csv\"))\n",
        "\n",
        "    # NOTE(review): distributed() is assumed to be defined elsewhere in the project.\n",
        "    Sampler = DistributedSampler if distributed() and dist.get_world_size() > 1 else RandomSampler\n",
        "\n",
        "    min_sequence_length = 10 if random_sequence_length else None\n",
        "\n",
        "    train_dataset = EncodedDataset(data_train.text.values, data_train.label.values, stylo_feat_train[imp_feat].values, tokenizer, max_sequence_length, min_sequence_length)\n",
        "    train_loader = DataLoader(train_dataset, batch_size, sampler=Sampler(train_dataset), num_workers=0)\n",
        "\n",
        "    validation_dataset = EncodedDataset(data_test.text.values, data_test.label.values, stylo_feat_test[imp_feat].values, tokenizer, max_sequence_length, min_sequence_length)\n",
        "    validation_loader = DataLoader(validation_dataset, batch_size=1, sampler=Sampler(validation_dataset))\n",
        "\n",
        "    return train_loader, validation_loader\n",
        "\n",
        "\n",
        "def accuracy_sum(logits, labels):\n",
        "    if list(logits.shape) == list(labels.shape) + [2]:\n",
        "        # 2-d outputs\n",
        "        classification = (logits[..., 0] < logits[..., 1]).long().flatten()\n",
        "    else:\n",
        "        classification = (logits > 0).long().flatten()\n",
        "    assert classification.shape == labels.shape\n",
        "    return (classification == labels).float().sum().item()\n",
        "\n",
        "\n",
        "def train(model: nn.Module, optimizer, device: str, loader: DataLoader, desc='Train'):\n",
        "    \"\"\"Run one training epoch and return summed accuracy/loss plus the epoch size.\n",
        "\n",
        "    :param model: fused classifier (or DDP wrapper) called as model(data, custom_features).\n",
        "    :param loader: yields (input_ids, attention_mask, stylo_features, labels) batches.\n",
        "    :return: dict with summed 'train/accuracy', 'train/epoch_size', 'train/loss'\n",
        "        (averages are computed by the caller).\n",
        "    \"\"\"\n",
        "    model.train()\n",
        "\n",
        "    train_accuracy = 0\n",
        "    train_epoch_size = 0\n",
        "    train_loss = 0\n",
        "\n",
        "    # Progress bar only on rank 0 when running distributed.\n",
        "    with tqdm(loader, desc=desc, disable=distributed() and dist.get_rank() > 0) as loop:\n",
        "        for texts, masks, custom_features, labels in loop:\n",
        "\n",
        "            texts, masks, custom_features, labels = texts.to(device), masks.to(device), custom_features.to(device), labels.to(device)\n",
        "            batch_size = texts.shape[0]\n",
        "\n",
        "            optimizer.zero_grad()\n",
        "            predict_label = model(data=[texts, masks, labels], custom_features = custom_features)\n",
        "\n",
        "            loss_fct = CrossEntropyLoss()\n",
        "            loss = loss_fct(predict_label, labels)\n",
        "\n",
        "            loss.backward()\n",
        "            optimizer.step()\n",
        "\n",
        "            # Accumulate sums; per-example averages are derived later.\n",
        "            batch_accuracy = accuracy_sum(predict_label, labels)\n",
        "            train_accuracy += batch_accuracy\n",
        "            train_epoch_size += batch_size\n",
        "            train_loss += loss.item() * batch_size\n",
        "\n",
        "            loop.set_postfix(loss=loss.item(), acc=train_accuracy / train_epoch_size)\n",
        "\n",
        "    return {\n",
        "        \"train/accuracy\": train_accuracy,\n",
        "        \"train/epoch_size\": train_epoch_size,\n",
        "        \"train/loss\": train_loss\n",
        "    }\n",
        "\n",
        "\n",
        "def validate(model: nn.Module, device: str, loader: DataLoader, votes=1, desc='Validation'):\n",
        "    \"\"\"Run validation, optionally averaging logits over several passes ('votes').\n",
        "\n",
        "    :param votes: number of passes over the loader whose logits/losses are averaged.\n",
        "    :return: dict with summed 'validation/accuracy', 'validation/epoch_size',\n",
        "        'validation/loss' (averages are computed by the caller).\n",
        "    \"\"\"\n",
        "    model.eval()\n",
        "\n",
        "    validation_accuracy = 0\n",
        "    validation_epoch_size = 0\n",
        "    validation_loss = 0\n",
        "\n",
        "    # Preload all batches for every vote, then regroup them per example.\n",
        "    # NOTE(review): this holds the whole validation set in memory.\n",
        "    records = [record for v in range(votes) for record in tqdm(loader, desc=f'Preloading data ... {v}',\n",
        "                                                               disable=distributed() and dist.get_rank() > 0)]\n",
        "    records = [[records[v * len(loader) + i] for v in range(votes)] for i in range(len(loader))]\n",
        "\n",
        "    with tqdm(records, desc=desc, disable=distributed() and dist.get_rank() > 0) as loop, torch.no_grad():\n",
        "        for example in loop:\n",
        "            losses = []\n",
        "            logit_votes = []\n",
        "\n",
        "            for texts, masks, custom_features, labels in example:\n",
        "\n",
        "              texts, masks, custom_features, labels = texts.to(device), masks.to(device), custom_features.to(device), labels.to(device)\n",
        "              batch_size = texts.shape[0]\n",
        "\n",
        "              predict_label = model(data=[texts, masks, labels], custom_features = custom_features)\n",
        "\n",
        "              loss_fct = CrossEntropyLoss()\n",
        "              loss = loss_fct(predict_label, labels)\n",
        "              losses.append(loss)\n",
        "              logit_votes.append(predict_label)\n",
        "\n",
        "            # Average loss and logits across the votes for this example.\n",
        "            loss = torch.stack(losses).mean(dim=0)\n",
        "            logits = torch.stack(logit_votes).mean(dim=0)\n",
        "\n",
        "            # NOTE(review): labels/batch_size come from the last vote's batch --\n",
        "            # assumes the batch order is aligned across votes; confirm.\n",
        "            batch_accuracy = accuracy_sum(logits, labels)\n",
        "            validation_accuracy += batch_accuracy\n",
        "            validation_epoch_size += batch_size\n",
        "            validation_loss += loss.item() * batch_size\n",
        "\n",
        "            loop.set_postfix(loss=loss.item(), acc=validation_accuracy / validation_epoch_size)\n",
        "\n",
        "    return {\n",
        "        \"validation/accuracy\": validation_accuracy,\n",
        "        \"validation/epoch_size\": validation_epoch_size,\n",
        "        \"validation/loss\": validation_loss\n",
        "    }\n",
        "\n",
        "\n",
        "def _all_reduce_dict(d, device):\n",
        "    # wrap in tensor and use reduce to gpu0 tensor\n",
        "    output_d = {}\n",
        "    for (key, value) in sorted(d.items()):\n",
        "        tensor_input = torch.tensor([[value]]).to(device)\n",
        "        # torch.distributed.all_reduce(tensor_input)\n",
        "        output_d[key] = tensor_input.item()\n",
        "    return output_d\n",
        "\n",
        "\n",
        "def run(params):\n",
        "    \"\"\"Train and validate the fused detector for one configuration, saving the best checkpoint.\"\"\"\n",
        "\n",
        "    rank, world_size = setup_distributed()\n",
        "\n",
        "    # BUG FIX: the original assigned `device` only when params.device was None,\n",
        "    # so passing an explicit device raised NameError on the print below.\n",
        "    if params.device is None:\n",
        "        device = f'cuda:{rank}' if torch.cuda.is_available() else 'cpu'\n",
        "    else:\n",
        "        device = params.device\n",
        "\n",
        "    print('rank:', rank, 'world_size:', world_size, 'device:', device)\n",
        "\n",
        "    import torch.distributed as dist\n",
        "    if distributed() and rank > 0:\n",
        "        dist.barrier()\n",
        "\n",
        "    model_name = 'roberta-large' if params.large else 'roberta-base'\n",
        "    # tokenization_utils.logger.setLevel('ERROR')\n",
        "    tokenizer = RobertaTokenizer.from_pretrained(model_name)\n",
        "    lm = RobertaForFusion.from_pretrained(model_name).to(device)\n",
        "\n",
        "    # Optionally warm-start the language model from a saved checkpoint.\n",
        "    if params.load_from_checkpoint:\n",
        "        if device == \"cpu\":\n",
        "            lm.load_state_dict(torch.load((params.checkpoint_dir + '{}.pt').format(params.checkpoint_name),\n",
        "                                             map_location='cpu')['model_state_dict'])\n",
        "        else:\n",
        "            lm.load_state_dict(\n",
        "                torch.load((params.checkpoint_dir + '{}.pt').format(params.checkpoint_name))['model_state_dict'])\n",
        "\n",
        "    model = FusedClassifier(lm=lm, device=device, FUSED_INPUT_SIZE=params.FUSED_INPUT_SIZE)\n",
        "\n",
        "    if rank == 0:\n",
        "        # NOTE(review): `summary` is not defined in this notebook's visible cells;\n",
        "        # presumably torchinfo.summary imported elsewhere -- confirm.\n",
        "        summary(model)\n",
        "        if distributed():\n",
        "            dist.barrier()\n",
        "\n",
        "    if world_size > 1:\n",
        "        model = DistributedDataParallel(model, [rank], output_device=rank, find_unused_parameters=True)\n",
        "\n",
        "    train_loader, validation_loader = load_datasets(params.text_dir, params.stylo_dir, params.dataset_name, params.imp_feat, tokenizer, params.batch_size, params.max_sequence_length)\n",
        "\n",
        "    optimizer = Adam(model.parameters(), lr=params.learning_rate, weight_decay=params.weight_decay)\n",
        "    epoch_loop = count(1) if params.max_epochs is None else range(1, params.max_epochs + 1)\n",
        "\n",
        "    best_validation_accuracy = 0\n",
        "    without_progress = 0\n",
        "    earlystop_epochs = 3\n",
        "\n",
        "    for epoch in epoch_loop:\n",
        "        if world_size > 1:\n",
        "            train_loader.sampler.set_epoch(epoch)\n",
        "            validation_loader.sampler.set_epoch(epoch)\n",
        "\n",
        "        train_metrics = train(model, optimizer, device, train_loader, f'Epoch {epoch}')\n",
        "        validation_metrics = validate(model, device, validation_loader)\n",
        "\n",
        "        combined_metrics = _all_reduce_dict({**validation_metrics, **train_metrics}, device)\n",
        "\n",
        "        # Convert summed counts into per-example averages.\n",
        "        combined_metrics[\"train/accuracy\"] /= combined_metrics[\"train/epoch_size\"]\n",
        "        combined_metrics[\"train/loss\"] /= combined_metrics[\"train/epoch_size\"]\n",
        "        combined_metrics[\"validation/accuracy\"] /= combined_metrics[\"validation/epoch_size\"]\n",
        "        combined_metrics[\"validation/loss\"] /= combined_metrics[\"validation/epoch_size\"]\n",
        "\n",
        "        if rank == 0:\n",
        "            # Checkpoint whenever validation accuracy improves.\n",
        "            if combined_metrics[\"validation/accuracy\"] > best_validation_accuracy:\n",
        "                without_progress = 0\n",
        "                best_validation_accuracy = combined_metrics[\"validation/accuracy\"]\n",
        "\n",
        "                model_to_save = model.module if hasattr(model, 'module') else model\n",
        "                torch.save(dict(\n",
        "                        epoch=epoch,\n",
        "                        model_state_dict=model_to_save.state_dict(),\n",
        "                        optimizer_state_dict=optimizer.state_dict()\n",
        "                    ),\n",
        "                    os.path.join(params.out_dir, params.dataset_name+\"_roberta_fusion_jstylo.pt\")\n",
        "                )\n",
        "\n",
        "        # NOTE(review): without_progress only resets on rank 0, so other ranks may\n",
        "        # early-stop independently -- confirm intended for multi-GPU runs.\n",
        "        without_progress += 1\n",
        "\n",
        "        if without_progress >= earlystop_epochs:\n",
        "            break\n",
        "\n",
        "\n",
        "if __name__ == '__main__':\n",
        "\n",
        "    # Stylometric feature columns fed into the fusion head.\n",
        "    imp_list = ['mean_word_count_sent', 'mean_sent_count_para', 'apos_mean_count', 'apos_mean_count_para','comma_mean_count', 'comma_mean_count_para', 'hash_mean_count', 'hash_mean_count_para', 'wc_lead_sent', 'wc_lead_para', 'num_count', 'passive_sent_count',\n",
        "       'past_tense_count', 'temp_inconsis']\n",
        "\n",
        "    params = {\"max_sequence_length\": 512, \n",
        "            \"learning_rate\" : 2e-5, \n",
        "            \"batch_size\" : 8, \n",
        "            \"val_batch_size\" : 16, \n",
        "            \"max_epochs\" : 10, \n",
        "            \"epoch_size\" : None,\n",
        "            \"device\" : None,\n",
        "            \"large\" : False,\n",
        "            \"seed\" : 1024,\n",
        "            \"checkpoint_dir\": \"\",\n",
        "            \"load_from_checkpoint\": False,\n",
        "            \"weight_decay\": 0,\n",
        "            \"FUSED_INPUT_SIZE\": 768 + len(imp_list),\n",
        "            \"hidden_size\" : 768,\n",
        "            \"num_features\" : 2,\n",
        "            \"num_labels\": 2,\n",
        "            \"classifier_dropout\" : 0.5, \n",
        "            \"out_dir\": \"\",\n",
        "            \"text_dir\": \"\",\n",
        "            \"stylo_dir\":\"\" , \n",
        "            \"dataset_name\": \"\", \n",
        "            \"imp_feat\":imp_list,\n",
        "            }\n",
        "    params = SimpleNamespace(**params)\n",
        "\n",
        "    # Count GPUs in a subprocess so this process doesn't initialize CUDA yet.\n",
        "    nproc = int(subprocess.check_output([sys.executable, '-c', \"import torch;\"\n",
        "                                         \"print(torch.cuda.device_count() if torch.cuda.is_available() else 1)\"]))\n",
        "    if nproc > 1:\n",
        "        print(f'Launching {nproc} processes ...', file=sys.stderr)\n",
        "\n",
        "        os.environ[\"MASTER_ADDR\"] = '127.0.0.1'\n",
        "        os.environ[\"MASTER_PORT\"] = str(29500)\n",
        "        os.environ['WORLD_SIZE'] = str(nproc)\n",
        "        # BUG FIX: the variable is OMP_NUM_THREADS (original set 'OMP_NUM_THREAD').\n",
        "        os.environ['OMP_NUM_THREADS'] = str(1)\n",
        "        subprocesses = []\n",
        "\n",
        "        for i in range(nproc):\n",
        "            os.environ['RANK'] = str(i)\n",
        "            os.environ['LOCAL_RANK'] = str(i)\n",
        "            # BUG FIX: `kwargs=params` passed a SimpleNamespace where a mapping is\n",
        "            # required; pass the namespace as run()'s single positional argument.\n",
        "            process = Process(target=run, args=(params,))\n",
        "            process.start()\n",
        "            subprocesses.append(process)\n",
        "\n",
        "        for process in subprocesses:\n",
        "            process.join()\n",
        "    else:\n",
        "        filter_models = [\"TT_chatgpt\"]\n",
        "\n",
        "        # Run each dataset sequentially in this single process.\n",
        "        for model in filter_models:\n",
        "          params.dataset_name = model\n",
        "\n",
        "          print(\"--------------------------------------------------------------\")\n",
        "          print(\"Model running on:\")\n",
        "          print(params)\n",
        "          run(params)\n",
        "          print()\n",
        "          print(\"--------------------------------------------------------------\")"
      ],
      "metadata": {
        "id": "zWdnBc_Olnlr"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Evaluation"
      ],
      "metadata": {
        "id": "PE77SnV-bK_j"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import math\n",
        "import torch\n",
        "import argparse\n",
        "from tqdm import tqdm\n",
        "import pandas as pd\n",
        "import random\n",
        "import time\n",
        "\n",
        "from torch.utils.data import DataLoader\n",
        "\n",
        "\n",
        "from sklearn.metrics import roc_curve\n",
        "from sklearn.metrics import roc_auc_score\n",
        "from matplotlib import pyplot\n",
        "\n",
        "from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
        "\n",
        "import decimal\n",
        "\n",
        "\n",
        "from transformers import *\n",
        "\n",
        "\n",
        "def float_range(start, stop, step):\n",
        "    while start < stop:\n",
        "        yield float(start)\n",
        "        start += decimal.Decimal(step)\n",
        "\n",
        "\n",
        "def calculate_program_metrics(far, pd):\n",
        "\n",
        "    pd_at_far = 0.0\n",
        "    pd_at_eer = 0.0\n",
        "    far_at_eer = 0.0\n",
        "\n",
        "    for i in range(len(far)):\n",
        "      if far[i] > 0.1:\n",
        "        pd_at_far = pd[i-1]\n",
        "        break\n",
        "\n",
        "    for i in range(len(far)):\n",
        "      if pd[i] > 1 - far[i]:\n",
        "        pd_at_eer = (pd[i-1] + pd[i])/2\n",
        "        far_at_eer = (far[i-1] + far[i])/2\n",
        "        break\n",
        "    \n",
        "    \n",
        "    print(\"pD @ 0.1 FAR = %.3f\" % (pd_at_far))\n",
        "    print(\"pD @ EER = %.3f\" % (pd_at_eer))\n",
        "    print(\"FAR @ EER = %.3f\" % (far_at_eer))\n",
        "\n",
        "\n",
        "\n",
        "class GeneratedTextDetection:\n",
        "    \"\"\"\n",
        "    Artifact class: wraps a trained detector checkpoint and scores input texts.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, args):\n",
        "        # Fixed seed so evaluation runs are repeatable.\n",
        "        torch.manual_seed(1000)\n",
        "\n",
        "        self.args = args\n",
        "\n",
        "        # Load the model from checkpoints\n",
        "        self.init_dict = self._init_detector()\n",
        "\n",
        "    def _init_detector(self):\n",
        "        \"\"\"Build the tokenizer/model pair selected by ``args.init_method``.\n",
        "\n",
        "        NOTE(review): implicitly returns None for any init_method other than\n",
        "        \"fused\" -- confirm callers only use the fused path.\n",
        "        \"\"\"\n",
        "\n",
        "        init_dict = {\"kn_model\": None, \"kn_tokenizer\": None,\n",
        "                    \"unk_model\": None, \"unk_tokenizer\": None,\n",
        "                   \"attr_model\": None, \"attr_tokenizer\": None, }\n",
        "\n",
        "        if self.args.init_method == \"fused\":\n",
        "            model_name = 'roberta-large' if self.args.large else 'roberta-base'\n",
        "            tokenization_utils.logger.setLevel('ERROR')\n",
        "            tokenizer = RobertaTokenizer.from_pretrained(model_name)\n",
        "            lm = RobertaForFusion.from_pretrained(model_name).to(self.args.device)\n",
        "\n",
        "            model = FusedClassifier(lm=lm, device=self.args.device, FUSED_INPUT_SIZE=self.args.FUSED_INPUT_SIZE)\n",
        "            # Load the model from checkpoints\n",
        "            if self.args.device == \"cpu\":\n",
        "                model.load_state_dict(torch.load((self.args.check_point + '{}.pt').format(self.args.known_model_name),\n",
        "                                                 map_location='cpu')['model_state_dict'])\n",
        "            else:\n",
        "                print((self.args.check_point + '{}.pt').format(self.args.known_model_name))\n",
        "                model.load_state_dict(\n",
        "                    torch.load((self.args.check_point + '{}.pt').format(self.args.known_model_name))['model_state_dict'])\n",
        "            \n",
        "            init_dict[\"kn_model\"] = model\n",
        "            init_dict[\"kn_tokenizer\"] = tokenizer\n",
        "            return init_dict\n",
        "\n",
        "      \n",
        "    def evaluate(self, input_text, stylo_feat):\n",
        "        \"\"\"\n",
        "           Run the detector on the given texts and return per-example scores and predictions.\n",
        "        \"\"\"\n",
        "\n",
        "        # Encapsulate the inputs\n",
        "        eval_dataset = EncodeEvalData(input_text, stylo_feat, self.init_dict[\"kn_tokenizer\"], self.args.max_sequence_length)\n",
        "        eval_loader = DataLoader(eval_dataset)\n",
        "\n",
        "        # Dictionary will contain all the scores and evidences generated by the model\n",
        "        results = {\"cls\": [], \"LLR_score\": [], \"prob_score\": {\"cls_0\": [], \"cls_1\": []}, \"generator\": None}\n",
        "\n",
        "        # Set eval mode\n",
        "        if self.args.init_method == \"fused\":\n",
        "            self.init_dict[\"kn_model\"].eval()\n",
        "\n",
        "      \n",
        "        with torch.no_grad():\n",
        "              for texts, masks, custom_features in eval_loader:\n",
        "                  texts, masks, custom_features = texts.to(self.args.device), masks.to(self.args.device), custom_features.to(self.args.device)\n",
        "\n",
        "                  if self.args.init_method == \"fused\":\n",
        "                      # The fused model handles all probes in one call.\n",
        "                      output_dic = self.init_dict[\"kn_model\"](data=[texts, masks], custom_features = custom_features)\n",
        "                      disc_out = output_dic\n",
        "\n",
        "                      # Per-class scores (column 0 -> cls_0, column 1 -> cls_1).\n",
        "                      cls0_prob = disc_out[:, 0].tolist()\n",
        "                      cls1_prob = disc_out[:, 1].tolist()\n",
        "\n",
        "                      results[\"prob_score\"][\"cls_0\"].extend(cls0_prob)\n",
        "                      results[\"prob_score\"][\"cls_1\"].extend(cls1_prob)\n",
        "\n",
        "                      # prior_llr = math.log10(0.5/0.5)\n",
        "\n",
        "                      # results[\"LLR_score\"].extend([math.log10(prob/(1-prob)) + prior_llr for prob in cls1_prob])\n",
        "\n",
        "                      # Predicted class = argmax over the two columns.\n",
        "                      _, predicted = torch.max(disc_out, 1)\n",
        "\n",
        "                      results[\"cls\"].extend(predicted.tolist())\n",
        "                     \n",
        "        return results\n",
        "\n",
        "\n",
        "\n",
        "def main():\n",
        "    \"\"\"Entry point: evaluate the fused detector on each generator's test set.\n",
        "\n",
        "    For every dataset in ``filter_models`` this loads the test split and its\n",
        "    precomputed stylometric features, scores every document with the fused\n",
        "    RoBERTa + stylometry classifier, and reports confusion counts, accuracy,\n",
        "    recall, precision, F1, and the ROC curve / AUC.\n",
        "    \"\"\"\n",
        "\n",
        "    # Stylometric features the fused model consumes (order must match training)\n",
        "    imp_list = ['mean_word_count_sent', 'mean_sent_count_para', 'apos_mean_count', 'apos_mean_count_para', 'wc_lead_sent', 'wc_lead_para', 'num_count', 'passive_sent_count',\n",
        "        'past_tense_count', 'temp_inconsis']\n",
        "\n",
        "    args = {\"max_sequence_length\": 512,\n",
        "            \"learning_rate\": 2e-5,\n",
        "            \"batch_size\": 8,\n",
        "            \"val_batch_size\": 16,\n",
        "            \"max_epochs\": 10,\n",
        "            \"epoch_size\": None,\n",
        "            \"device\": None,\n",
        "            \"large\": False,\n",
        "            \"seed\": 1024,\n",
        "            \"check_point\": \"\",\n",
        "            \"load_from_checkpoint\": False,\n",
        "            \"weight_decay\": 0,\n",
        "            # RoBERTa-base hidden size plus one slot per stylometric feature\n",
        "            \"FUSED_INPUT_SIZE\": 768 + len(imp_list),\n",
        "            \"hidden_size\": 768,\n",
        "            \"num_features\": 2,\n",
        "            \"num_labels\": 2,\n",
        "            \"classifier_dropout\": 0.5,\n",
        "            \"out_dir\": \"\",\n",
        "            \"text_dir\": \"\",\n",
        "            \"stylo_dir\": \"\",\n",
        "            \"dataset_name\": \"\",\n",
        "            \"imp_feat\": imp_list,\n",
        "            \"known_model_name\": \"\",\n",
        "            \"init_method\": \"fused\"\n",
        "            }\n",
        "    args = SimpleNamespace(**args)\n",
        "\n",
        "    if args.device is None:\n",
        "        args.device = f'cuda:{0}' if torch.cuda.is_available() else 'cpu'\n",
        "\n",
        "    filter_models = [\"TT_chatgpt\", \"TT_ctrl\", \"TT_gpt3\", \"TT_grover_mega\", \"TT_pplm_gpt2\", \"TT_gpt2_large\"]\n",
        "\n",
        "    for model in filter_models:\n",
        "        args.dataset_name = model\n",
        "        args.known_model_name = args.dataset_name + \"_roberta_fusion_jstylo\"\n",
        "\n",
        "        print(\"--------------------------------------------------------------\")\n",
        "        print(\"Model running on: \", args.dataset_name)\n",
        "\n",
        "        predict_prob = []\n",
        "        y = []\n",
        "        artifact = GeneratedTextDetection(args)\n",
        "\n",
        "        # Test texts plus their precomputed stylometric feature rows\n",
        "        # (assumed row-aligned with the test CSV -- TODO confirm upstream)\n",
        "        test_data = pd.read_csv(\"\" + args.dataset_name + \"/CSV/test.csv\")\n",
        "        stylo_feat = pd.read_csv(\"\" + args.dataset_name + \"_test_feature.csv\")\n",
        "\n",
        "        # Confusion-matrix counts\n",
        "        tp = 0\n",
        "        tn = 0\n",
        "        fn = 0\n",
        "        fp = 0\n",
        "\n",
        "        results = artifact.evaluate(test_data.text.values.tolist(), stylo_feat[imp_list].values)\n",
        "\n",
        "        for i, value in tqdm(test_data.iterrows()):\n",
        "            y.append(value.label)\n",
        "            predict_prob.append(results[\"prob_score\"]['cls_1'][i])\n",
        "            predicted = results[\"cls\"][i]\n",
        "\n",
        "            tp += ((predicted == value.label) & (value.label == 1))\n",
        "            tn += ((predicted == value.label) & (value.label == 0))\n",
        "            fn += ((predicted != value.label) & (value.label == 1))\n",
        "            fp += ((predicted != value.label) & (value.label == 0))\n",
        "\n",
        "        # Guard the ratios so a degenerate split (no positives, or no\n",
        "        # predicted positives) cannot raise ZeroDivisionError\n",
        "        recall = float(tp) / (tp + fn) if (tp + fn) else 0.0\n",
        "        precision = float(tp) / (tp + fp) if (tp + fp) else 0.0\n",
        "        f1_score = 2 * float(precision) * recall / (precision + recall) if (precision + recall) else 0.0\n",
        "\n",
        "        print('TP: %d' % tp)\n",
        "        print('TN: %d' % tn)\n",
        "        print('FP: %d' % fp)\n",
        "        print('FN: %d' % fn)\n",
        "\n",
        "        print('Accuracy of the discriminator: %d %%' % (\n",
        "                100 * (tp + tn) / (tp + tn + fp + fn)))\n",
        "        print('Recall of the discriminator: %d %%' % (\n",
        "            100 * recall))\n",
        "        print('Precision of the discriminator: %d %%' % (\n",
        "            100 * precision))\n",
        "        print('f1_score of the discriminator: %d %%' % (\n",
        "            100 * f1_score))\n",
        "\n",
        "        # ROC AUC computed on the class-1 scores\n",
        "        lr_auc = roc_auc_score(y, predict_prob)\n",
        "\n",
        "        print(\"\\n\")\n",
        "        print(\" ----- Extra Metrics -----\")\n",
        "        print()\n",
        "        print('Classifier: ROC AUC=%.3f' % (lr_auc))\n",
        "\n",
        "        # ROC curve and the project's extra operating-point metrics\n",
        "        lr_fpr, lr_tpr, _ = roc_curve(y, predict_prob)\n",
        "        calculate_program_metrics(lr_fpr, lr_tpr)\n",
        "\n",
        "        # Diagonal reference line representing random chance\n",
        "        eq_fpr = list(float_range(0, 1, 1 / len(lr_fpr)))\n",
        "        eq_tpr = list(eq_fpr)\n",
        "\n",
        "        # Plot the ROC curve for the model\n",
        "        pyplot.plot(lr_fpr, lr_tpr, marker='.', label='Roberta')\n",
        "        pyplot.plot(eq_fpr, eq_tpr, marker='.', label='Random Chance')\n",
        "        # Axis labels\n",
        "        pyplot.xlabel('Probability of False Alarm')\n",
        "        pyplot.ylabel('Probability of Detection')\n",
        "        # Show the legend and the plot\n",
        "        pyplot.legend()\n",
        "        pyplot.show()\n",
        "\n",
        "        print()\n",
        "        print(\"--------------------------------------------------------------\")\n",
        "\n",
        "# Run the per-dataset evaluation when this cell is executed as a script\n",
        "if __name__ == \"__main__\":\n",
        "    main()\n"
      ],
      "metadata": {
        "id": "2ajTcNFXbM5_"
      },
      "execution_count": null,
      "outputs": []
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "provenance": [],
      "collapsed_sections": [
        "Nd2k_imVfLuD"
      ],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}