{
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "# Mount Google Drive so files under /content/drive are accessible\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "9PtuiHA1DWp7",
        "outputId": "6a5e97fa-c384-4f53-c59d-b164e6d75f94"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Mounted at /content/drive\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IDk6_rLx0M-p"
      },
      "source": [
        "## Journalism Features"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5Xt3haxT0YnO"
      },
      "source": [
        "### Word Count, Sentence Count, Word count in a Sentence"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1EGDAtWW0LXw"
      },
      "outputs": [],
      "source": [
        "import re\n",
        "import string\n",
        "import numpy as np\n",
        "\n",
        "import nltk\n",
        "nltk.download('punkt')\n",
        "nltk.download('averaged_perceptron_tagger')\n",
        "from nltk import pos_tag\n",
        "from nltk.tokenize import word_tokenize, sent_tokenize\n",
        "\n",
        "import spacy\n",
        "# Download and load the large English spaCy model (used by is_passive later)\n",
        "!python -m spacy download en_core_web_lg\n",
        "nlp = spacy.load(\"en_core_web_lg\")\n",
        "\n",
        "# A word/sentence/paragraph only counts if it contains a letter or digit;\n",
        "# this filters out punctuation-only fragments.\n",
        "NON_PUNCT = re.compile('.*[A-Za-z0-9].*')\n",
        "\n",
        "def filter_units(tokens):\n",
        "  \"\"\"Keep only the tokens that contain at least one letter or digit.\"\"\"\n",
        "  return [t for t in tokens if NON_PUNCT.match(t)]\n",
        "\n",
        "def mean_std(counts):\n",
        "  \"\"\"Return (mean, population standard deviation); (0, 0) for empty input.\"\"\"\n",
        "  if len(counts) == 0:\n",
        "    return 0, 0\n",
        "  mean = sum(counts) / len(counts)\n",
        "  variance = sum((x - mean) ** 2 for x in counts) / len(counts)\n",
        "  return mean, variance ** 0.5\n",
        "\n",
        "def word_count(document):\n",
        "  \"\"\"Number of word tokens that contain a letter or digit.\"\"\"\n",
        "  return len(filter_units(word_tokenize(document)))\n",
        "\n",
        "def sentence_count(document):\n",
        "  \"\"\"Number of sentences that contain a letter or digit.\"\"\"\n",
        "  return len(filter_units(sent_tokenize(document)))\n",
        "\n",
        "def paragraph_count(document):\n",
        "  \"\"\"Number of non-blank lines (paragraphs) that contain a letter or digit.\"\"\"\n",
        "  return len(filter_units(document.splitlines()))\n",
        "\n",
        "def word_count_sent(document):\n",
        "  \"\"\"(mean, std) of the word count per sentence.\"\"\"\n",
        "  sentences = filter_units(sent_tokenize(document))\n",
        "  return mean_std([word_count(sent) for sent in sentences])\n",
        "\n",
        "def word_count_para(document):\n",
        "  \"\"\"(mean, std) of the word count per paragraph (line).\"\"\"\n",
        "  paragraphs = filter_units(document.splitlines())\n",
        "  return mean_std([word_count(para) for para in paragraphs])\n",
        "\n",
        "def sent_count_para(document):\n",
        "  \"\"\"(mean, std) of the sentence count per paragraph (line).\"\"\"\n",
        "  paragraphs = filter_units(document.splitlines())\n",
        "  return mean_std([sentence_count(para) for para in paragraphs])"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "0rh8VKbwrQza"
      },
      "source": [
        "## Punctuation Analysis"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "XcMPvG-vrVuA"
      },
      "outputs": [],
      "source": [
        "import string\n",
        "\n",
        "def total_punc_count(document):\n",
        "  \"\"\"Total number of punctuation characters in the document.\"\"\"\n",
        "  return sum(1 for char in document if char in string.punctuation)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "DVsRrdwozUPC"
      },
      "outputs": [],
      "source": [
        "import string\n",
        "\n",
        "def special_punc_count(document, special_puncts):\n",
        "  \"\"\"Per-mark counts normalized by the document's total punctuation count.\n",
        "\n",
        "  Returns all zeros when the document contains no punctuation at all.\n",
        "  \"\"\"\n",
        "  punct_count = [document.count(punct) for punct in special_puncts]\n",
        "\n",
        "  total_puncts = total_punc_count(document)\n",
        "  if total_puncts == 0:\n",
        "    return [0 for count in punct_count]\n",
        "  return [float(count) / total_puncts for count in punct_count]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 36
        },
        "id": "SecQdXemn1Fp",
        "outputId": "d4893030-f238-480c-d75f-25225e0ff9a9"
      },
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'3.4.4'"
            ],
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            }
          },
          "metadata": {},
          "execution_count": 4
        }
      ],
      "source": [
        "# Confirm which spaCy version the runtime is using\n",
        "spacy.__version__"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ZgQPaoUGCihb"
      },
      "outputs": [],
      "source": [
        "def _special_punc_count_units(units, special_puncts):\n",
        "  \"\"\"Mean occurrences of each special punctuation mark per text unit.\"\"\"\n",
        "  punct_count = [0 for i in special_puncts]  # Init as 0\n",
        "\n",
        "  if not units:\n",
        "    return punct_count\n",
        "\n",
        "  for unit in units:\n",
        "    # enumerate instead of special_puncts.index(punct) — avoids a linear\n",
        "    # scan per mark on every unit\n",
        "    for i, punct in enumerate(special_puncts):\n",
        "      punct_count[i] += unit.count(punct)\n",
        "\n",
        "  return [float(count) / len(units) for count in punct_count]\n",
        "\n",
        "\n",
        "def special_punc_count_sent(document, special_puncts):\n",
        "  \"\"\"Mean count of each special punctuation mark per sentence.\"\"\"\n",
        "  tokens = sent_tokenize(document)\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  return _special_punc_count_units(filtered, special_puncts)\n",
        "\n",
        "\n",
        "def special_punc_count_para(document, special_puncts):\n",
        "  \"\"\"Mean count of each special punctuation mark per paragraph (line).\"\"\"\n",
        "  tokens = document.splitlines()\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  return _special_punc_count_units(filtered, special_puncts)"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Journalism Standard Analysis"
      ],
      "metadata": {
        "id": "WCFzyKrnXpWb"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# %pip (not bare pip) installs into the running kernel's environment\n",
        "%pip install -q datefinder"
      ],
      "metadata": {
        "id": "yAgLwGauw7m2",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "5c42dda2-c9e0-4183-e9ff-33ad08cc0147"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting datefinder\n",
            "  Downloading datefinder-0.7.3-py2.py3-none-any.whl (10 kB)\n",
            "Requirement already satisfied: pytz in /usr/local/lib/python3.9/dist-packages (from datefinder) (2022.7.1)\n",
            "Requirement already satisfied: regex>=2017.02.08 in /usr/local/lib/python3.9/dist-packages (from datefinder) (2022.10.31)\n",
            "Requirement already satisfied: python-dateutil>=2.4.2 in /usr/local/lib/python3.9/dist-packages (from datefinder) (2.8.2)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.4.2->datefinder) (1.16.0)\n",
            "Installing collected packages: datefinder\n",
            "Successfully installed datefinder-0.7.3\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Y67ChzdkvWx-"
      },
      "outputs": [],
      "source": [
        "import datefinder\n",
        "\n",
        "def word_count_lead_sent(document):\n",
        "  \"\"\"Word count of the lead (first) sentence; 0 for an empty document.\"\"\"\n",
        "  tokens = sent_tokenize(document)\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  if not filtered:  # was an IndexError on documents with no real sentence\n",
        "    return 0\n",
        "  return word_count(filtered[0])\n",
        "\n",
        "\n",
        "def word_count_lead_para(document):\n",
        "  \"\"\"Word count of the lead (first) paragraph; 0 for an empty document.\"\"\"\n",
        "  tokens = document.splitlines()\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  if not filtered:  # was an IndexError on documents with no real paragraph\n",
        "    return 0\n",
        "  return word_count(filtered[0])\n",
        "\n",
        "\n",
        "def number_count(document):\n",
        "  \"\"\"Count numerals below 10 (AP style spells out one through nine).\"\"\"\n",
        "  total_digits = 0\n",
        "\n",
        "  tokens = word_tokenize(document)\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  for s in filtered:\n",
        "    if s.isnumeric():\n",
        "      try:\n",
        "        num = int(s)\n",
        "        # was `num < 9`, which wrongly excluded the numeral 9 itself\n",
        "        if num < 10:\n",
        "          total_digits += 1\n",
        "      except ValueError:\n",
        "        # isnumeric() accepts characters int() rejects (e.g. superscripts)\n",
        "        pass\n",
        "\n",
        "  return total_digits\n",
        "\n",
        "\n",
        "def number_count_para(document, avg=True):\n",
        "  \"\"\"Numerals-below-10 per paragraph: the mean when avg=True, else the total.\"\"\"\n",
        "  tokens = document.splitlines()\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  num_counts = [number_count(para) for para in filtered]\n",
        "\n",
        "  if avg:\n",
        "    if len(num_counts) == 0:\n",
        "      return 0  # was `return 0, 0`, inconsistent with the scalar mean below\n",
        "    return sum(num_counts) / len(num_counts)\n",
        "  return sum(num_counts)\n",
        "\n",
        "\n",
        "def passive_sent_count(document, avg=True):\n",
        "  \"\"\"Passive-voice sentences: the fraction when avg=True, else the total.\"\"\"\n",
        "  tokens = sent_tokenize(document)\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  passive_count = [is_passive(sent) for sent in filtered]\n",
        "\n",
        "  if avg:\n",
        "    if not passive_count:  # was a ZeroDivisionError on empty documents\n",
        "      return 0\n",
        "    return sum(passive_count) / len(passive_count)\n",
        "  return sum(passive_count)\n",
        "\n",
        "\n",
        "def past_sent_count(document, avg=True):\n",
        "  \"\"\"Past-tense sentences: the fraction when avg=True, else the total.\"\"\"\n",
        "  tokens = sent_tokenize(document)\n",
        "\n",
        "  nonPunct = re.compile('.*[A-Za-z0-9].*')  # must contain a letter or digit\n",
        "  filtered = [w for w in tokens if nonPunct.match(w)]\n",
        "\n",
        "  past_count = [is_past(sent) for sent in filtered]\n",
        "\n",
        "  if avg:\n",
        "    if not past_count:  # was a ZeroDivisionError on empty documents\n",
        "      return 0\n",
        "    return sum(past_count) / len(past_count)\n",
        "  return sum(past_count)\n",
        "\n",
        "\n",
        "def is_passive(inputSentence):\n",
        "  \"\"\"True if spaCy assigns a passive dependency ('agent' or 'nsubjpass').\"\"\"\n",
        "  doc_file = nlp(inputSentence)\n",
        "  all_tags = [token.dep_ for token in doc_file]\n",
        "  return any('agent' in tag or 'nsubjpass' in tag for tag in all_tags)\n",
        "\n",
        "\n",
        "def is_past(inputSentence):\n",
        "  \"\"\"True if the sentence contains a past-tense verb (POS tag VBD or VBN).\"\"\"\n",
        "  tokens_tag = pos_tag(inputSentence.split())\n",
        "  all_tokens = [tag for word, tag in tokens_tag]\n",
        "  return \"VBD\" in all_tokens or \"VBN\" in all_tokens\n",
        "\n",
        "\n",
        "def get_temporal_pharses(document):\n",
        "  \"\"\"Return the source text of every date/time phrase datefinder detects.\"\"\"\n",
        "  matches = datefinder.find_dates(document, source=True, index=True)\n",
        "  return [match[1] for match in matches]\n",
        "\n",
        "\n",
        "def _matches_any(time_string, words):\n",
        "  \"\"\"True if any word occurs in time_string (literal match, dots escaped).\n",
        "\n",
        "  The original built patterns with r'{}'.format(word), where an unescaped\n",
        "  '.' is a regex wildcard, so e.g. 'a.m.' could match unrelated text.\n",
        "  \"\"\"\n",
        "  return any(re.search(re.escape(word), time_string) for word in words)\n",
        "\n",
        "\n",
        "def is_time(time_string, std = False):\n",
        "  \"\"\"True if the string has a time marker; std=True checks AP style only.\"\"\"\n",
        "  if std:\n",
        "    time_postfix = [\"a.m.\", \"p.m.\"]\n",
        "  else:\n",
        "    time_string = time_string.lower()\n",
        "    time_postfix = [\"a.m.\", \"p.m.\", \"am\", \"pm\", \"am.\", \"pm.\", \"a.m\", \"p.m\"]\n",
        "\n",
        "  return _matches_any(time_string, time_postfix)\n",
        "\n",
        "\n",
        "def is_day(time_string, std = False):\n",
        "  \"\"\"True if the string has a weekday name; std=True checks AP style only.\"\"\"\n",
        "  if std:\n",
        "    day_postfix = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n",
        "  else:\n",
        "    time_string = time_string.lower()\n",
        "    # lowercase patterns so they can match the lowercased string (the\n",
        "    # original capitalized list could never match); also fixes the missing\n",
        "    # comma that fused \"Thur\" \"Fri\" into the single string \"ThurFri\"\n",
        "    day_postfix = [\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\",\n",
        "                   \"mon\", \"tue\", \"wed\", \"thu\", \"thur\", \"fri\", \"sat\", \"sun\"]\n",
        "\n",
        "  return _matches_any(time_string, day_postfix)\n",
        "\n",
        "\n",
        "def is_month(time_string, std = False):\n",
        "  \"\"\"True if the string has a month name; std=True checks AP style only.\"\"\"\n",
        "  if std:\n",
        "    # AP style: March-July spelled out, the rest abbreviated with a period\n",
        "    months = [\"March\", \"April\", \"May\", \"June\", \"July\",\n",
        "              \"Jan.\", \"Feb.\", \"Aug.\", \"Sept.\", \"Oct.\", \"Nov.\", \"Dec.\"]\n",
        "  else:\n",
        "    time_string = time_string.lower()\n",
        "    # lowercase patterns so they can match the lowercased string (the\n",
        "    # original capitalized list could never match)\n",
        "    months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\", \"july\",\n",
        "              \"august\", \"september\", \"october\", \"november\", \"december\",\n",
        "              \"jan\", \"feb\", \"mar\", \"apr\", \"jun\", \"jul\", \"aug\", \"sept\", \"oct\", \"nov\", \"dec\"]\n",
        "\n",
        "  return _matches_any(time_string, months)\n",
        "\n",
        "\n",
        "def temporal_style_inconsit_count(document):\n",
        "  \"\"\"Count temporal phrases violating AP style (time, weekday or month).\"\"\"\n",
        "  temp_obj_list = get_temporal_pharses(document)\n",
        "\n",
        "  violation_count = 0\n",
        "\n",
        "  # A single phrase can violate more than one category, so the checks are\n",
        "  # deliberately independent (not elif)\n",
        "  for temp_obj in temp_obj_list:\n",
        "    if is_time(temp_obj) and not is_time(temp_obj, std=True):\n",
        "      violation_count += 1\n",
        "\n",
        "    if is_day(temp_obj) and not is_day(temp_obj, std=True):\n",
        "      violation_count += 1\n",
        "\n",
        "    if is_month(temp_obj) and not is_month(temp_obj, std=True):\n",
        "      violation_count += 1\n",
        "\n",
        "  return violation_count"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Sanity check: a string mixing AP-style and non-AP date/time formats\n",
        "string_with_dates = '''\n",
        "    Central design committee session Tuesday 10/22 6:30 pm\n",
        "    Th 9/19 LAB: Serial encoding (Section 2.2)\n",
        "    There will be another one on December 15th for those who are unable to make it today.\n",
        "    Workbook 3 (Minimum Wage): due Wednesday 9/18 11:59pm\n",
        "    He will be flying in Sept. 15th.\n",
        "    We expect to deliver Thu. 05 this between late 2021 and early 2022 6 a.m.\n",
        "'''\n",
        "\n",
        "temporal_style_inconsit_count(string_with_dates)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "4lg4g4guHeOC",
        "outputId": "c0e67dd9-aee9-430e-9431-380e915c60d2"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "3"
            ]
          },
          "metadata": {},
          "execution_count": 26
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "imO_l7G4Gbm8"
      },
      "source": [
        "## Feature Extraction"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "hGgMcA-cpYSk"
      },
      "source": [
        "### Combined function to get journalism features "
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "2dnchUVepuvQ"
      },
      "outputs": [],
      "source": [
        "import pandas as pd  # was missing: `pd` is used below but never imported\n",
        "from tqdm.notebook import tqdm\n",
        "\n",
        "\n",
        "def get_features(data):\n",
        "  \"\"\"Build the journalism feature matrix for a frame with a `text` column.\n",
        "\n",
        "  Publishes the feature-name lists and the resulting frame as globals so\n",
        "  other cells can reuse them, and returns the feature DataFrame.\n",
        "  \"\"\"\n",
        "  global phraseology_features\n",
        "  global special_puncts\n",
        "  global special_punct_names\n",
        "  global punct_analysis_features\n",
        "  global style_guide_features\n",
        "  global frame_cols\n",
        "  global data_features\n",
        "\n",
        "  phraseology_features = [\"word_count\", \"sent_count\", \"para_count\", \"mean_word_count_sent\", \"std_word_count_sent\",\"mean_word_count_para\", \"std_word_count_para\", \"mean_sent_count_para\", \"std_sent_count_para\"]\n",
        "  special_puncts = [\"!\",\"'\", \",\", \"-\", \":\", \";\", \"?\", \"@\", \"\\\"\", \"=\", \"#\"]\n",
        "\n",
        "  special_punct_names = [\"excla\",\"apos\", \"comma\", \"hypn\", \"col\", \"semicol\", \"ques\", \"at\", \"qot\", \"dhypn\", \"hash\"]\n",
        "\n",
        "  punct_analysis_features = [\"total_punct_count\"]\n",
        "\n",
        "  style_guide_features = [\"wc_lead_sent\", \"wc_lead_para\", \"num_count\", \"passive_sent_count\", \"past_tense_count\", \"temp_inconsis\"]\n",
        "\n",
        "  frame_cols = []\n",
        "  data_features = []  # (was initialized twice)\n",
        "\n",
        "  # One mean-count column per punctuation mark at document, sentence and\n",
        "  # paragraph level; the suffix-major order must match the order in which\n",
        "  # feature_row is extended below\n",
        "  for suffix in [\"_mean_count\", \"_mean_count_sent\", \"_mean_count_para\"]:\n",
        "    for punct in special_punct_names:\n",
        "      punct_analysis_features.append(punct + suffix)\n",
        "\n",
        "  for value in tqdm(data.itertuples()):\n",
        "\n",
        "    document = str(value.text)\n",
        "\n",
        "    if not document:\n",
        "      # placeholder keeps the tokenizers and lead-unit features well-defined\n",
        "      document = \"empty\"\n",
        "\n",
        "    feature_row = []\n",
        "    ## phraseology features\n",
        "    feature_row.append(word_count(document))\n",
        "    feature_row.append(sentence_count(document))\n",
        "    feature_row.append(paragraph_count(document))\n",
        "\n",
        "    # word count per sentence\n",
        "    word_count_vals = word_count_sent(document)\n",
        "    feature_row.append(word_count_vals[0])\n",
        "    feature_row.append(word_count_vals[1])\n",
        "\n",
        "    # word count per paragraph\n",
        "    word_count_vals = word_count_para(document)\n",
        "    feature_row.append(word_count_vals[0])\n",
        "    feature_row.append(word_count_vals[1])\n",
        "\n",
        "    # sentence count per paragraph\n",
        "    sent_count_vals = sent_count_para(document)\n",
        "    feature_row.append(sent_count_vals[0])\n",
        "    feature_row.append(sent_count_vals[1])\n",
        "\n",
        "    ## punctuation features\n",
        "    feature_row.append(total_punc_count(document))\n",
        "    feature_row.extend(special_punc_count(document, special_puncts))\n",
        "    feature_row.extend(special_punc_count_sent(document, special_puncts))\n",
        "    feature_row.extend(special_punc_count_para(document, special_puncts))\n",
        "\n",
        "    ## style guide features\n",
        "    feature_row.append(word_count_lead_sent(document))\n",
        "    feature_row.append(word_count_lead_para(document))\n",
        "    feature_row.append(number_count_para(document))\n",
        "    feature_row.append(passive_sent_count(document))\n",
        "    feature_row.append(past_sent_count(document))\n",
        "    feature_row.append(temporal_style_inconsit_count(document))\n",
        "\n",
        "    data_features.append(feature_row)\n",
        "\n",
        "  frame_cols = phraseology_features.copy()\n",
        "  frame_cols.extend(punct_analysis_features)\n",
        "  frame_cols.extend(style_guide_features)\n",
        "\n",
        "  print(frame_cols)\n",
        "\n",
        "  data_features = pd.DataFrame(data_features, columns=frame_cols)\n",
        "  return data_features"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": [],
      "collapsed_sections": [
        "QBUlyKGApQrn",
        "9zovyfxt2BIi",
        "TiDR2aJEJAQT"
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}