{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ceebd81c-5895-4733-924d-12edf20c7bd5",
   "metadata": {},
   "source": [
    "# Chapter 5: Sentiment analysis with the Perceptron algorithm"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c0c404c7-c5b8-4d23-af33-6644cf5af57b",
   "metadata": {},
   "source": [
    "## Importing packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d4016f6a-2eba-4b18-9072-2f8dc879b864",
   "metadata": {},
   "outputs": [],
   "source": [
    "from matplotlib import pyplot as plt\n",
    "import numpy\n",
    "import tqdm"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6e1241d6-19dd-40c4-b4d8-db0ba62ff2e8",
   "metadata": {},
   "source": [
    "## Plotting functions\n",
    "\n",
    "\n",
     "We've extended the functionality of our previous plotting functions to include more arguments. As before, we don't use `plt.show()` because there are times when we combine these."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "6f6bf88f-af30-4c84-b4fe-a66724847f09",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helpers (Plotting) =========================================\n",
    "\n",
    "def plot_scatter(x_iterable, y_iterable, x_label = \"\", y_label = \"\",  legend = None, **kwargs):\n",
    "    x_array = numpy.array(x_iterable)\n",
    "    y_array = numpy.array(y_iterable)\n",
    "    plt.xlabel(x_label)\n",
    "    plt.xlabel(y_label)\n",
    "    if legend is not None:\n",
    "        plt.legend(legend)\n",
    "    plt.scatter(x_array, y_array, **kwargs)\n",
    "        \n",
    "def draw_line(slope, y_intercept, starting=0, ending=8, **kwargs):\n",
    "    x = numpy.linspace(starting, ending, 1000)\n",
    "    plt.plot(x, y_intercept + slope*x, **kwargs)\n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ffe99f2a-f251-4094-aa89-00116f28621b",
   "metadata": {},
   "source": [
    "## Coding the perceptron trick\n",
    "\n",
    "The perceptron algorithm adjusts the line (plane) up or down until the two sets are properly classified. We'll need a few helper functions to score our line (plane) and adjust."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "01d48f1d-c7d1-4a3c-8292-5d4c780408e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helpers (Perceptron) =======================================\n",
    "\n",
    "def calculate_score(array_feature, array_weights, bias):\n",
    "    \"\"\"\n",
    "    Utilizes the dot function because numpy allows\n",
    "    vector.dot(scalar) operations\n",
    "    \"\"\"\n",
    "    return array_feature.dot(array_weights) + bias\n",
    "\n",
    "def step(scalar):\n",
    "    if scalar >= 0:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "\n",
    "def prediction(array_feature, array_weights, bias):\n",
    "    score = calculate_score(array_feature, array_weights, bias)\n",
    "    return step(score)\n",
    "\n",
    "def calculate_error(array_feature, array_weights, bias, label):\n",
    "    \"\"\" Correct predictions should have no effect on our adjustment score \"\"\"\n",
    "    pred = prediction(array_feature, array_weights, bias)\n",
    "    if pred == label:\n",
    "        return 0\n",
    "    else:\n",
    "        score = calculate_score(array_feature, array_weights, bias)\n",
    "        return numpy.abs(score)\n",
    "\n",
    "\n",
    "# Helpers (Metrics) ==========================================\n",
    "\n",
    "def calculate_mean_perceptron_error(array_features, array_weights, bias, array_labels):\n",
    "    \"\"\"\n",
    "    Mean error in this case measures how well the entire line (plane) splits the data\n",
    "    The lower, the better.\n",
    "    \"\"\"\n",
    "    assert array_features.shape[0] == array_labels.shape[0]\n",
    "\n",
    "    total_error = 0\n",
    "    for feature, label in zip(array_features, array_labels):\n",
    "        total_error += calculate_error(feature, array_weights, bias, label)\n",
    "    return total_error/array_features.shape[0]\n",
    "\n",
    "# Model ======================================================\n",
    "\n",
    "def perceptron_trick(array_feature, array_weights, bias, label, learning_rate = 0.01):\n",
    "    \"\"\"\n",
    "    Perceptron trick v1.\n",
    "    Updates the weights, bias by the learning rate\n",
    "\n",
    "    If a point is misclassified above the line:\n",
    "        new weights = old weights - learning_rate * feature\n",
    "        bias -= learning rate\n",
    "\n",
    "    If a point is misclassified below the line:\n",
    "        new weights = old weights + learning_rate * feature\n",
    "        bias += learning rate\n",
    "    \"\"\"\n",
    "\n",
    "    pred = prediction(array_feature, array_weights, bias)\n",
    "    if pred == label:\n",
    "        return array_weights, bias\n",
    "    else:\n",
    "        if label==1 and pred==0:\n",
    "            array_weights = numpy.add(\n",
    "                array_weights, array_feature*learning_rate)\n",
    "            bias += learning_rate\n",
    "        elif label==0 and pred==1:\n",
    "            array_weights = numpy.subtract(\n",
    "                array_weights, array_feature*learning_rate)\n",
    "            bias -= learning_rate\n",
    "\n",
    "    return array_weights, bias\n",
    "\n",
    "def perceptron_trick(array_feature, array_weights, bias, label, learning_rate = 0.01):\n",
    "    \"\"\"\n",
    "    Perceptron trick v2.\n",
    "    Shorter version of the perceptron trick taking full advantage of the fact:\n",
    "        new weights = old weights + learning_rate * (label – prediction) * feature\n",
    "        bias += learning_rate * (label – prediction)\n",
    "    \"\"\"\n",
    "\n",
    "    pred = prediction(array_feature, array_weights, bias)\n",
    "    array_weights = numpy.add(\n",
    "        array_weights, (label-pred)*array_feature*learning_rate\n",
    "        )\n",
    "    bias += (label-pred)*learning_rate\n",
    "\n",
    "    return array_weights, bias"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "494899a2-f458-44a1-aafd-bb9508abdcd0",
   "metadata": {},
   "source": [
    "## Running the full perceptron algorithm\n",
    "\n",
    "We'll continually adjust our line until we reach convergence (or exhaust our iterations)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "017adde5-cf7b-4164-9286-13a343929f0e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def perceptron_algorithm(array_features, array_labels, learning_rate = 0.01, num_epochs = 200):\n",
    "    \"\"\"\n",
    "    Loop breaks when converges or if num_epochs is reached\n",
    "\n",
    "    Stores the best weights and bias in case of non-convergence\n",
    "    \"\"\"\n",
    "    assert array_features.shape[0] == array_labels.shape[0]\n",
    "\n",
    "    array_weights = numpy.ones(shape = array_features.shape[1])\n",
    "    bias = 0.0\n",
    "    best_weights = None\n",
    "    best_bias = None\n",
    "\n",
    "    # base case\n",
    "    count = 0\n",
    "    error = calculate_mean_perceptron_error(\n",
    "        array_features, array_weights, bias, array_labels)\n",
    "    iter_errors = [error]\n",
    "\n",
    "    progress_bar = tqdm.tqdm(total = num_epochs)\n",
    "    while (error >= 1e-16) and (count <= num_epochs):\n",
    "\n",
    "        error = calculate_mean_perceptron_error(\n",
    "            array_features, array_weights, bias, array_labels)\n",
    "\n",
    "        # Identifies best weights\n",
    "        if error < iter_errors[-1]:\n",
    "            best_weights = array_weights\n",
    "            best_bias = bias\n",
    "        iter_errors.append(error)\n",
    "\n",
    "        # Updates weights & bias\n",
    "        index = numpy.random.randint(0, array_features.shape[0] - 1)\n",
    "        array_weights, bias = perceptron_trick(\n",
    "            array_features[index], \n",
    "            array_weights, \n",
    "            bias, \n",
    "            array_labels[index],\n",
    "            learning_rate)\n",
    "\n",
    "        count +=1\n",
    "\n",
    "    progress_bar.close()\n",
    "\n",
    "    # Plotting error\n",
    "    plot_scatter(range(len(iter_errors)), iter_errors)\n",
    "    plt.title(\"Mean Perception Error per Iteration\")\n",
    "    plt.show()\n",
    "\n",
    "    return best_weights, best_bias"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e146d2e2-efa7-422b-b8cc-cc727e32775e",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
