{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3a96c50f-7825-49f2-b4f1-73a9a21ba695",
   "metadata": {},
   "outputs": [],
   "source": [
    "import findspark"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "90c3c1c7-fd57-4fab-b709-c83fa0d92ce4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on function init in module findspark:\n",
      "\n",
      "init(spark_home=None, python_path=None, edit_rc=False, edit_profile=False)\n",
      "    Make pyspark importable.\n",
      "    \n",
      "    Sets environment variables and adds dependencies to sys.path.\n",
      "    If no Spark location is provided, will try to find an installation.\n",
      "    \n",
      "    Parameters\n",
      "    ----------\n",
      "    spark_home : str, optional, default = None\n",
      "        Path to Spark installation, will try to find automatically\n",
      "        if not provided.\n",
      "    python_path : str, optional, default = None\n",
      "        Path to Python for Spark workers (PYSPARK_PYTHON),\n",
      "        will use the currently running Python if not provided.\n",
      "    edit_rc : bool, optional, default = False\n",
      "        Whether to attempt to persist changes by appending to shell\n",
      "        config.\n",
      "    edit_profile : bool, optional, default = False\n",
      "        Whether to create an IPython startup file to automatically\n",
      "        configure and import pyspark.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(findspark.init)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "df29fb6c-d0c6-4802-af48-23211e645c43",
   "metadata": {},
   "outputs": [],
   "source": [
    "findspark.init()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "95ef4590-8b2b-4e61-a955-e586d8691952",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql import SparkSession"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "83ece8d8-6474-4bb9-85f4-fa09694b2dab",
   "metadata": {},
   "outputs": [],
   "source": [
    "spark = (SparkSession\n",
    "         .builder\n",
    "         .appName(\"analysis\")\n",
    "         .getOrCreate()\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "987d0a26-f92e-461e-83ca-c7c5e8ef2af8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------------------------------------------------------+\n",
      "|value                                                               |\n",
      "+--------------------------------------------------------------------+\n",
      "|The Project Gutenberg EBook of Pride and Prejudice, by Jane Austen  |\n",
      "|                                                                    |\n",
      "|This eBook is for the use of anyone anywhere at no cost and with    |\n",
      "|almost no restrictions whatsoever.  You may copy it, give it away or|\n",
      "|re-use it under the terms of the Project Gutenberg License included |\n",
      "+--------------------------------------------------------------------+\n",
      "only showing top 5 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "book = spark.read.text(\"data/1342-0.txt\")\n",
    "book.show(5, truncate=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "d2323391-b48c-44ea-a344-a35082e683bb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DataFrame[value: string]"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "book"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "527f6c03-ca46-4cab-9d4f-cbf5ea9f2eb1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- value: string (nullable = true)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "book.printSchema()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "e448ab4f-d641-4a8a-ae35-64dcb695485b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('value', 'string')]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "book.dtypes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "2a8e2188-94ad-4a73-8044-c8f3429c3636",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\u001b[1;31mSignature:\u001b[0m\n",
       "\u001b[0mbook\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m\n",
       "\u001b[0m    \u001b[0mn\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mint\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m20\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n",
       "\u001b[0m    \u001b[0mtruncate\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mUnion\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mbool\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mint\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n",
       "\u001b[0m    \u001b[0mvertical\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n",
       "\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
       "\u001b[1;31mDocstring:\u001b[0m\n",
       "Prints the first ``n`` rows to the console.\n",
       "\n",
       ".. versionadded:: 1.3.0\n",
       "\n",
       ".. versionchanged:: 3.4.0\n",
       "    Supports Spark Connect.\n",
       "\n",
       "Parameters\n",
       "----------\n",
       "n : int, optional\n",
       "    Number of rows to show.\n",
       "truncate : bool or int, optional\n",
       "    If set to ``True``, truncate strings longer than 20 chars by default.\n",
       "    If set to a number greater than one, truncates long strings to length ``truncate``\n",
       "    and align cells right.\n",
       "vertical : bool, optional\n",
       "    If set to ``True``, print output rows vertically (one line\n",
       "    per column value).\n",
       "\n",
       "Examples\n",
       "--------\n",
       ">>> df = spark.createDataFrame([\n",
       "...     (14, \"Tom\"), (23, \"Alice\"), (16, \"Bob\")], [\"age\", \"name\"])\n",
       "\n",
       "Show only top 2 rows.\n",
       "\n",
       ">>> df.show(2)\n",
       "+---+-----+\n",
       "|age| name|\n",
       "+---+-----+\n",
       "| 14|  Tom|\n",
       "| 23|Alice|\n",
       "+---+-----+\n",
       "only showing top 2 rows\n",
       "\n",
       "Show :class:`DataFrame` where the maximum number of characters is 3.\n",
       "\n",
       ">>> df.show(truncate=3)\n",
       "+---+----+\n",
       "|age|name|\n",
       "+---+----+\n",
       "| 14| Tom|\n",
       "| 23| Ali|\n",
       "| 16| Bob|\n",
       "+---+----+\n",
       "\n",
       "Show :class:`DataFrame` vertically.\n",
       "\n",
       ">>> df.show(vertical=True)\n",
       "-RECORD 0-----\n",
       "age  | 14\n",
       "name | Tom\n",
       "-RECORD 1-----\n",
       "age  | 23\n",
       "name | Alice\n",
       "-RECORD 2-----\n",
       "age  | 16\n",
       "name | Bob\n",
       "\u001b[1;31mFile:\u001b[0m      d:\\appdata\\local\\mambaforge\\envs\\python39ml\\lib\\site-packages\\pyspark\\sql\\dataframe.py\n",
       "\u001b[1;31mType:\u001b[0m      method"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "book.show?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "65e9e950-beef-4a4a-8358-09fdb5f0c54a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-RECORD 0---------------------------------------------------------------------\n",
      " value | The Project Gutenberg EBook of Pride and Prejudice, by Jane Austen   \n",
      "-RECORD 1---------------------------------------------------------------------\n",
      " value |                                                                      \n",
      "-RECORD 2---------------------------------------------------------------------\n",
      " value | This eBook is for the use of anyone anywhere at no cost and with     \n",
      "-RECORD 3---------------------------------------------------------------------\n",
      " value | almost no restrictions whatsoever.  You may copy it, give it away or \n",
      "-RECORD 4---------------------------------------------------------------------\n",
      " value | re-use it under the terms of the Project Gutenberg License included  \n",
      "only showing top 5 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "book.show(5,truncate=False,vertical=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "5b1087e4-0c9a-424e-b193-0296315b50a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import col,split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "936964ab-7e63-4a52-835a-a29e7ae6719c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+\n",
      "|                line|\n",
      "+--------------------+\n",
      "|[The, Project, Gu...|\n",
      "|                  []|\n",
      "|[This, eBook, is,...|\n",
      "|[almost, no, rest...|\n",
      "|[re-use, it, unde...|\n",
      "+--------------------+\n",
      "only showing top 5 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "lines = book.select(split(col(\"value\"),\" \").alias(\"line\"))\n",
    "lines.show(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "74452ceb-0a05-4c0f-ba69-5383ee0cc1dc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- line: array (nullable = true)\n",
      " |    |-- element: string (containsNull = false)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "lines.printSchema()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "a85be5d6-f167-4ebf-a617-8652a25dc17b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import explode,lower"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "38d7229d-af6c-4ac6-92bb-f9bdab060f5d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+\n",
      "|      word|\n",
      "+----------+\n",
      "|       The|\n",
      "|   Project|\n",
      "| Gutenberg|\n",
      "|     EBook|\n",
      "|        of|\n",
      "|     Pride|\n",
      "|       and|\n",
      "|Prejudice,|\n",
      "|        by|\n",
      "|      Jane|\n",
      "|    Austen|\n",
      "|      This|\n",
      "|     eBook|\n",
      "|        is|\n",
      "|       for|\n",
      "|       the|\n",
      "|       use|\n",
      "|        of|\n",
      "|    anyone|\n",
      "|  anywhere|\n",
      "+----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "words  = (lines.select(explode(col(\"line\")).alias(\"word\"))\n",
    "        .filter(col('word')!=''))\n",
    "words.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "01d36008-901e-49ec-a733-8723af85031c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import regexp_extract"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "0a64a86b-b6b1-4d2a-88b1-15f83559c714",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---------+\n",
      "|     word|\n",
      "+---------+\n",
      "|      the|\n",
      "|  project|\n",
      "|gutenberg|\n",
      "|    ebook|\n",
      "|       of|\n",
      "|    pride|\n",
      "|      and|\n",
      "|prejudice|\n",
      "|       by|\n",
      "|     jane|\n",
      "|   austen|\n",
      "|     this|\n",
      "|    ebook|\n",
      "|       is|\n",
      "|      for|\n",
      "|      the|\n",
      "|      use|\n",
      "|       of|\n",
      "|   anyone|\n",
      "| anywhere|\n",
      "+---------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "words  = (words.select(lower(col(\"word\")).alias('word'))\n",
    "          .select(regexp_extract(col(\"word\"),'[a-z]+',0).alias('word')))\n",
    "words.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "81c4fc1a-e02a-4f34-9fef-58c131c4f5b9",
   "metadata": {},
   "source": [
    "## Exercise 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "d5c2299f-d2e7-4dc8-bd54-195d7f6c0cfa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- one: string (nullable = true)\n",
      " |-- two: string (nullable = true)\n",
      " |-- three: long (nullable = true)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "exo2_2_df = spark.createDataFrame(\n",
    "    [[\"test\",\"more test\",10_000_000_000]],\n",
    "    ['one','two','three']\n",
    ")\n",
    "exo2_2_df.printSchema()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "695eeaa5-0eb6-4131-b988-cb62e3d21635",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sum(1 for _, dtype in exo2_2_df.dtypes if dtype != 'string')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cb075288-a755-4fd8-ba1d-cd1a8c8106bb",
   "metadata": {},
   "source": [
    "## Exercise 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "824795f9-dff8-4045-95ce-145983ed8b9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "31b67b99-8e86-41b5-93c8-aaaa6a73b3ac",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------+\n",
      "|number_of_char|\n",
      "+--------------+\n",
      "|            66|\n",
      "|             0|\n",
      "|            64|\n",
      "|            68|\n",
      "|            67|\n",
      "+--------------+\n",
      "only showing top 5 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "exo2_3_df = (\n",
    "    spark.read.text(\"data/1342-0.txt\")\n",
    "    .select(length(col('value')).alias('number_of_char'))\n",
    "            \n",
    ")\n",
    "exo2_3_df.show(5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c17f7ad1-a509-40a0-8f6c-60f7140bad1e",
   "metadata": {},
   "source": [
    "## Exercise 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "495ffca3-c4f2-4648-8b58-a22d542950c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import greatest\n",
    "from pyspark.sql.utils import AnalysisException"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "df1adecb-6e3d-4dee-a271-696b66f1082a",
   "metadata": {},
   "outputs": [],
   "source": [
    "exo2_4_df = spark.createDataFrame(\n",
    "    [[\"key1\",10_000,20_000],[\"key2\",40_000,30_000]],\n",
    "    [\"key\",\"value1\",\"value2\"]\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "8a7b8c70-4d35-4be4-a04e-d6604a911b64",
   "metadata": {},
   "outputs": [],
   "source": [
    "try:\n",
    "    exo2_4_mod = (\n",
    "        exo2_4_df.select(\"key\",greatest(col(\"value1\"),col(\"value2\")).alias(\"maximum_value\"))\n",
    "        .select(\"key\",\"maximum_value\")\n",
    "                 )\n",
    "except AnalysisException as err:\n",
    "    print(err)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "46a8b1d0-b1b3-41c1-98f5-d3e2cc3a838c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----+-------------+\n",
      "| key|maximum_value|\n",
      "+----+-------------+\n",
      "|key1|        20000|\n",
      "|key2|        40000|\n",
      "+----+-------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "exo2_4_mod.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "54e3b7d7-7b95-450e-8df5-32b083dab91c",
   "metadata": {},
   "source": [
    "## Exercise 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "7d939c78-3d1e-46c5-b924-3d53e7b63646",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------------+\n",
      "|        word|\n",
      "+------------+\n",
      "|     project|\n",
      "|   gutenberg|\n",
      "|       ebook|\n",
      "|       pride|\n",
      "|   prejudice|\n",
      "|        jane|\n",
      "|      austen|\n",
      "|        this|\n",
      "|       ebook|\n",
      "|      anyone|\n",
      "|    anywhere|\n",
      "|        cost|\n",
      "|        with|\n",
      "|      almost|\n",
      "|restrictions|\n",
      "|  whatsoever|\n",
      "|        copy|\n",
      "|        give|\n",
      "|        away|\n",
      "|       under|\n",
      "+------------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "(\n",
    "    words.filter(col(\"word\") != \"is\")\n",
    "    .filter(length(col(\"word\"))>3)\n",
    "    .show()\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "baae5cc6-7dc6-4cf4-bf95-3cd3ba45db7f",
   "metadata": {},
   "source": [
    "## Exercise 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "ce2f29dd-8b02-49f6-8472-44e4d48d4d0a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---------+\n",
      "|     word|\n",
      "+---------+\n",
      "|  project|\n",
      "|gutenberg|\n",
      "|    ebook|\n",
      "|       of|\n",
      "|    pride|\n",
      "+---------+\n",
      "only showing top 5 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "(\n",
    "    words.filter(~col(\"word\").isin([\"is\",\"not\",\"the\",\"if\"]))\n",
    "    .show(5)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "3513ff4e-7e40-43a2-8ee0-e3e3ec774ae5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----+-----+\n",
      "|word|count|\n",
      "+----+-----+\n",
      "| the| 4496|\n",
      "|  to| 4235|\n",
      "|  of| 3719|\n",
      "| and| 3602|\n",
      "| her| 2223|\n",
      "|   i| 2052|\n",
      "|   a| 1997|\n",
      "|  in| 1920|\n",
      "| was| 1844|\n",
      "| she| 1703|\n",
      "+----+-----+\n",
      "only showing top 10 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "res = (\n",
    "    words.groupby(\"word\").count()\n",
    "    .orderBy(col(\"count\").desc())\n",
    ")\n",
    "res.show(10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "3de149c1-46e0-462a-8d19-494627b29bef",
   "metadata": {},
   "outputs": [],
   "source": [
    "# res.coalesce(1).write.csv(\"simple_count.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "39fff3b0-21dd-482d-b760-e71b21a26675",
   "metadata": {},
   "outputs": [],
   "source": [
    "spark.stop()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4649bf06-1048-4718-9318-b5d9cc237bca",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
