{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "uRdcc6DkmAE9",
        "outputId": "2eb8e5dc-fb89-4a66-d46d-787f30ee6164"
      },
      "outputs": [],
      "source": [
        "# Install Java 8, Spark, findspark and pyspark\n",
        "!apt-get install openjdk-8-jdk-headless -qq > /dev/null\n",
        "!wget -q https://downloads.apache.org/spark/spark-3.1.2/spark-3.1.2-bin-hadoop2.7.tgz\n",
        "!tar -xf spark-3.1.2-bin-hadoop2.7.tgz\n",
        "!pip install -q findspark\n",
        "!pip install pyspark\n",
        "# NOTE(review): both a local Spark 3.1.2 distribution and the unpinned\n",
        "# 'pyspark' pip package are installed; findspark points the session at the\n",
        "# local distribution -- confirm the two versions stay in sync.\n",
        "# Set env variables\n",
        "import os\n",
        "os.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\n",
        "os.environ[\"SPARK_HOME\"] = \"/content/spark-3.1.2-bin-hadoop2.7\"\n",
        "# Make the GraphFrames package available when the JVM is launched\n",
        "os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages graphframes:graphframes:0.8.1-spark3.0-s_2.12 pyspark-shell'\n",
        "# Initialize with findspark to import pyspark as a regular library\n",
        "import findspark\n",
        "findspark.init(\"spark-3.1.2-bin-hadoop2.7\")\n",
        "# Obtain a Spark session reference\n",
        "from pyspark.sql import SparkSession\n",
        "spark = SparkSession.builder.master(\"local[*]\").getOrCreate()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Mus32b1bws6Z"
      },
      "outputs": [],
      "source": [
        "from pyspark.sql.types import *\n",
        "from pyspark.sql import functions as F\n",
        "from pyspark.sql import Window\n",
        "# Import GraphFrame explicitly rather than via wildcard: it is the only\n",
        "# name from graphframes referenced in this notebook\n",
        "from graphframes import GraphFrame"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "L-0ZU1NkJSKN",
        "outputId": "a455ff0d-5472-48e7-9e30-b1487a309358"
      },
      "outputs": [],
      "source": [
        "# Visualization dependencies: Plotly/Dash stack used by the cells below\n",
        "!pip install plotly==5.1.0\n",
        "!pip install dash --upgrade\n",
        "!pip install jupyter-dash\n",
        "!pip install dash-cytoscape==0.2.0\n",
        "!pip install dash-bootstrap-components"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "pNAW0ZcgFyY9"
      },
      "outputs": [],
      "source": [
        "import plotly.express as px\n",
        "from jupyter_dash import JupyterDash\n",
        "import dash_core_components as dcc\n",
        "import dash_html_components as html\n",
        "from dash.dependencies import Input, Output\n",
        "import dash_cytoscape as cyto\n",
        "import plotly.graph_objs as go\n",
        "import plotly\n",
        "import dash\n",
        "from dash.exceptions import PreventUpdate\n",
        "import dash_bootstrap_components as dbc"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vv6QE3MrvDLn"
      },
      "outputs": [],
      "source": [
        "def page_rank(graph : GraphFrame, \n",
        "              tolerance = 10e-6, \n",
        "              n_iter = 100, \n",
        "              beta = 0.85, \n",
        "              verbose = True):\n",
        "  ''' Computes the PageRank for each node in the input graph.\n",
        "\n",
        "  This is the implementation of the PageRank original algorithm with \n",
        "  the teleportation technique using Spark DataFrames.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  graph: GraphFrame\n",
        "    A graph object\n",
        "  tolerance: positive number\n",
        "    Represents the tolerance threshold when checking the distance\n",
        "    between the pageRank at the previous step and the values at the\n",
        "    current step\n",
        "  n_iter: positive number\n",
        "    Maximum number of iterations. At least one between 'tolerance' and\n",
        "    'n_iter' must be provided.\n",
        "  beta: value between 0 and 1\n",
        "    Corresponds to the probability to follow an outgoing link,\n",
        "    the damping factor.\n",
        "  verbose: logical\n",
        "    If True displays messages on function status during execution\n",
        "\n",
        "  Returns\n",
        "  -------\n",
        "  A Spark dataframe containing all vertices with the corresponding\n",
        "  pageRank value\n",
        "  '''\n",
        "  from math import sqrt\n",
        "  # Arg validation ('is None' instead of '== None', per PEP 8)\n",
        "  if (tolerance is None and n_iter is None):\n",
        "    raise ValueError(\"At least one argument between 'tolerance' and 'n_iter' must be set\")\n",
        "  if (beta > 1 or beta < 0):\n",
        "    raise ValueError(\"'beta' must be a value between 0 and 1\")\n",
        "  # Get the out degree of each node. Note: only nodes with at least 1 out edge are returned by this method\n",
        "  out_deg = graph.outDegrees\n",
        "  # Get the transition matrix in form of triples by applying a transformation to the edges df\n",
        "  if verbose:\n",
        "    print(\"Computing transition matrix...\")\n",
        "  # t_value of edge (src, dst) is beta / outDegree(dst): in the iteration\n",
        "  # below the rank of 'dst' is distributed to 'src'\n",
        "  transition_matrix = graph.edges \\\n",
        "  .join(out_deg.withColumnRenamed(\"id\", \"dst\"), \"dst\", how = \"left\")\n",
        "  transition_matrix = transition_matrix.withColumn(\"t_value\", (1 / F.col(\"outDegree\")*beta)) \\\n",
        "  .select(\"src\", \"dst\", \"t_value\").cache() # beta*M\n",
        "  # Obtain the total number of nodes\n",
        "  n_nodes = graph.vertices.count()\n",
        "  # Separate isolated nodes from connected nodes\n",
        "  if verbose:\n",
        "    print(\"Computing initial vectors for nodes...\")\n",
        "  # Get ids of isolated nodes\n",
        "  connected_t0 = graph.vertices.select(\"id\") \\\n",
        "    .join(graph.edges.select(F.col(\"src\").alias(\"id\")), \n",
        "          on = \"id\", how = \"leftsemi\") \\\n",
        "    .withColumn(\"rank\", F.lit(1 / n_nodes)).cache()\n",
        "  isolated_t0 = graph.vertices.select(\"id\") \\\n",
        "    .join(connected_t0, \"id\", how = \"leftanti\") \\\n",
        "    .withColumn(\"rank\", F.lit(1 / n_nodes)).cache()\n",
        "  isolated_nodes_n = isolated_t0.count()\n",
        "  sdiff_isolated = 0\n",
        "  if isolated_nodes_n > 0:\n",
        "    # Compute rank for isolated components and squared diff\n",
        "    isolated_t1 = isolated_t0.withColumn(\"pageRank\", F.lit((1-beta)*1/n_nodes))\n",
        "    sdiff_isolated = isolated_t1.withColumn(\"row\", F.lit(1)) \\\n",
        "    .withColumn(\"sdiff\", F.sum(F.pow(F.col(\"pageRank\") - F.col(\"rank\"), 2)).over(Window.partitionBy(\"row\"))) \\\n",
        "    .select(\"sdiff\").first()[0]\n",
        "    isolated_t1 = isolated_t1.drop(\"rank\").cache()\n",
        "  # Iterate\n",
        "  iteration = 0\n",
        "  if verbose:\n",
        "    print(\"Starting cycle\")\n",
        "  while True:\n",
        "    iteration += 1\n",
        "    if verbose:\n",
        "      print(\"Iteration: \" + str(iteration))\n",
        "    # Join transition matrix with connected_t0 (on dst)\n",
        "    t_connected_0 = transition_matrix.join(connected_t0.withColumnRenamed(\"id\", \"dst\"), \n",
        "                                            \"dst\", \n",
        "                                            how = \"left\")\n",
        "    # Compute rank for connected components\n",
        "    t_connected_1 = t_connected_0.withColumn(\"pageRank_i\", F.col(\"t_value\")*F.col(\"rank\"))\n",
        "    # 'window' partitions and orders by the same column, so all rows of a\n",
        "    # partition are ordering peers and the F.sum below spans the whole\n",
        "    # partition (Spark's default RANGE frame includes peers)\n",
        "    window = Window.partitionBy(\"id\").orderBy(\"id\")\n",
        "    connected_t1 = t_connected_1.select(t_connected_1.src.alias(\"id\"), \n",
        "                                        t_connected_1.pageRank_i) \\\n",
        "                                        .withColumn(\"row\", F.row_number().over(window)) \\\n",
        "                                        .withColumn(\"pageRank\", F.sum(F.col(\"pageRank_i\")) \\\n",
        "                                                    .over(window) + ((1-beta)*1/n_nodes)) \\\n",
        "                                        .drop(\"pageRank_i\") \\\n",
        "                                        .where(F.col(\"row\")==1).select(\"id\", \"pageRank\")\n",
        "    # Compute distance\n",
        "    if iteration > 1:\n",
        "      sdiff_isolated = 0\n",
        "    delta = connected_t1.join(connected_t0, on = \"id\", how = \"left\") \\\n",
        "    .withColumn(\"row\", F.lit(1)) \\\n",
        "    .withColumn(\"delta\",\n",
        "                F.sum(F.pow(F.col(\"rank\") - F.col(\"pageRank\"), 2)).over(Window.partitionBy(\"row\"))) \\\n",
        "                .select(\"delta\").first()[0] + sdiff_isolated\n",
        "    delta = sqrt(delta)\n",
        "    # Check end conditions\n",
        "    max_iter_reached = True if n_iter is not None and iteration == n_iter else False\n",
        "    tol_ok = True if tolerance is not None and delta <= tolerance else False\n",
        "    if max_iter_reached or tol_ok:\n",
        "      if isolated_nodes_n > 0:\n",
        "        final = graph.vertices.join(connected_t1.union(isolated_t1), \"id\").persist()\n",
        "        isolated_t1.unpersist()\n",
        "      else:\n",
        "        final = graph.vertices.join(connected_t1, \"id\").persist()\n",
        "      if verbose:\n",
        "        print(\"Finished\")\n",
        "      transition_matrix.unpersist()\n",
        "      isolated_t0.unpersist()\n",
        "      connected_t0.unpersist()\n",
        "      return final\n",
        "    else:\n",
        "      # Swap\n",
        "      connected_t0 = connected_t1.withColumnRenamed(\"pageRank\", \"rank\")\n",
        "  return None"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "wuvwW2b3zCDq"
      },
      "outputs": [],
      "source": [
        "# Function for each worker (internal)\n",
        "def _topic_rank(tm, nodes, edges, topic, beta, n_iter, tolerance, verbose):\n",
        "  '''Compute topic-sensitive PageRank for a single topic (internal worker).\n",
        "\n",
        "  Parameters mirror 'topic_sensitive_page_rank': 'tm' is the beta-scaled\n",
        "  transition matrix, 'nodes'/'edges' are the graph DataFrames and 'topic'\n",
        "  is the genre the teleportation is biased towards.\n",
        "\n",
        "  Returns a cached DataFrame with columns ('id', <topic>).\n",
        "  '''\n",
        "  from math import sqrt\n",
        "  if verbose:\n",
        "    print(\"Worker '\" + topic + \"' started\")\n",
        "  # Initialization\n",
        "  topic_df = nodes.withColumn(\"intopic\", F.when(F.array_contains(F.col(\"genres\"), topic), 1).otherwise(0))\n",
        "  tm_mod = tm.join(topic_df.select(F.col(\"id\").alias(\"src\"), \"intopic\"), \"src\", how=\"left\").cache()\n",
        "  set_count = topic_df.where(F.col(\"intopic\") == 1).count()\n",
        "  # NOTE(review): if no node carries this genre, set_count is 0 and the\n",
        "  # division below is ill-defined -- confirm topics are validated upstream\n",
        "  topic_df = topic_df.withColumn(\"rank\", F.col(\"intopic\") / set_count) \\\n",
        "    .select(\"id\",\"intopic\", \"rank\")\n",
        "  # Separating connected components\n",
        "  connected_t0 = topic_df \\\n",
        "    .join(edges.withColumnRenamed(\"src\", \"id\"), \n",
        "          on = \"id\", how = \"leftsemi\").drop(\"intopic\")\n",
        "  isolated_t0 = topic_df \\\n",
        "    .join(connected_t0, \"id\", how = \"leftanti\").cache()\n",
        "  isolated_nodes_n = isolated_t0.count()\n",
        "  sdiff_isolated = 0\n",
        "  if isolated_nodes_n > 0:\n",
        "    # Compute rank for isolated components and squared diff\n",
        "    isolated_t1 = isolated_t0.withColumn(\"pageRank\", F.when(F.col(\"intopic\") == 0, 0).otherwise(F.lit((1-beta)*1/set_count)))\n",
        "    sdiff_isolated = isolated_t1.withColumn(\"row\", F.lit(1)) \\\n",
        "    .withColumn(\"sdiff\", F.sum(F.pow(F.col(\"pageRank\") - F.col(\"rank\"), 2)).over(Window.partitionBy(\"row\"))) \\\n",
        "    .select(\"sdiff\").first()[0]\n",
        "    isolated_t1 = isolated_t1.drop(\"rank\").drop(\"intopic\")\n",
        "    isolated_t0 = isolated_t0.drop(\"intopic\")\n",
        "  else:\n",
        "    isolated_t0 = isolated_t0.drop(\"intopic\")\n",
        "  # Iterate\n",
        "  iteration = 0\n",
        "  while True:\n",
        "    iteration += 1\n",
        "    if verbose:\n",
        "      print(\"Worker '\" + topic + \"': iteration \" + str(iteration))\n",
        "    # Join transition matrix with connected_t0 (on dst)\n",
        "    t_connected_0 = tm_mod.join(connected_t0.withColumnRenamed(\"id\", \"dst\"), \n",
        "                                        \"dst\", \n",
        "                                        how = \"left\")\n",
        "    # Compute rank for connected components\n",
        "    t_connected_1 = t_connected_0.withColumn(\"pageRank_i\", F.col(\"t_value\")*F.col(\"rank\"))\n",
        "    window = Window.partitionBy(\"id\").orderBy(\"id\")\n",
        "    connected_t1 = t_connected_1.select(t_connected_1.src.alias(\"id\"), \n",
        "                                        t_connected_1.intopic,\n",
        "                                        t_connected_1.pageRank_i) \\\n",
        "                                        .withColumn(\"row\", F.row_number().over(window)) \\\n",
        "                                        .withColumn(\"pageRank\", F.sum(F.col(\"pageRank_i\")).over(window))                                   \n",
        "    # The teleportation term (1-beta)/set_count is added only to nodes\n",
        "    # belonging to the topic set\n",
        "    connected_t1 = connected_t1 \\\n",
        "      .where(F.col(\"row\")==1).select(\"id\", \"intopic\", \"pageRank\") \\\n",
        "      .withColumn(\"pageRank\", F.when(F.col(\"intopic\") == 1, \n",
        "                                     F.col(\"pageRank\") + ((1-beta)*1/set_count)) \\\n",
        "                              .otherwise(F.col(\"pageRank\"))) \\\n",
        "      .drop(\"intopic\")                          \n",
        "    # Compute distance\n",
        "    if iteration > 1:\n",
        "      sdiff_isolated = 0\n",
        "    delta = connected_t1.join(connected_t0, on = \"id\", how = \"left\") \\\n",
        "    .withColumn(\"row\", F.lit(1)) \\\n",
        "    .withColumn(\"delta\",\n",
        "                F.sum(F.pow(F.col(\"rank\") - F.col(\"pageRank\"), 2)).over(Window.partitionBy(\"row\"))) \\\n",
        "                .select(\"delta\").first()[0] + sdiff_isolated\n",
        "    delta = sqrt(delta)\n",
        "    # Check end conditions\n",
        "    max_iter_reached = True if n_iter is not None and iteration == n_iter else False\n",
        "    tol_ok = True if tolerance is not None and delta <= tolerance else False\n",
        "    if max_iter_reached or tol_ok:\n",
        "      if isolated_nodes_n > 0:\n",
        "        final = connected_t1.union(isolated_t1).select(F.col(\"id\"), F.col(\"pageRank\").alias(topic)).cache()\n",
        "      else:\n",
        "        final = connected_t1.select(F.col(\"id\"), F.col(\"pageRank\").alias(topic)).cache()\n",
        "      if verbose:\n",
        "        print(\"Worker '\" + topic + \"' done\")\n",
        "      tm_mod.unpersist()\n",
        "      isolated_t0.unpersist()\n",
        "      return final\n",
        "    else:\n",
        "      # Swap\n",
        "      connected_t0 = connected_t1.withColumnRenamed(\"pageRank\", \"rank\")\n",
        "  return None"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "XT-5DGKEPMGL"
      },
      "outputs": [],
      "source": [
        "def topic_sensitive_page_rank(graph : GraphFrame,\n",
        "                              topics = None, \n",
        "                              tolerance = 10e-6, \n",
        "                              n_iter = 100, \n",
        "                              beta = 0.85, \n",
        "                              verbose = True):\n",
        "  ''' Computes the topic-sensitive PageRank for each node in the input graph.\n",
        "\n",
        "  Implementation of the topic-sensitive version of PageRank. Topics should be\n",
        "  chosen between available movie genres.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  graph: GraphFrame\n",
        "    A graph object\n",
        "  topics: list\n",
        "    A list of movie genres of interest. If 'None' corresponds to\n",
        "    classical pageRank.\n",
        "  tolerance: positive number\n",
        "    Represents the tolerance threshold when checking the distance\n",
        "    between the pageRank at the previous step and the values at the\n",
        "    current step\n",
        "  n_iter: positive number\n",
        "    Maximum number of iterations. At least one between 'tolerance' and\n",
        "    'n_iter' must be provided.\n",
        "  beta: value between 0 and 1\n",
        "    Corresponds to the probability to follow an outgoing link,\n",
        "    the damping factor.\n",
        "  verbose: logical\n",
        "    If True displays messages on function status during execution\n",
        "\n",
        "  Returns\n",
        "  -------\n",
        "  A Spark DataFrame with nodes info and one column of PageRank values for\n",
        "  each topic in input\n",
        "  '''\n",
        "  # Import ThreadPool from the submodule explicitly: 'multiprocessing.pool'\n",
        "  # is not guaranteed to be reachable as an attribute of the 'multiprocessing'\n",
        "  # package until the submodule itself has been imported\n",
        "  from multiprocessing.pool import ThreadPool\n",
        "  from functools import reduce\n",
        "  # Arg validation ('is None' instead of '== None', per PEP 8)\n",
        "  if (tolerance is None and n_iter is None):\n",
        "    raise ValueError(\"At least one argument between 'tolerance' and 'n_iter' must be set\")\n",
        "  if (beta > 1 or beta < 0):\n",
        "    raise ValueError(\"'beta' must be a value between 0 and 1\")\n",
        "\n",
        "  if topics is None or len(topics) == 0:\n",
        "    return page_rank(graph, tolerance=tolerance, n_iter=n_iter, beta=beta, verbose=verbose)\n",
        "  # Get the out degree of each node\n",
        "  out_deg = graph.outDegrees\n",
        "  # Get the transition matrix in form of triples by applying a transformation to the edges df\n",
        "  if verbose:\n",
        "    print(\"Computing transition matrix...\")\n",
        "  transition_matrix = graph.edges \\\n",
        "  .join(out_deg.withColumnRenamed(\"id\", \"dst\"), \n",
        "                                   \"dst\", how = \"left\")\n",
        "  transition_matrix = transition_matrix.withColumn(\"t_value\", (1 / F.col(\"outDegree\")*beta)) \\\n",
        "  .select(\"src\", \"dst\", \"t_value\").cache() # beta*M\n",
        "  if verbose:\n",
        "    print(\"Launching workers...\")\n",
        "  # One thread per topic; threads (not processes) so workers share the\n",
        "  # SparkSession and the cached transition matrix\n",
        "  pool = ThreadPool(len(topics))\n",
        "  arg_list = [(transition_matrix, graph.vertices, graph.edges, t, beta, n_iter, tolerance, verbose) for t in topics]\n",
        "  res = pool.starmap(_topic_rank, arg_list)\n",
        "  if verbose:\n",
        "    print(\"Workers finished, producing final data frame...\")\n",
        "  res = reduce(lambda x,y: x.join(y, \"id\", \"left\"), res)\n",
        "  res = graph.vertices.join(res, \"id\", how=\"left\").persist()\n",
        "  pool.close()\n",
        "  if verbose:\n",
        "    print(\"Finished!\")\n",
        "  transition_matrix.unpersist()\n",
        "  return res"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "8XtLMbf2Sm3A"
      },
      "outputs": [],
      "source": [
        "def weighted_page_rank(graph : GraphFrame, \n",
        "                       weights = None,\n",
        "                       na_politic = 'min_value',\n",
        "                       tolerance = 10e-6, \n",
        "                       n_iter = 100, \n",
        "                       beta = 0.85, \n",
        "                       verbose = True):\n",
        "  ''' Computes an edge-weighted version of PageRank.\n",
        "\n",
        "  Weights are provided as a Spark DataFrame, if no weights are provided\n",
        "  the algorithm is equivalent to the base version of PageRank.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  graph: GraphFrame\n",
        "    A graph object\n",
        "  weights: either None (for non-weighted PageRank) or a Spark DataFrame\n",
        "    If a DataFrame is provided it should contain 2 columns named 'movie_id'\n",
        "    and 'weight'\n",
        "  na_politic: one between 'min_value' and 'drop'.\n",
        "    Influences how missing weights are treated - if 'min_value', NAs are \n",
        "    replaced with the minimum weight value found, otherwise the associated movie\n",
        "    is filtered out, possibly removing associated edges\n",
        "  tolerance: positive number\n",
        "    Represents the tolerance threshold when checking the distance\n",
        "    between the pageRank at the previous step and the values at the\n",
        "    current step\n",
        "  n_iter: positive number\n",
        "    Maximum number of iterations. At least one between 'tolerance' and\n",
        "    'n_iter' must be provided.\n",
        "  beta: value between 0 and 1\n",
        "    Corresponds to the probability to follow an outgoing link,\n",
        "  verbose: logical\n",
        "    If True displays messages on function status during execution\n",
        "\n",
        "  Returns\n",
        "  -------\n",
        "  A Spark DataFrame\n",
        "  '''\n",
        "  from pyspark.sql import DataFrame\n",
        "  import multiprocessing as ms\n",
        "  from functools import reduce\n",
        "  from math import sqrt\n",
        "  # Arg validation ('is None' instead of '== None', per PEP 8)\n",
        "  if (tolerance is None and n_iter is None):\n",
        "    raise ValueError(\"At least one argument between 'tolerance' and 'n_iter' must be set\")\n",
        "  if (beta > 1 or beta < 0):\n",
        "    raise ValueError(\"'beta' must be a value between 0 and 1\")\n",
        "  if weights is None:\n",
        "    weighted = False\n",
        "  elif isinstance(weights, DataFrame):\n",
        "    weighted = True\n",
        "  else:\n",
        "    raise ValueError(\"'weights' must be either 'None' or a DataFrame\")\n",
        "  if not weighted:\n",
        "    ## Internally call pr function\n",
        "    result = page_rank(graph = graph,\n",
        "                        tolerance = tolerance, \n",
        "                        n_iter = n_iter, \n",
        "                        beta = beta, \n",
        "                        verbose = verbose)\n",
        "    return result\n",
        "  if na_politic not in ['min_value', 'drop']:\n",
        "    print(\"Warning: unknown 'na_politic', using default value\")\n",
        "    na_politic = 'min_value'\n",
        "  if verbose:\n",
        "    print(\"Computing transition matrix...\")\n",
        "  # Assign weights to edges and compute transition matrix\n",
        "  window1 = Window.partitionBy(\"src\", \"dst\").orderBy(\"src\", \"dst\")\n",
        "  window2 = Window.partitionBy(\"dst\")\n",
        "  transition_matrix = graph.edges \\\n",
        "  .withColumn(\"movie_id\", F.explode(F.col(\"movie_ids\"))) \\\n",
        "  .join(weights, \"movie_id\", \"left\")\n",
        "  ## Dealing with missing weights\n",
        "  if na_politic == 'min_value':\n",
        "    min_weight = weights.agg({'weight':'min'}).collect()[0][0]\n",
        "    transition_matrix = transition_matrix.na.fill(value=min_weight, subset=['weight'])\n",
        "  else:\n",
        "    transition_matrix = transition_matrix.where(F.col(\"weight\").isNotNull())\n",
        "  # Row numbers are assigned only AFTER the NA handling: assigning them before\n",
        "  # the 'drop' filter could delete the row numbered 1 of an edge whose other\n",
        "  # movies do have weights, silently removing the whole edge\n",
        "  transition_matrix = transition_matrix \\\n",
        "  .withColumn(\"row\", F.row_number().over(window1)) \\\n",
        "  .withColumn(\"weight\", F.sum(F.col(\"weight\")).over(window1)) \\\n",
        "  .where(F.col(\"row\") == 1) \\\n",
        "  .withColumn(\"out_weight\", F.sum(F.col(\"weight\")).over(window2)) \\\n",
        "  .withColumn(\"t_value\", beta*(F.col(\"weight\") / F.col(\"out_weight\"))) \\\n",
        "  .select(\"src\", \"dst\", \"t_value\").cache()\n",
        "  # Separating connected components\n",
        "  n_nodes = graph.vertices.count()\n",
        "  connected_t0 = graph.vertices.select(\"id\") \\\n",
        "    .join(graph.edges.select(F.col(\"src\").alias(\"id\")), \n",
        "          on = \"id\", how = \"leftsemi\") \\\n",
        "    .withColumn(\"rank\", F.lit(1 / n_nodes)).cache()\n",
        "  isolated_t0 = graph.vertices.select(\"id\") \\\n",
        "    .join(connected_t0, \"id\", how = \"leftanti\") \\\n",
        "    .withColumn(\"rank\", F.lit(1 / n_nodes)).cache()\n",
        "  isolated_nodes_n = isolated_t0.count()\n",
        "  sdiff_isolated = 0\n",
        "  if isolated_nodes_n > 0:\n",
        "    # Compute rank for isolated components and squared diff\n",
        "    isolated_t1 = isolated_t0.withColumn(\"pageRank\", F.lit((1-beta)*1/n_nodes))\n",
        "    sdiff_isolated = isolated_t1.withColumn(\"row\", F.lit(1)) \\\n",
        "    .withColumn(\"sdiff\", F.sum(F.pow(F.col(\"pageRank\") - F.col(\"rank\"), 2)).over(Window.partitionBy(\"row\"))) \\\n",
        "    .select(\"sdiff\").first()[0]\n",
        "    isolated_t1 = isolated_t1.drop(\"rank\").cache()\n",
        "  # Iterate\n",
        "  iteration = 0\n",
        "  while True:\n",
        "    iteration += 1\n",
        "    if verbose:\n",
        "      print(\"Iteration: \" + str(iteration))\n",
        "    # Join transition matrix with connected_t0 (on dst)\n",
        "    t_connected_0 = transition_matrix.join(connected_t0.withColumnRenamed(\"id\", \"dst\"), \"dst\", how = \"left\")\n",
        "    # Compute rank for connected components\n",
        "    t_connected_1 = t_connected_0.withColumn(\"pageRank_i\", F.col(\"t_value\")*F.col(\"rank\"))\n",
        "    window = Window.partitionBy(\"id\").orderBy(\"id\")\n",
        "    connected_t1 = t_connected_1.select(t_connected_1.src.alias(\"id\"), \n",
        "                                        t_connected_1.pageRank_i) \\\n",
        "                                        .withColumn(\"row\", F.row_number().over(window)) \\\n",
        "                                        .withColumn(\"pageRank\", F.sum(F.col(\"pageRank_i\")).over(window))\n",
        "    connected_t1 = connected_t1 \\\n",
        "      .drop(\"pageRank_i\") \\\n",
        "      .where(F.col(\"row\")==1).select(\"id\", \"pageRank\") \\\n",
        "      .withColumn(\"pageRank\", F.col(\"pageRank\") + (1-beta)*1/n_nodes)\n",
        "    # Compute distance\n",
        "    if iteration > 1:\n",
        "      sdiff_isolated = 0\n",
        "    delta = connected_t1.join(connected_t0, on = \"id\", how = \"left\") \\\n",
        "    .withColumn(\"row\", F.lit(1)) \\\n",
        "    .withColumn(\"delta\",\n",
        "                F.sum(F.pow(F.col(\"rank\") - F.col(\"pageRank\"), 2)).over(Window.partitionBy(\"row\"))) \\\n",
        "                .select(\"delta\").first()[0] + sdiff_isolated\n",
        "    delta = sqrt(delta)\n",
        "    # Check end conditions\n",
        "    max_iter_reached = True if n_iter is not None and iteration == n_iter else False\n",
        "    tol_ok = True if tolerance is not None and delta <= tolerance else False\n",
        "    if max_iter_reached or tol_ok:\n",
        "      if isolated_nodes_n > 0:\n",
        "        final = graph.vertices.join(connected_t1.union(isolated_t1), \"id\").persist()\n",
        "        isolated_t1.unpersist()\n",
        "      else:\n",
        "        final = graph.vertices.join(connected_t1, \"id\").persist()\n",
        "      if verbose:\n",
        "        print(\"Finished\")\n",
        "      transition_matrix.unpersist()\n",
        "      connected_t0.unpersist()\n",
        "      isolated_t0.unpersist()\n",
        "      return final\n",
        "    else:\n",
        "      # Swap\n",
        "      connected_t0 = connected_t1.withColumnRenamed(\"pageRank\", \"rank\")\n",
        "  return None"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "aWp3sDX-EwT-"
      },
      "outputs": [],
      "source": [
        "# Toy actor-collaboration graph: every edge is listed in both directions\n",
        "# and carries the ids of the movies shared by the two actors\n",
        "edges = spark.createDataFrame([\n",
        "   (1, 2, [\"m1\"]),\n",
        "   (2, 1, [\"m1\"]),\n",
        "   (1, 5, [\"m1\", \"m4\"]),\n",
        "   (5, 1, [\"m1\", \"m4\"]),\n",
        "   (5, 8, [\"m4\"]),\n",
        "   (8, 5, [\"m4\"]),\n",
        "   (8, 10, [\"m3\"]),\n",
        "   (10, 8, [\"m3\"]),\n",
        "   (2, 3, [\"m2\", \"m5\"]),\n",
        "   (3, 2, [\"m2\", \"m5\"]),\n",
        "   (2, 6, [\"m5\"]), \n",
        "   (6, 2, [\"m5\"]), \n",
        "   (6, 8, [\"m3\"]),\n",
        "   (8, 6, [\"m3\"]),\n",
        "   (6, 10, [\"m3\"]),\n",
        "   (10, 6, [\"m3\"]),\n",
        "   (6, 3, [\"m5\"]),\n",
        "   (3, 6, [\"m5\"]),\n",
        "   (3, 7, [\"m2\"]),\n",
        "   (7, 3, [\"m2\"]),\n",
        "   (2, 7, [\"m2\"]),\n",
        "   (7, 2, [\"m2\"]),\n",
        "   (3, 4, [\"m2\"]),\n",
        "   (4, 3, [\"m2\"]),\n",
        "   (4, 7, [\"m2\"]),\n",
        "   (7, 4, [\"m2\"]),\n",
        "   (2, 4, [\"m2\"]),\n",
        "   (4, 2, [\"m2\"]),\n",
        "   (1, 8, [\"m4\"]),\n",
        "   (8, 1, [\"m4\"])                      \n",
        "], [\"src\", \"dst\", \"movie_ids\"]).coalesce(1).persist()\n",
        "# Node 9 appears in no edge, exercising the isolated-node code path\n",
        "nodes = spark.createDataFrame([\n",
        "   (1, \"n01\", \"ACTOR1\", [\"Drama\", \"Romance\"]),\n",
        "   (2, \"n02\", \"ACTOR2\", [\"Drama\", \"Crime\", \"Thriller\", \"Horror\"]),\n",
        "   (3, \"n03\", \"ACTOR3\", [\"Crime\", \"Thriller\", \"Horror\"]),\n",
        "   (4, \"n04\", \"ACTOR4\", [\"Crime\", \"Thriller\"]),\n",
        "   (5, \"n05\", \"ACTOR5\", [\"Drama\", \"Romance\"]),\n",
        "   (6, \"n06\", \"ACTOR6\", [\"Crime\", \"Thriller\", \"Horror\", \"Comedy\", \"Animation\", \"Fantasy\"]),\n",
        "   (7, \"n07\", \"ACTOR7\", [\"Crime\", \"Thriller\"]),\n",
        "   (8, \"n08\", \"ACTOR8\", [\"Romance\", \"Drama\", \"Comedy\", \"Animation\", \"Fantasy\"]),\n",
        "   (9, \"n09\", \"ACTOR9\", [\"Drama\"]),\n",
        "   (10, \"n10\", \"ACTOR10\", [\"Comedy\", \"Animation\", \"Fantasy\"])                            \n",
        "], [\"id\", \"nconst\", \"primaryName\", \"genres\"]).coalesce(1).persist()\n",
        "# Per-movie weights consumed by weighted_page_rank\n",
        "weights_df = spark.createDataFrame([\n",
        "    (\"m1\", 43),\n",
        "    (\"m2\", 35),\n",
        "    (\"m3\", 14),\n",
        "    (\"m4\", 6),\n",
        "    (\"m5\", 70)                               \n",
        "], [\"movie_id\", \"weight\"]).coalesce(1).persist()\n",
        "test_graph = GraphFrame(v = nodes, e = edges).persist()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "TnNscXFBFK56",
        "outputId": "8a8b324a-f36e-4ab7-ebc7-a9edb6b0e95f"
      },
      "outputs": [],
      "source": [
        "# Custom PageRank on the toy graph: stops after 10 iterations or at tolerance\n",
        "pr_classic = page_rank(test_graph, n_iter=10, tolerance=10e-6)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "8TKupBWIFU7-"
      },
      "outputs": [],
      "source": [
        "# Reference PageRank from GraphFrames, used below for validation\n",
        "pr_gf = test_graph.pageRank(maxIter=10)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 542
        },
        "id": "898UpNp7FiOA",
        "outputId": "65e86113-aad0-4b71-8ac8-d9be18b48c31"
      },
      "outputs": [],
      "source": [
        "import pandas as pd\n",
        "# Rescale the GraphFrames ranks so they are comparable with the custom values\n",
        "p_pr_gf = pr_gf.vertices.withColumn(\"pageRank\", pr_gf.vertices.pagerank / pr_gf.vertices.count()).toPandas()\n",
        "p_pr_class = pr_classic.toPandas()\n",
        "p_pr_gf = (p_pr_gf.loc[:, [\"id\", \"pageRank\"]]).assign(alg = \"PR GraphFrames\")\n",
        "p_pr_class = (p_pr_class.loc[:, [\"id\", \"pageRank\"]]).assign(alg = \"PR Custom\")\n",
        "# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0: use concat\n",
        "res_df = pd.concat([p_pr_gf, p_pr_class]).sort_values(['id'])\n",
        "fig = px.line(res_df, x = \"id\", y = \"pageRank\", color = \"alg\", \n",
        "              title=\"Differences between PageRank values obtained with custom algorithm vs GraphFrames\", \n",
        "              labels = {'id' : 'Node id', 'pageRank': 'PageRank value', 'alg': 'Algorithm'})\n",
        "fig.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "2KywzXtcwHrG",
        "outputId": "49de0a65-99f5-4096-b739-a79c70690dc2"
      },
      "outputs": [],
      "source": [
        "ts_pr = topic_sensitive_page_rank(test_graph, topics = ['Drama', 'Thriller'], n_iter = 10, tolerance=10e-6)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "kgWDE4XTwcH4"
      },
      "outputs": [],
      "source": [
        "ts_valid = pr_classic.join(ts_pr.drop(\"nconst\", \"primaryName\", \"genres\"), \"id\")\n",
        "ts_valid = ts_valid.withColumn(\"Pos_class\", F.row_number().over(Window.orderBy(F.col(\"pageRank\").desc()))) \\\n",
        ".withColumn(\"Pos_drama\", F.row_number().over(Window.orderBy(F.col(\"Drama\").desc()))) \\\n",
        ".withColumn(\"Pos_thriller\", F.row_number().over(Window.orderBy(F.col(\"Thriller\").desc()))) \\\n",
        ".drop(\"nconst\", \"primaryName\", \"genres\") \n",
        "ts_valid_pd = ts_valid.toPandas()\n",
        "ts_valid_pd_1 = ts_valid_pd[['id', 'pageRank', 'Drama', 'Thriller']].rename(columns = {\"pageRank\" : \"Classic PageRank\", \"Drama\": \"Topic -  Drama\", \"Thriller\": \"Topic - Thriller\"})\n",
        "ts_valid_pd_1 = ts_valid_pd_1.melt(id_vars=\"id\", var_name=\"Value type\", value_name=\"PageRank\")\n",
        "ts_valid_pd_2 = ts_valid_pd[['id', 'Pos_class', 'Pos_drama', 'Pos_thriller']] \\\n",
        ".rename(columns={'Pos_class': \"Classic PageRank\", 'Pos_drama': \"Topic -  Drama\", 'Pos_thriller': \"Topic - Thriller\"}) \\\n",
        ".melt(id_vars='id', var_name=\"Value type\", value_name='Position')\n",
        "ts_valid_pd = ts_valid_pd_1.merge(ts_valid_pd_2, on = ['id', 'Value type']).sort_values('Position')"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 542
        },
        "id": "sb8ufrP4wvjU",
        "outputId": "26492dcb-a619-4a3c-ec1c-2911ae802228"
      },
      "outputs": [],
      "source": [
        "fig2 = px.bar(ts_valid_pd, x=\"PageRank\", y=\"Position\",\n",
        "             orientation = 'h',\n",
        "             color='Value type', barmode='group', title = \"Comparison between classical PageRank values and topic-biased values\",\n",
        "             text = \"id\",\n",
        "             labels = {'PageRank' : 'PageRank value'}, width=900)\n",
        "fig2.update_yaxes(type='category', autorange=\"reversed\")\n",
        "fig2.update_traces(textposition='outside')\n",
        "fig2.update_layout(legend=dict(\n",
        "    orientation=\"v\",\n",
        "    yanchor=\"bottom\",\n",
        "    y=0,\n",
        "    xanchor=\"right\",\n",
        "    x=1\n",
        "))\n",
        "fig2.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "FtDWv4BtmKt6",
        "outputId": "45f389e3-a6ca-4e46-ce6f-214c890cb3d6"
      },
      "outputs": [],
      "source": [
        "w_pr = weighted_page_rank(test_graph, weights=weights_df,  n_iter=10, tolerance=10e-6)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "A72o0X5moQzi"
      },
      "outputs": [],
      "source": [
        "window_rank = Window.orderBy(F.col(\"pageRank\").desc())\n",
        "w_comp = w_pr.withColumn(\"alg\", F.lit(\"Weighted PR\")) \\\n",
        ".withColumn(\"Position\", F.row_number().over(window_rank)) \\\n",
        ".union(pr_classic.withColumn(\"alg\", F.lit(\"Classic PR\")) \\\n",
        "       .withColumn(\"Position\", F.row_number().over(window_rank))) \n",
        "w_comp = w_comp.toPandas().sort_values([\"Position\"], ascending = True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 542
        },
        "id": "6IxSqwQHog86",
        "outputId": "968aff61-5956-4afb-bb69-bb7381356c95"
      },
      "outputs": [],
      "source": [
        "fig3 = px.bar(w_comp, x=\"pageRank\", y=\"Position\",\n",
        "             orientation = 'h',\n",
        "             color='alg', barmode='group', title = \"Comparison between classical PageRank values and weighted values\",\n",
        "             text = \"id\",\n",
        "             labels = {'pageRank' : 'PageRank value', 'alg':'Algorithm'}, width=900)\n",
        "fig3.update_yaxes(type='category', autorange=\"reversed\")\n",
        "fig3.update_traces(textposition='outside')\n",
        "fig3.update_layout(legend=dict(\n",
        "    orientation=\"v\",\n",
        "    yanchor=\"bottom\",\n",
        "    y=0,\n",
        "    xanchor=\"right\",\n",
        "    x=1\n",
        "))\n",
        "fig3.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "hdEBeAUnOA67"
      },
      "outputs": [],
      "source": [
        "spark.stop()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 73,
          "resources": {
            "http://localhost:8080/nbextensions/google.colab/files.js": {
              "data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7CgpmdW5jdGlvbiBfdXBsb2FkRmlsZXMoaW5wdXRJZCwgb3V0cHV0SWQpIHsKICBjb25zdCBzdGVwcyA9IHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCk7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICAvLyBDYWNoZSBzdGVwcyBvbiB0aGUgb3V0cHV0RWxlbWVudCB0byBtYWtlIGl0IGF2YWlsYWJsZSBmb3IgdGhlIG5leHQgY2FsbAogIC8vIHRvIHVwbG9hZEZpbGVzQ29udGludWUgZnJvbSBQeXRob24uCiAgb3V0cHV0RWxlbWVudC5zdGVwcyA9IHN0ZXBzOwoKICByZXR1cm4gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpOwp9CgovLyBUaGlzIGlzIHJvdWdobHkgYW4gYXN5bmMgZ2VuZXJhdG9yIChub3Qgc3VwcG9ydGVkIGluIHRoZSBicm93c2VyIHlldCksCi8vIHdoZXJlIHRoZXJlIGFyZSBtdWx0aXBsZSBhc3luY2hyb25vdXMgc3Rlc
HMgYW5kIHRoZSBQeXRob24gc2lkZSBpcyBnb2luZwovLyB0byBwb2xsIGZvciBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcC4KLy8gVGhpcyB1c2VzIGEgUHJvbWlzZSB0byBibG9jayB0aGUgcHl0aG9uIHNpZGUgb24gY29tcGxldGlvbiBvZiBlYWNoIHN0ZXAsCi8vIHRoZW4gcGFzc2VzIHRoZSByZXN1bHQgb2YgdGhlIHByZXZpb3VzIHN0ZXAgYXMgdGhlIGlucHV0IHRvIHRoZSBuZXh0IHN0ZXAuCmZ1bmN0aW9uIF91cGxvYWRGaWxlc0NvbnRpbnVlKG91dHB1dElkKSB7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICBjb25zdCBzdGVwcyA9IG91dHB1dEVsZW1lbnQuc3RlcHM7CgogIGNvbnN0IG5leHQgPSBzdGVwcy5uZXh0KG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSk7CiAgcmV0dXJuIFByb21pc2UucmVzb2x2ZShuZXh0LnZhbHVlLnByb21pc2UpLnRoZW4oKHZhbHVlKSA9PiB7CiAgICAvLyBDYWNoZSB0aGUgbGFzdCBwcm9taXNlIHZhbHVlIHRvIG1ha2UgaXQgYXZhaWxhYmxlIHRvIHRoZSBuZXh0CiAgICAvLyBzdGVwIG9mIHRoZSBnZW5lcmF0b3IuCiAgICBvdXRwdXRFbGVtZW50Lmxhc3RQcm9taXNlVmFsdWUgPSB2YWx1ZTsKICAgIHJldHVybiBuZXh0LnZhbHVlLnJlc3BvbnNlOwogIH0pOwp9CgovKioKICogR2VuZXJhdG9yIGZ1bmN0aW9uIHdoaWNoIGlzIGNhbGxlZCBiZXR3ZWVuIGVhY2ggYXN5bmMgc3RlcCBvZiB0aGUgdXBsb2FkCiAqIHByb2Nlc3MuCiAqIEBwYXJhbSB7c3RyaW5nfSBpbnB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIGlucHV0IGZpbGUgcGlja2VyIGVsZW1lbnQuCiAqIEBwYXJhbSB7c3RyaW5nfSBvdXRwdXRJZCBFbGVtZW50IElEIG9mIHRoZSBvdXRwdXQgZGlzcGxheS4KICogQHJldHVybiB7IUl0ZXJhYmxlPCFPYmplY3Q+fSBJdGVyYWJsZSBvZiBuZXh0IHN0ZXBzLgogKi8KZnVuY3Rpb24qIHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IGlucHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKGlucHV0SWQpOwogIGlucHV0RWxlbWVudC5kaXNhYmxlZCA9IGZhbHNlOwoKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIG91dHB1dEVsZW1lbnQuaW5uZXJIVE1MID0gJyc7CgogIGNvbnN0IHBpY2tlZFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgaW5wdXRFbGVtZW50LmFkZEV2ZW50TGlzdGVuZXIoJ2NoYW5nZScsIChlKSA9PiB7CiAgICAgIHJlc29sdmUoZS50YXJnZXQuZmlsZXMpOwogICAgfSk7CiAgfSk7CgogIGNvbnN0IGNhbmNlbCA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2J1dHRvbicpOwogIGlucHV0RWxlbWVudC5wYXJlbnRFbGVtZW50LmFwcGVuZENoaWxkKGNhbmNlbCk7CiAgY2FuY2VsLnRleHRDb250ZW50ID0gJ0NhbmNlbCB1cGxvYWQnOwogIGNvbnN0IGNhbmNlbFByb21pc2UgP
SBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgY2FuY2VsLm9uY2xpY2sgPSAoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9OwogIH0pOwoKICAvLyBXYWl0IGZvciB0aGUgdXNlciB0byBwaWNrIHRoZSBmaWxlcy4KICBjb25zdCBmaWxlcyA9IHlpZWxkIHsKICAgIHByb21pc2U6IFByb21pc2UucmFjZShbcGlja2VkUHJvbWlzZSwgY2FuY2VsUHJvbWlzZV0pLAogICAgcmVzcG9uc2U6IHsKICAgICAgYWN0aW9uOiAnc3RhcnRpbmcnLAogICAgfQogIH07CgogIGNhbmNlbC5yZW1vdmUoKTsKCiAgLy8gRGlzYWJsZSB0aGUgaW5wdXQgZWxlbWVudCBzaW5jZSBmdXJ0aGVyIHBpY2tzIGFyZSBub3QgYWxsb3dlZC4KICBpbnB1dEVsZW1lbnQuZGlzYWJsZWQgPSB0cnVlOwoKICBpZiAoIWZpbGVzKSB7CiAgICByZXR1cm4gewogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgICAgfQogICAgfTsKICB9CgogIGZvciAoY29uc3QgZmlsZSBvZiBmaWxlcykgewogICAgY29uc3QgbGkgPSBkb2N1bWVudC5jcmVhdGVFbGVtZW50KCdsaScpOwogICAgbGkuYXBwZW5kKHNwYW4oZmlsZS5uYW1lLCB7Zm9udFdlaWdodDogJ2JvbGQnfSkpOwogICAgbGkuYXBwZW5kKHNwYW4oCiAgICAgICAgYCgke2ZpbGUudHlwZSB8fCAnbi9hJ30pIC0gJHtmaWxlLnNpemV9IGJ5dGVzLCBgICsKICAgICAgICBgbGFzdCBtb2RpZmllZDogJHsKICAgICAgICAgICAgZmlsZS5sYXN0TW9kaWZpZWREYXRlID8gZmlsZS5sYXN0TW9kaWZpZWREYXRlLnRvTG9jYWxlRGF0ZVN0cmluZygpIDoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgJ24vYSd9IC0gYCkpOwogICAgY29uc3QgcGVyY2VudCA9IHNwYW4oJzAlIGRvbmUnKTsKICAgIGxpLmFwcGVuZENoaWxkKHBlcmNlbnQpOwoKICAgIG91dHB1dEVsZW1lbnQuYXBwZW5kQ2hpbGQobGkpOwoKICAgIGNvbnN0IGZpbGVEYXRhUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICAgIGNvbnN0IHJlYWRlciA9IG5ldyBGaWxlUmVhZGVyKCk7CiAgICAgIHJlYWRlci5vbmxvYWQgPSAoZSkgPT4gewogICAgICAgIHJlc29sdmUoZS50YXJnZXQucmVzdWx0KTsKICAgICAgfTsKICAgICAgcmVhZGVyLnJlYWRBc0FycmF5QnVmZmVyKGZpbGUpOwogICAgfSk7CiAgICAvLyBXYWl0IGZvciB0aGUgZGF0YSB0byBiZSByZWFkeS4KICAgIGxldCBmaWxlRGF0YSA9IHlpZWxkIHsKICAgICAgcHJvbWlzZTogZmlsZURhdGFQcm9taXNlLAogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbnRpbnVlJywKICAgICAgfQogICAgfTsKCiAgICAvLyBVc2UgYSBjaHVua2VkIHNlbmRpbmcgdG8gYXZvaWQgbWVzc2FnZSBzaXplIGxpbWl0cy4gU2VlIGIvNjIxMTU2NjAuCiAgICBsZXQgcG9zaXRpb24gPSAwOwogICAgZG8gewogICAgICBjb25zdCBsZW5ndGggPSBNYXRoLm1pbihmaWxlRGF0YS5ieXRlTGVuZ3RoIC0gcG9zaXRpb24sIE1BW
F9QQVlMT0FEX1NJWkUpOwogICAgICBjb25zdCBjaHVuayA9IG5ldyBVaW50OEFycmF5KGZpbGVEYXRhLCBwb3NpdGlvbiwgbGVuZ3RoKTsKICAgICAgcG9zaXRpb24gKz0gbGVuZ3RoOwoKICAgICAgY29uc3QgYmFzZTY0ID0gYnRvYShTdHJpbmcuZnJvbUNoYXJDb2RlLmFwcGx5KG51bGwsIGNodW5rKSk7CiAgICAgIHlpZWxkIHsKICAgICAgICByZXNwb25zZTogewogICAgICAgICAgYWN0aW9uOiAnYXBwZW5kJywKICAgICAgICAgIGZpbGU6IGZpbGUubmFtZSwKICAgICAgICAgIGRhdGE6IGJhc2U2NCwKICAgICAgICB9LAogICAgICB9OwoKICAgICAgbGV0IHBlcmNlbnREb25lID0gZmlsZURhdGEuYnl0ZUxlbmd0aCA9PT0gMCA/CiAgICAgICAgICAxMDAgOgogICAgICAgICAgTWF0aC5yb3VuZCgocG9zaXRpb24gLyBmaWxlRGF0YS5ieXRlTGVuZ3RoKSAqIDEwMCk7CiAgICAgIHBlcmNlbnQudGV4dENvbnRlbnQgPSBgJHtwZXJjZW50RG9uZX0lIGRvbmVgOwoKICAgIH0gd2hpbGUgKHBvc2l0aW9uIDwgZmlsZURhdGEuYnl0ZUxlbmd0aCk7CiAgfQoKICAvLyBBbGwgZG9uZS4KICB5aWVsZCB7CiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICB9CiAgfTsKfQoKc2NvcGUuZ29vZ2xlID0gc2NvcGUuZ29vZ2xlIHx8IHt9OwpzY29wZS5nb29nbGUuY29sYWIgPSBzY29wZS5nb29nbGUuY29sYWIgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYi5fZmlsZXMgPSB7CiAgX3VwbG9hZEZpbGVzLAogIF91cGxvYWRGaWxlc0NvbnRpbnVlLAp9Owp9KShzZWxmKTsK",
              "headers": [
                [
                  "content-type",
                  "application/javascript"
                ]
              ],
              "ok": true,
              "status": 200,
              "status_text": ""
            }
          }
        },
        "id": "eBYcfRtJ92Kn",
        "outputId": "ede572ef-acdf-4aed-e4db-9e8cff8d78ea"
      },
      "outputs": [],
      "source": [
        "from google.colab import files\n",
        "\n",
        "uploaded = files.upload()\n",
        "  \n",
        "!mkdir -p ~/.kaggle/ && mv kaggle.json ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "pEokbQHt99Z8",
        "outputId": "10865184-c6cf-4307-b30f-0e4e2f8d2cad"
      },
      "outputs": [],
      "source": [
        "!kaggle datasets download \"ashirwadsangwan/imdb-dataset\"\n",
        "!unzip -jq imdb-dataset.zip -d ."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "OzXhwC0qGmR0"
      },
      "outputs": [],
      "source": [
        "# Get an optimized SparkSession for bigger dataset\n",
        "spark = SparkSession.builder \\\n",
        ".master(\"local[*]\") \\\n",
        ".config(\"spark.sql.shuffle.partitions\", \"100\") \\\n",
        ".config(\"spark.sql.autoBroadcastJoinThreshold\", \"-1\") \\\n",
        ".getOrCreate()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "j4dKD8HdNhRo"
      },
      "outputs": [],
      "source": [
        "# Contains info about movies\n",
        "movies_df_schema = StructType() \\\n",
        ".add(\"tconst\", StringType(), False) \\\n",
        ".add(\"titleType\", StringType(), False) \\\n",
        ".add(\"primaryTitle\", StringType(), False) \\\n",
        ".add(\"originalTitle\", StringType(), False) \\\n",
        ".add(\"isAdult\", ByteType(), False) \\\n",
        ".add(\"startYear\", IntegerType(), True) \\\n",
        ".add(\"endYear\", IntegerType(), True) \\\n",
        ".add(\"runtimeMinutes\", IntegerType(), True) \\\n",
        ".add(\"genres\", StringType(), True)\n",
        "movies_df = spark.read.format(\"csv\") \\\n",
        "                      .option(\"header\", \"true\") \\\n",
        "                      .option(\"delimiter\", \"\\t\") \\\n",
        "                      .schema(movies_df_schema) \\\n",
        "                      .load(\"title.basics.tsv.gz\")\n",
        "movies_df = movies_df \\\n",
        "  .replace({'\\\\N': None}) \\\n",
        "  .filter(movies_df.titleType.isin([\"movie\", \"tvMovie\"])).cache()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "dDMrYl5Lrev_"
      },
      "outputs": [],
      "source": [
        "# Contains info about actors\n",
        "names_df_schema = StructType() \\\n",
        ".add(\"nconst\", StringType(), False) \\\n",
        ".add(\"primaryName\", StringType(), False) \\\n",
        ".add(\"birthYear\", IntegerType(), True) \\\n",
        ".add(\"deathYear\", IntegerType(), True) \\\n",
        ".add(\"primaryProfession\", StringType(), True) \\\n",
        ".add(\"knownForTitles\", StringType(), True)\n",
        "names_df = spark.read.format(\"csv\") \\\n",
        "                      .option(\"header\", \"true\") \\\n",
        "                      .option(\"delimiter\", \"\\t\") \\\n",
        "                      .schema(names_df_schema) \\\n",
        "                      .load(\"name.basics.tsv.gz\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "pWZnh07R87Xg"
      },
      "outputs": [],
      "source": [
        "names_df = names_df \\\n",
        ".replace({'\\\\N': None}) \\\n",
        ".filter(names_df.primaryProfession.contains(\"actor\") | names_df.primaryProfession.contains(\"actress\")) \\\n",
        ".select(\"nconst\", \"primaryName\") \\\n",
        ".withColumn(\"id\", F.row_number().over(Window.orderBy(\"nconst\"))-1).cache()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "THjBVh7LAZ0P"
      },
      "outputs": [],
      "source": [
        "# Contains relationships between actors and movies\n",
        "rel_df_schema = StructType() \\\n",
        ".add(\"tconst\", StringType(), False) \\\n",
        ".add(\"ordering\", IntegerType(), False) \\\n",
        ".add(\"nconst\", StringType(), False) \\\n",
        ".add(\"category\", StringType(), False) \\\n",
        ".add(\"job\", StringType(), True) \\\n",
        ".add(\"characters\", StringType(), True)\n",
        "rel_df = spark.read.format(\"csv\") \\\n",
        "                      .option(\"header\", \"true\") \\\n",
        "                      .option(\"delimiter\", \"\\t\") \\\n",
        "                      .schema(rel_df_schema) \\\n",
        "                      .load(\"title.principals.tsv.gz\")\n",
        "rel_df = rel_df \\\n",
        ".replace({'\\\\N': None}) \\\n",
        ".join(other=names_df, on=\"nconst\") \\\n",
        ".select(\"id\", \"tconst\") "
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "AVmgtIbMni5Q"
      },
      "outputs": [],
      "source": [
        "cast_by_mov = rel_df \\\n",
        ".groupby(rel_df.tconst) \\\n",
        ".agg(F.collect_set(rel_df.id).alias(\"cast\")) \\\n",
        ".join(other=movies_df, on=\"tconst\", how=\"inner\").cache()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "BTB7uLeHva1t"
      },
      "outputs": [],
      "source": [
        "import itertools\n",
        "def co_rows(row) :\n",
        "  comb = list(itertools.permutations(row[1], 2))\n",
        "  new_rows = []\n",
        "  movie_genres = row[9].split(\",\") if row[9] is not None else None\n",
        "  for c in comb:\n",
        "    new_rows.append((c[0], c[1], row[0], movie_genres))\n",
        "  return new_rows\n",
        "actor_actor_rel_df = cast_by_mov.rdd.flatMap(lambda row: co_rows(row)) \\\n",
        ".toDF([\"src\", \"dst\", \"movie_id\", \"movie_genres\"]).cache()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "f_2h8XWCx9HE"
      },
      "outputs": [],
      "source": [
        "names_df = names_df.join(\n",
        "    other = actor_actor_rel_df \\\n",
        "    .groupby(F.col(\"src\").alias(\"id\")) \\\n",
        "    .agg(F.array_distinct(F.flatten(F.collect_set(F.col(\"movie_genres\")))).alias(\"genres\")),\n",
        "    on = \"id\", how = \"inner\"\n",
        ").persist()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "EpitbTf0jnAg"
      },
      "outputs": [],
      "source": [
        "edges_df = actor_actor_rel_df \\\n",
        ".drop(\"movie_genres\") \\\n",
        ".groupBy([F.col(\"src\"), F.col(\"dst\")]) \\\n",
        ".agg(F.collect_set(F.col(\"movie_id\")).alias(\"movie_ids\")).persist()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "61_rfwVxqoXW"
      },
      "outputs": [],
      "source": [
        "# Contains user ratings\n",
        "ratings_df_schema = StructType() \\\n",
        ".add(\"tconst\", StringType(), False) \\\n",
        ".add(\"averageRating\", DoubleType(), False) \\\n",
        ".add(\"numVotes\", IntegerType(), False) \n",
        "ratings_df = spark.read.format(\"csv\") \\\n",
        "                      .option(\"header\", \"true\") \\\n",
        "                      .option(\"delimiter\", \"\\t\") \\\n",
        "                      .schema(ratings_df_schema) \\\n",
        "                      .load(\"title.ratings.tsv.gz\") \\\n",
        "                      .cache()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "43noZztCh9Y7"
      },
      "outputs": [],
      "source": [
        "wind = Window.partitionBy(\"row\")\n",
        "edge_weights_df = ratings_df \\\n",
        ".repartition(200) \\\n",
        ".join(movies_df, on =\"tconst\", how=\"leftsemi\") \\\n",
        ".withColumn(\"row\", F.lit(1)) \\\n",
        ".withColumn(\"r_mean\", F.avg(F.col(\"averageRating\")).over(wind)) \\\n",
        ".withColumn(\"r_sd\", F.stddev(F.col(\"averageRating\")).over(wind)) \\\n",
        ".withColumn(\"r_zscore\", F.col(\"averageRating\") - F.col(\"r_mean\") / F.col(\"r_sd\")) \\\n",
        ".drop(\"r_mean\", \"r_sd\") \\\n",
        ".withColumn(\"v_mean\", F.avg(F.col(\"numVotes\")).over(wind)) \\\n",
        ".withColumn(\"v_sd\", F.stddev(F.col(\"numVotes\")).over(wind)) \\\n",
        ".withColumn(\"v_zscore\", F.col(\"numVotes\") - F.col(\"v_mean\") / F.col(\"v_sd\")) \\\n",
        ".drop(\"v_mean\", \"v_sd\") \\\n",
        ".withColumn(\"min_r_zscore\", F.min(\"r_zscore\").over(wind)) \\\n",
        ".withColumn(\"r_zscore\", F.col(\"r_zscore\") - F.col(\"min_r_zscore\") + 1) \\\n",
        ".drop(\"min_r_zscore\") \\\n",
        ".withColumn(\"min_v_zscore\", F.min(\"v_zscore\").over(wind)) \\\n",
        ".withColumn(\"v_zscore\", F.col(\"v_zscore\") - F.col(\"min_v_zscore\") + 1) \\\n",
        ".drop(\"min_v_zscore\", \"row\") \\\n",
        ".withColumn(\"A\", F.lit(0.40)) \\\n",
        ".withColumn(\"B\", F.lit(0.60)) \\\n",
        ".withColumn(\"weight\", F.col(\"r_zscore\")*F.col(\"A\") + F.col(\"v_zscore\")*F.col(\"B\")) \\\n",
        ".select(F.col(\"tconst\").alias(\"movie_id\"), \"weight\").coalesce(100).persist()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ATEPT1_W15RK"
      },
      "outputs": [],
      "source": [
        "graph = GraphFrame(v = names_df, e = edges_df).persist()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "Jxz_6xdS72CS"
      },
      "outputs": [],
      "source": [
        "#@title ### Function parameters { run: \"auto\" }\n",
        "#@markdown #### Arguments for all functions\n",
        "n_iter = 15 #@param {type:\"slider\", min:1, max:1000, step:1}\n",
        "tolerance = 1e-4 #@param {type:\"number\"}\n",
        "beta = 0.85 #@param {type:\"number\"}\n",
        "verbose = True #@param {type:\"boolean\"}\n",
        "#@markdown #### Arguments for `weighted_page_rank`\n",
        "na_politic = 'min_value' #@param ['min_value', 'drop']\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "MLDX2-OuK9yy"
      },
      "outputs": [],
      "source": [
        "all_genres = actor_actor_rel_df.select(\"movie_genres\").distinct().groupby().agg(F.collect_set(F.col('movie_genres'))).collect()\n",
        "from itertools import chain\n",
        "all_genres = set(chain.from_iterable(all_genres[0][0]))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 111,
          "referenced_widgets": [
            "f48e9bab73c64344abe8f737c0ffce23",
            "680a300564244137b1741461d03958c7",
            "e5e6f84429404689ac87df80322f5ecd"
          ]
        },
        "id": "RaC64-ulK_EU",
        "outputId": "ebd8f853-2079-40e5-cc07-c1811fdae874"
      },
      "outputs": [],
      "source": [
        "import ipywidgets as widgets\n",
        "from IPython.display import display\n",
        "topics = widgets.SelectMultiple(\n",
        "    options=all_genres,\n",
        "    description='Topics',\n",
        "    disabled=False\n",
        ")\n",
        "display(topics)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Y6hbI1f6_eYa",
        "outputId": "ef97e8fe-bf7a-499a-9b75-3f105df8b0bb"
      },
      "outputs": [],
      "source": [
        "PR_classic = page_rank(graph=graph,\n",
        "                       tolerance=tolerance,\n",
        "                       n_iter=n_iter,\n",
        "                       beta=beta,\n",
        "                       verbose=verbose)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "0DlSnPixTBuV",
        "outputId": "c6de637d-7778-4549-a917-cb6852b3a1fb"
      },
      "outputs": [],
      "source": [
        "PR_ts = topic_sensitive_page_rank(graph=graph,\n",
        "                                  tolerance=tolerance,\n",
        "                                  n_iter=n_iter,\n",
        "                                  beta=beta,\n",
        "                                  topics=list(topics.value),\n",
        "                                  verbose=verbose)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "p28CjGGOTuwp",
        "outputId": "0cbb6cb8-f791-4ab9-f6c1-3147361d6de7"
      },
      "outputs": [],
      "source": [
        "PR_ew = weighted_page_rank(graph=graph,\n",
        "                           weights = edge_weights_df,\n",
        "                           na_politic = 'min_value',\n",
        "                           tolerance=tolerance,\n",
        "                           n_iter=n_iter,\n",
        "                           beta=beta,\n",
        "                           verbose=verbose)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "haFOWBRYVPDv"
      },
      "outputs": [],
      "source": [
        "# Top 100 ranking nodes - classic PR\n",
        "top_class_nodes = PR_classic.withColumn(\"Ranking\", F.row_number().over(Window.orderBy(F.col('pageRank').desc()))) \\\n",
        "  .where(F.col(\"Ranking\") <= 100)\n",
        "top_class_edges = graph.edges.join(top_class_nodes, F.col(\"src\") == F.col(\"id\"), how=\"leftsemi\") \\\n",
        "  .join(top_class_nodes, F.col(\"dst\") == F.col(\"id\"), how=\"leftsemi\")\n",
        "top_class_nodes_pd = top_class_nodes.toPandas()\n",
        "top_class_edges_pd = top_class_edges.toPandas()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "background_save": true
        },
        "id": "QobrnHmIWJYs"
      },
      "outputs": [],
      "source": [
        "# Top 100 ranking nodes - topic-sensitive PR\n",
        "if (len(list(topics.value)) > 0):\n",
        "  top_ts_nodes = PR_ts.withColumn(\"map\", F.create_map(list(itertools.chain.from_iterable((F.lit(t), F.col(t)) for t in list(topics.value))))) \\\n",
        "    .drop(*[t for t in list(topics.value)]) \\\n",
        "    .select('*', F.explode(F.col(\"map\")).alias('topic','pageRank')) \\\n",
        "    .drop(\"map\") \\\n",
        "    .withColumn(\"Ranking\", F.row_number().over(Window.partitionBy(\"topic\").orderBy(F.col(\"pageRank\").desc()))) \\\n",
        "    .filter(F.col(\"Ranking\") <= 100)\n",
        "  top_ts_edges = graph.edges.join(top_ts_nodes, F.col(\"src\") == F.col(\"id\"), how=\"leftsemi\") \\\n",
        "    .join(top_ts_nodes, F.col(\"dst\") == F.col(\"id\"), how=\"leftsemi\")\n",
        "  top_ts_nodes_pd = top_ts_nodes.toPandas()\n",
        "  top_ts_edges_pd = top_ts_edges.toPandas()\n",
        "else:\n",
        "  top_ts_nodes = PR_ts.withColumn(\"Ranking\", F.row_number().over(Window.orderBy(F.col('pageRank').desc()))) \\\n",
        "    .where(F.col(\"Ranking\") <= 100)\n",
        "  top_ts_edges = graph.edges.join(top_ts_nodes, F.col(\"src\") == F.col(\"id\"), how=\"leftsemi\") \\\n",
        "    .join(top_ts_nodes, F.col(\"dst\") == F.col(\"id\"), how=\"leftsemi\")\n",
        "  top_ts_nodes_pd = top_ts_nodes.toPandas()\n",
        "  top_ts_edges_pd = top_ts_edges.toPandas()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "3BazNXfaWOzc"
      },
      "outputs": [],
      "source": [
        "# Top 100 ranking nodes - weighted PR\n",
        "top_w_nodes = PR_ew.withColumn(\"Ranking\", F.row_number().over(Window.orderBy(F.col('pageRank').desc()))) \\\n",
        ".where(F.col(\"Ranking\") <= 100)\n",
        "top_w_edges = graph.edges.join(top_w_nodes, F.col(\"src\") == F.col(\"id\"), how=\"leftsemi\") \\\n",
        "  .join(top_w_nodes, F.col(\"dst\") == F.col(\"id\"), how=\"leftsemi\")\n",
        "top_w_nodes_pd = top_w_nodes.toPandas()\n",
        "top_w_edges_pd = top_w_edges.toPandas()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 74,
      "metadata": {
        "id": "Z5XZqYveklq1"
      },
      "outputs": [],
      "source": [
        "# Renaming cols\n",
        "top_class_nodes_pd.rename(columns={'primaryName': 'label'}, inplace=True)\n",
        "top_class_edges_pd.rename(columns={'src':'source', 'dst':'target'}, inplace=True)\n",
        "top_ts_nodes_pd.rename(columns={'primaryName': 'label'}, inplace=True)\n",
        "top_ts_edges_pd.rename(columns={'src':'source', 'dst':'target'}, inplace=True)\n",
        "top_w_nodes_pd.rename(columns={'primaryName': 'label'}, inplace=True)\n",
        "top_w_edges_pd.rename(columns={'src':'source', 'dst':'target'}, inplace=True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 75,
      "metadata": {
        "id": "cPc0NYS1lM9I"
      },
      "outputs": [],
      "source": [
        "# Assign fill colors to PR values\n",
        "def assign_fill(df):\n",
        "  # Sample single color from value\n",
        "  def get_fill_color(value, max):\n",
        "    val_resc = value/max\n",
        "    return plotly.colors.sample_colorscale(plotly.colors.diverging.Temps, val_resc)[0]\n",
        "  max_pr = df['pageRank'].max()\n",
        "  return df.assign(fill = lambda df: df['pageRank'].map(lambda pr: get_fill_color(pr, max_pr)))\n",
        "\n",
        "top_class_nodes_pd = assign_fill(top_class_nodes_pd)\n",
        "top_w_nodes_pd = assign_fill(top_w_nodes_pd)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 76,
      "metadata": {
        "id": "7aXl-A3tmMa_"
      },
      "outputs": [],
      "source": [
        "# Convert in dictionaries for cytoscape\n",
        "def convert_to_dict(nodes, edges, topic = None):\n",
        "  if topic is None:\n",
        "    nodes_dict = nodes.to_dict('records')\n",
        "    edges_dict = edges.to_dict('records')\n",
        "    elements = []\n",
        "    for n in nodes_dict:\n",
        "      elements.append({'data': n, 'group':'nodes'})\n",
        "    for e in edges_dict:\n",
        "      elements.append({'data': e, 'group':'edges'})\n",
        "    return elements\n",
        "  else:\n",
        "    topic_df = nodes[nodes['topic'] == topic]\n",
        "    topic_df = assign_fill(topic_df)\n",
        "    edges_sub = edges[(edges['source'].isin(topic_df['id'].to_numpy())) &\n",
        "                      (edges['target'].isin(topic_df['id'].to_numpy()))]\n",
        "    nodes_dict = topic_df.to_dict('records')\n",
        "    edges_dict = edges_sub.to_dict('records')\n",
        "    elements = []\n",
        "    for n in nodes_dict:\n",
        "      elements.append({'data': n, 'group':'nodes'})\n",
        "    for e in edges_dict:\n",
        "      elements.append({'data': e, 'group':'edges'})\n",
        "    return elements\n",
        "\n",
        "top_class_el = convert_to_dict(top_class_nodes_pd, top_class_edges_pd)\n",
        "top_ts_el_list = [convert_to_dict(top_ts_nodes_pd, top_ts_edges_pd, t) for t in list(topics.value)]\n",
        "top_w_el = convert_to_dict(top_w_nodes_pd, top_w_edges_pd)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 77,
      "metadata": {
        "id": "wwXiFUXwnJF9"
      },
      "outputs": [],
      "source": [
        "# Function for generating color legend\n",
        "def generate_color_legend(df):\n",
        "  import numpy as np\n",
        "  min_pr = df['pageRank'].min()\n",
        "  max_pr = df['pageRank'].max()\n",
        "  n=100\n",
        "  rg = np.linspace(start=min_pr, stop=max_pr, num=n)\n",
        "  sequences = [(\"temps\", px.colors.diverging.Temps)]\n",
        "  color_bar = go.Figure(\n",
        "          data=[\n",
        "              go.Bar(\n",
        "                  orientation=\"h\",\n",
        "                  y=[name] * n,\n",
        "                  x=np.array([(max_pr)/n]*(n)),\n",
        "                  customdata=[x for x in rg],\n",
        "                  marker=dict(color=list(rg), \n",
        "                              colorscale=name, \n",
        "                              line_width=0),\n",
        "                  hovertemplate=\"%{customdata}\"\n",
        "              )\n",
        "              for name, colors in reversed(sequences)\n",
        "          ],\n",
        "          layout=dict(\n",
        "              title={\"text\": \"PageRank value\", \"x\" : 0.5},\n",
        "              barmode=\"stack\",\n",
        "              bargap=0,\n",
        "              showlegend=False,\n",
        "              xaxis=dict(showticklabels=True, \n",
        "                        showgrid=True,\n",
        "                        range=[min_pr, max_pr],\n",
        "                        tickmode = 'array',\n",
        "                        tickvals = np.linspace(start=min_pr, stop=max_pr, num=5)\n",
        "                        ),\n",
        "              yaxis=dict(visible=False),\n",
        "              height=150,\n",
        "              width=600,\n",
        "              margin=dict(b=10)\n",
        "          ))\n",
        "  return color_bar\n",
        "\n",
        "class_bar = generate_color_legend(top_class_nodes_pd)\n",
        "w_bar = generate_color_legend(top_w_nodes_pd)\n",
        "ts_bar_list = [generate_color_legend(top_ts_nodes_pd[top_ts_nodes_pd['topic'] == t]) for t in list(topics.value)]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 78,
      "metadata": {
        "id": "CWcljqj-Gb0x"
      },
      "outputs": [],
      "source": [
        "def generate_ranking_graph(df, topic = None):\n",
        "  if topic is None:\n",
        "    fig = px.bar(df, x=\"pageRank\", y=\"Ranking\",\n",
        "             orientation = 'h',\n",
        "             barmode='group',\n",
        "             text = \"label\",\n",
        "             labels = {'pageRank' : 'PageRank value'}, width=900, height = 1500)\n",
        "    fig.update_yaxes(type='category', autorange=\"reversed\")\n",
        "    fig.update_traces(textposition='outside')\n",
        "  else:\n",
        "    fig = px.bar(df[df['topic'] == topic], \n",
        "                 x=\"pageRank\", y=\"Ranking\",\n",
        "                 orientation = 'h',\n",
        "                 barmode='group',\n",
        "                 text = \"label\",\n",
        "                 labels = {'pageRank' : 'PageRank value'}, width=900, height = 900)\n",
        "    fig.update_yaxes(type='category', autorange=\"reversed\")\n",
        "    fig.update_traces(textposition='outside')\n",
        "  return fig\n",
        "\n",
        "class_plot_rank = generate_ranking_graph(top_class_nodes_pd)\n",
        "ts_plot_rank_list = [generate_ranking_graph(top_ts_nodes_pd, t) for t in list(topics.value)]\n",
        "w_plot_rank = generate_ranking_graph(top_w_nodes_pd)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 52
        },
        "id": "rnCOZE4YsvW4",
        "outputId": "8ec0665e-1e48-4a12-b53d-a6c8a7bac1e4"
      },
      "outputs": [],
      "source": [
        "app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n",
        "cyto_stylesheet = [\n",
        "    {\n",
        "        'selector': 'node',\n",
        "        'style': {\n",
        "            'label': 'data(label)',\n",
        "            'background-color': 'data(fill)',\n",
        "            'text-halign' : 'center',\n",
        "            'text-valign' : 'top',\n",
        "            'text-wrap' : 'wrap'\n",
        "        }\n",
        "    }, \n",
        "    {\n",
        "      'selector': ':selected',\n",
        "      'style': {\n",
        "        'background-color': 'LightBlue'\n",
        "      }\n",
        "    }\n",
        "]\n",
        "\n",
        "pr_view = html.Div([\n",
        "    cyto.Cytoscape(\n",
        "      layout={'name': 'circle'},\n",
        "      style={'width': '100%', 'height': '700px'},\n",
        "      id=\"cyto_pr\",\n",
        "      elements=top_class_el,\n",
        "      stylesheet = cyto_stylesheet, \n",
        "      autounselectify=False, \n",
        "      responsive=True\n",
        "      )\n",
        "], style= {'borderWidth': '1px', \n",
        "           'borderStyle': 'solid',\n",
        "           'width': '100%',\n",
        "           'height': '100%'},\n",
        "  className='border-secondary rounded', id = \"cyto-container\")\n",
        "col_1 = dbc.Col(children=[\n",
        "   pr_view                      \n",
        "], width = 9)\n",
        "col_2 = dbc.Col([\n",
        "   html.Label(['Graph layout', dcc.Dropdown(\n",
        "    id='dropdown',\n",
        "    value='circle',\n",
        "    clearable=False,\n",
        "    options=[\n",
        "        {'label': name.capitalize(), 'value': name}\n",
        "        for name in ['grid', 'random', 'circle', 'cose', 'concentric']\n",
        "    ], \n",
        "    style={'width':'100%'}\n",
        "   )], style={'width':'100%'}),\n",
        "   dbc.Card([\n",
        "       dbc.CardBody([\n",
        "          html.H4(\"Topics\", className=\"card-title\"), \n",
        "          dbc.FormGroup([\n",
        "            dbc.RadioItems(\n",
        "                options=[\n",
        "                  dict(label= t, value= list(topics.value).index(t)) for t in list(topics.value)  \n",
        "                ],\n",
        "                value=0,\n",
        "                id=\"topic-choice\"\n",
        "            )\n",
        "          ])\n",
        "       ])      \n",
        "   ], color = \"info\", outline=True, id = \"topic-card\", style={'display':'none'}),\n",
        "   dbc.Card([\n",
        "     dbc.CardBody([\n",
        "        html.H4(\"INFO\", className=\"card-title\"),\n",
        "        html.H6(\"\"\"Select nodes to visualize associated \n",
        "        information\"\"\", className=\"card-subtitle\"),\n",
        "        html.Div(className=\"card-text\", id = 'card-info-1')      \n",
        "     ])\n",
        "   ])             \n",
        "], width = 3)\n",
        "buttons = html.Div([dbc.ButtonGroup(\n",
        "    [\n",
        "        dbc.Button(\"Classic PageRank\", style = {'marginRight': '5px'}, \n",
        "                   active=True, id='btn-prc'),\n",
        "        dbc.Button(\"Topic-sensitive PageRank\", style = {'marginRight': '5px'},\n",
        "                   id='btn-prts'),\n",
        "        dbc.Button(\"Weighted PageRank\", style = {'paddingTop':'2px'},\n",
        "                   id='btn-prw')\n",
        "    ])])\n",
        "row_1 = dbc.Row(children=[\n",
        "   dbc.Col(buttons)                   \n",
        "], style = {'marginBottom': '1rem'})\n",
        "row_2 = dbc.Row(children=[\n",
        "    col_1,\n",
        "    col_2                    \n",
        "])\n",
        "row_3 = dbc.Row(children=[\n",
        "   dbc.Col([\n",
        "      html.Center([\n",
        "        dcc.Graph(id = \"colorscale\", figure = class_bar, config={'autosizable':True})\n",
        "      ], style = {'width':'100%', 'height':'150px'})      \n",
        "   ], width=9)                                              \n",
        "])\n",
        "row_4 = dbc.Row(children=[\n",
        "   dbc.Col([\n",
        "      html.Center([\n",
        "        dcc.Graph(id = \"rank-plot\", figure = class_plot_rank, config={'autosizable':True})\n",
        "      ], style = {'width':'100%'})      \n",
        "   ])                       \n",
        "])\n",
        "\n",
        "app.layout = dbc.Container(children=[row_1, row_2, row_3, row_4], \n",
        "                           id = 'app_container', \n",
        "                           fluid=True, style={'padding': '1rem'})\n",
        "\n",
        "@app.callback(Output('card-info-1', 'children'),\n",
        "              Input('cyto_pr', 'selectedNodeData'))\n",
        "def display_node_info(selected):\n",
        "  if selected is None or len(selected) == 0:\n",
        "    return ''\n",
        "  children = []\n",
        "  for n in selected:\n",
        "    children.append(\n",
        "        html.Div([\n",
        "          html.P([html.Span('Node id: ', style={'fontWeight':'bold'}),\n",
        "                  html.Span(n.get(\"id\"))]),\n",
        "          html.P([html.Span('Name: ', style={'fontWeight':'bold'}),\n",
        "                  html.Span(n.get(\"label\"))]), \n",
        "          html.P([html.Span('Genres: ', style={'fontWeight':'bold'}),\n",
        "                  html.Span(\", \".join(n.get(\"genres\")))]),    \n",
        "          html.P([html.Span('PageRank: ', style={'fontWeight':'bold'}),\n",
        "                  html.Span(n.get(\"pageRank\"))]),                   \n",
        "        ], style={'marginTop':'1em', 'padding':'1em'}, \n",
        "        className=\"border border-secondary rounded\")\n",
        "    )\n",
        "  return children\n",
        "\n",
        "@app.callback(Output('cyto_pr', 'layout'),\n",
        "              Input('dropdown', 'value'))\n",
        "def update_layout(layout):\n",
        "    return {\n",
        "        'name': layout,\n",
        "        'animate': True\n",
        "    }\n",
        "\n",
        "@app.callback(Output(\"cyto_pr\", \"elements\"),\n",
        "              Output(\"topic-card\", \"style\"),\n",
        "              Output(\"colorscale\", \"figure\"),\n",
        "              Output(\"btn-prc\", \"active\"),\n",
        "              Output(\"btn-prts\", \"active\"),\n",
        "              Output(\"btn-prw\", \"active\"),\n",
        "              Output(\"rank-plot\", \"figure\"),\n",
        "              Input(\"btn-prc\", \"n_clicks\"),\n",
        "              Input(\"btn-prts\", \"n_clicks\"),\n",
        "              Input(\"btn-prw\", \"n_clicks\"),\n",
        "              Input(\"topic-choice\", \"value\")\n",
        "              )\n",
        "def toggle_buttons(btn_c, btn_ts, bt_w, t_choice):\n",
        "  ctx = dash.callback_context\n",
        "  if not ctx.triggered:\n",
        "    raise dash.exceptions.PreventUpdate\n",
        "  else:\n",
        "    comp_id = ctx.triggered[0]['prop_id'].split('.')[0]\n",
        "  if comp_id == \"btn-prc\":\n",
        "    return top_class_el, {'display':'none'}, class_bar, True, False, False, class_plot_rank\n",
        "  elif comp_id == \"btn-prts\" or comp_id == \"topic-choice\":\n",
        "    return top_ts_el_list[t_choice], {'marginBottom':'5px'}, ts_bar_list[t_choice], False, True, False, ts_plot_rank_list[t_choice]\n",
        "  elif comp_id == \"btn-prw\":\n",
        "    return top_w_el, {'display':'none'}, w_bar, False, False, True, w_plot_rank\n",
        "\n",
        "if __name__ == '__main__':\n",
        "    app.run_server(mode='inline', height=2000)\n",
        "    #app.run_server(mode='external')"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "authorship_tag": "ABX9TyNXAuanzmBPvQIs81oZjcXl",
      "collapsed_sections": [],
      "include_colab_link": true,
      "name": "PR_for_imdb.ipynb",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    },
    "widgets": {
      "application/vnd.jupyter.widget-state+json": {
        "680a300564244137b1741461d03958c7": {
          "model_module": "@jupyter-widgets/base",
          "model_module_version": "1.2.0",
          "model_name": "LayoutModel",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "e5e6f84429404689ac87df80322f5ecd": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "DescriptionStyleModel",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "f48e9bab73c64344abe8f737c0ffce23": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "SelectMultipleModel",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "SelectMultipleModel",
            "_options_labels": [
              "Thriller",
              "Romance",
              "Action",
              "Music",
              "Sport",
              "Mystery",
              "Sci-Fi",
              "Talk-Show",
              "Biography",
              "Fantasy",
              "Animation",
              "Adult",
              "Game-Show",
              "Horror",
              "War",
              "Western",
              "Reality-TV",
              "Musical",
              "Documentary",
              "Drama",
              "History",
              "Short",
              "Comedy",
              "Crime",
              "Adventure",
              "Family",
              "Film-Noir",
              "News"
            ],
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "SelectMultipleView",
            "description": "Topics",
            "description_tooltip": null,
            "disabled": false,
            "index": [
              2,
              13,
              19,
              22
            ],
            "layout": "IPY_MODEL_680a300564244137b1741461d03958c7",
            "rows": 5,
            "style": "IPY_MODEL_e5e6f84429404689ac87df80322f5ecd"
          }
        }
      }
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
