package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import java.math.BigDecimal

object Demo9PageRank {
  /**
   * Iterative PageRank over a page / out-link edge list using Spark SQL.
   *
   * Reads "data/pagerank.txt" (CSV: page, '|'-separated out-link list) and
   * iterates the damped formula PR(p) = (1 - q)/N + q * sum(PR(in)/outDeg(in))
   * until the mean absolute PR change drops below `tolerance`
   * (or `maxIterations` is reached, so a non-converging input still terminates).
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("pr")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 1. Read the raw edge list: page, outLink ("a|b|c")
    val dataDF: DataFrame = spark
      .read
      .format("csv")
      .schema("page STRING,outLink STRING")
      .option("sep", ",")
      .load("data/pagerank.txt")

    // 2. Split the out-link string into an array column
    val pageDF: DataFrame = dataDF
      .select($"page", split($"outLink", "\\|") as "outLink")

    // Reused on every iteration (contribution aggregation and re-attach join)
    pageDF.cache()

    // 3. Every page starts with an initial PR value of 1.0
    var initPrDF: DataFrame = pageDF
      .withColumn("pr", expr("1.0"))

    val q = 0.85            // damping factor
    val N: Long = pageDF.count()
    val tolerance = 0.01    // convergence threshold on mean |deltaPR|
    val maxIterations = 100 // safety cap: guarantees loop termination

    var flag = true
    var iteration = 0

    // Iterate until convergence (or the safety cap is hit)
    while (flag && iteration < maxIterations) {
      iteration += 1

      // 4. Each page hands out pr / outDegree to every page it links to
      val contribDF: DataFrame = initPrDF
        .withColumn("avgPr", $"pr" / size($"outLink"))
        // 5. One row per (target page, contribution)
        .select(explode($"outLink") as "page", $"avgPr")
        // 6. Total inbound contribution per page
        .groupBy($"page")
        .agg(sum($"avgPr") as "inPr")

      // 7. Apply the damped PageRank formula. A LEFT join from pageDF keeps
      //    pages with no inbound links (absent sum -> coalesced to 0.0, so
      //    they receive (1 - q)/N). The previous inner join silently dropped
      //    such pages — and their out-link contributions — from later rounds.
      val newPrDF: DataFrame = pageDF
        .join(contribDF, Seq("page"), "left")
        .withColumn("pr", coalesce($"inPr", lit(0.0)) * q + (1 - q) / N)
        .drop("inPr")

      // Mean absolute difference between this round's PR and the previous one
      val avgDiffDF: DataFrame = newPrDF.as("a")
        .join(initPrDF.as("b"), "page")
        .withColumn("diffPr", abs($"a.pr" - $"b.pr"))
        .agg(avg($"diffPr") as "avgDiff")

      // Pull the scalar average difference back to the driver
      val row: Row = avgDiffDF.head()
      val avgDiff: Double = row.getAs[Double]("avgDiff")

      // Convergence check
      if (avgDiff < tolerance) {
        flag = false
      }

      newPrDF.show()
      initPrDF = newPrDF
    }

    spark.stop()
  }
}
