package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo23Pagerank {

  /**
   * Iterative PageRank over an RDD of pages until the mean absolute rank
   * change drops below a threshold.
   *
   * Input file format, one page per line: `page|out1,out2,...|initialPr`
   * e.g. `A|B,D|1.0`.
   */
  def main(args: Array[String]): Unit = {

    // Damping factor: probability of following a link rather than jumping
    // to a random page.
    val Q = 0.85

    // Convergence threshold on the mean absolute PR change per iteration.
    val epsilon = 0.001

    val conf = new SparkConf().setMaster("local").setAppName("pr")
    val sc = new SparkContext(conf)

    val data = sc.textFile("spark/data/pagerank.txt")

    // Total number of pages (used in the damping term and for averaging).
    val N = data.count()

    var prData: RDD[(String, List[String], Double)] = data.map(line => {
      // e.g. "A|B,D|1.0"
      val split = line.split("\\|")
      val page = split(0)
      // Out-link list of this page.
      val link = split(1).split(",").toList
      val pr = split(2).toDouble
      (page, link, pr)
    })

    // The link structure is static and reused on every iteration — cache it
    // so it is not re-parsed from the text file on each pass.
    val linkData = prData.map(kv => (kv._1, kv._2)).cache()

    // Loop-exit condition flag.
    var flag = true

    while (flag) {
      // Distribute each page's current PR evenly across its out-links.
      val fData = prData.flatMap { case (_, link, pr) =>
        val share = pr / link.length
        link.map(p => (p, share))
      }

      // Sum the contributions each page received, then apply the damping
      // formula: PR = (1 - Q) / N + Q * sum(contributions).
      val xData = fData
        .reduceByKey(_ + _)
        .mapValues(sum => (1 - Q) / N + Q * sum)

      // Re-attach the out-link lists. Use rightOuterJoin so a page that
      // received no contributions this round (no in-links) is kept with the
      // base rank (1 - Q) / N — an inner join would silently drop it from
      // prData after the first iteration and shrink the page set.
      val joinData: RDD[(String, List[String], Double)] =
        xData.rightOuterJoin(linkData).map { case (page, (prOpt, link)) =>
          (page, link, prOpt.getOrElse((1 - Q) / N))
        }
      // joinData feeds two separate jobs (the convergence action below and
      // the next iteration's lineage) — cache it to avoid recomputing the
      // ever-growing lineage from the source file each time.
      joinData.cache()

      // Convergence check: mean absolute difference between the previous
      // and the newly computed PR of each page.
      val oldPr = prData.map(kv => (kv._1, kv._3))
      val newPr = joinData.map(kv => (kv._1, kv._3))

      val diffs = oldPr.join(newPr).map { case (_, (p1, p2)) =>
        Math.abs(p1 - p2)
      }

      val cmean = diffs.sum() / N
      println("当前差值平均值：" + cmean)
      // Stop iterating once the ranks have stabilized.
      if (cmean < epsilon) {
        flag = false
      }

      // The next iteration starts from the newly computed ranks.
      prData = joinData
    }

    prData.foreach(println)

  }

}
