package yhb.udrank

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

import scala.io.Source

/**
  * Recommendation via a custom PageRank-style ranking over user/book/label
  * relations (使用自定义rank进行推荐).
  */
object UDRank {

  /**
    * Iteratively propagates scores through a transition matrix, PageRank-style.
    *
    * Each iteration computes `sys = (1 - d) * (sys x probability) + d * init`,
    * where the damped initial contribution `d * init` is pre-computed once and
    * re-added every round via a full outer join.
    *
    * @param initMatrix        initial score matrix as (row, col, value) triples
    * @param probabilityMatrix transition matrix as (row, col, probability) triples
    * @param d                 damping factor: weight kept on the initial scores
    * @param iterations        number of propagation rounds
    * @param partitionNum      partition count used for all shuffle stages
    * @return RDD of (row, col, score) triples after `iterations` rounds
    */
  def rank(initMatrix: StringCoordinateMatrix, probabilityMatrix: StringCoordinateMatrix,
           d: Double = 0.15, iterations: Int = 30, partitionNum: Int = 2): RDD[(String, String, Double)] = {
    // One partitioner instance shared by every shuffle so co-partitioned joins avoid extra shuffles.
    val partitioner = new HashPartitioner(partitionNum)

    // Transition matrix keyed by its row; cached because it is joined on every iteration.
    val probabilityRDD = probabilityMatrix.value.map(x => (x._1, (x._2, x._3)))
      .partitionBy(partitioner).persist(StorageLevel.MEMORY_AND_DISK_SER)

    // Constant damped contribution d * init, keyed by (row, col); cached for the per-iteration join.
    val initRDD = initMatrix.value.map(x => ((x._1, x._2), x._3 * d))
      .partitionBy(partitioner).persist(StorageLevel.MEMORY_AND_DISK_SER)

    // NOTE(review): lineage grows linearly with `iterations`; for large iteration
    // counts consider periodically checkpointing the intermediate RDD.
    (0 until iterations).foldLeft(initMatrix.value) { (sysRDD, _) =>
      // Re-key the current scores by column so they join against the transition rows,
      // then multiply matching cells: (sysRow, probCol) -> sysValue * probValue.
      sysRDD.map(x => (x._2, (x._1, x._3))).join(probabilityRDD).map { x =>
        val rowName = x._2._1._1
        val colName = x._2._2._1
        val cellValue = x._2._1._2 * x._2._2._2
        ((rowName, colName), cellValue)
      }.reduceByKey(partitioner, _ + _).mapValues(_ * (1 - d))
        // fullOuterJoin keeps cells present on only one side, treating the missing side as 0.0.
        .fullOuterJoin(initRDD)
        .map(x => (x._1._1, x._1._2, x._2._1.getOrElse(0.0) + x._2._2.getOrElse(0.0)))
    }
  }

  /** Reads all lines of `path` into memory, closing the file handle even on failure. */
  private def readLines(path: String): Array[String] = {
    val source = Source.fromFile(path)
    try source.getLines().toArray finally source.close()
  }

  /**
    * Parses lines of the form "key v1,v2,..." into (key, value, count) triples,
    * where count is the number of times `value` occurs for `key`.
    */
  private def toTriples(sc: SparkContext, lines: Array[String]): RDD[(String, String, Double)] =
    sc.parallelize(lines.map { line =>
      val strs = line.split(" ")
      (strs(0), strs(1).split(","))
    }).flatMap(x => x._2.map(y => ((x._1, y), 1.0)))
      .reduceByKey(_ + _)
      .map(x => (x._1._1, x._1._2, x._2))

  /** Row-normalizes (key, (col, value)) cells so the values of each key sum to 1. */
  private def normalizeBy(cells: RDD[(String, (String, Double))]): RDD[(String, String, Double)] =
    cells.groupByKey().flatMap { case (key, row) =>
      val total = row.map(_._2).sum
      row.map { case (col, value) => (key, col, value / total) }
    }

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("spark-partition-test").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      val userLabelRDD = toTriples(sc, readLines("data/spark/udrank/user2label.txt"))
      val bookLabelRDD = toTriples(sc, readLines("data/spark/udrank/book2label.txt"))
      val userBooksRDD = toTriples(sc, readLines("data/spark/udrank/userBooks.txt"))

      // Row-stochastic matrices: book->label, label->person, person->label, label->book.
      val mblRDD = normalizeBy(bookLabelRDD.map(x => (x._1, (x._2, x._3))))
      val mlpRDD = normalizeBy(userLabelRDD.map(x => (x._2, (x._1, x._3))))
      val mplRDD = normalizeBy(userLabelRDD.map(x => (x._1, (x._2, x._3))))
      val mlbRDD = normalizeBy(bookLabelRDD.map(x => (x._2, (x._1, x._3))))

      // Initial user->book preference distribution.
      val initRDD = normalizeBy(userBooksRDD.map(x => (x._1, (x._2, x._3))))

      val mbl = new StringCoordinateMatrix(mblRDD)
      val mlp = new StringCoordinateMatrix(mlpRDD)
      val mpl = new StringCoordinateMatrix(mplRDD)
      val mlb = new StringCoordinateMatrix(mlbRDD)

      val initMatrix = new StringCoordinateMatrix(initRDD)

      val a = 0.85

      // Book-to-book transition: the label->person->label path weighted (1 - a)
      // blended with the direct label-overlap path weighted a.
      val mbb = (mbl * mlp * mpl * mlb * (1 - a)) + (mbl * mlb * a)
      rank(initMatrix, mbb, 0.75).foreach(println)
      //    (initMatrix * mbb).value.foreach(println)
    } finally {
      sc.stop()
    }
  }
}
