package cb

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.StructType
import com.doit.common.util.CollectionUtil

/**
  * Created by hunter.coder 涛哥
  * 2019/5/5 18:01
  * Contact QQ: 657270652
  * Version: 1.0
  * More learning material: https://blog.csdn.net/coderblack/
  * Description: recommendation based on content (item) similarity
  **/
object CBRecommend {

  /**
    * Content-based recommendation driver.
    *
    * Joins a user-item rating matrix with an item-item similarity matrix,
    * weights each similar item's cosine similarity by the user's rating of the
    * source item, merges the weighted candidates per user, and writes the topN
    * recommendations per user as parquet.
    *
    * Usage: CBRecommend &lt;topN&gt; [ratingPath] [itemSimPath] [outputPath]
    *
    * Expected input schemas (as read from parquet):
    *  - rating matrix:    gid (user), pid (item), score (Double)
    *  - similarity matrix: pid1, pid2, cosim (Double), osim (dropped here)
    */
  def main(args: Array[String]): Unit = {

    // topN was always required (original read args(0) unguarded, which threw
    // ArrayIndexOutOfBoundsException); fail fast with a usable message instead.
    require(args.nonEmpty, "usage: CBRecommend <topN> [ratingPath] [itemSimPath] [outputPath]")
    val topN = args(0).toInt

    // Paths were hard-coded, and the output path was the empty string "" which
    // made the final write fail at runtime. Keep the original defaults for the
    // inputs but allow all three paths to be overridden from the command line.
    val ratingPath  = args.lift(1).getOrElse("G:\\testdata\\comment\\uimatrix")
    val itemSimPath = args.lift(2).getOrElse("G:\\data_shark\\doit_recommender\\src\\test\\data\\item_sim\\")
    val outputPath  = args.lift(3).getOrElse("G:\\testdata\\comment\\cb_rec_result")

    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkSession.builder().appName("cb_rec").master("local").getOrCreate()

    // Needed for the 'symbol -> Column conversion and for .toDF below.
    import spark.implicits._

    // Load the user-item rating matrix.
    val userProfile = spark.read.parquet(ratingPath)

    // Load the item similarity matrix; only the cosine similarity is used here.
    val itemProfile = spark.read.parquet(itemSimPath).drop("osim")

    // Attach every similarity row that mentions the rated item on either side
    // of the (pid1, pid2) pair.
    val uiProfile = userProfile.join(itemProfile, ('pid === 'pid2) or ('pid === 'pid1))

    // For each (user, rated item): collect the similar items and their cosine
    // similarity into a Map, keyed by the *other* item of each pair.
    val recPerRatedItem = uiProfile.rdd.map(row => {
      val gid = row.getAs[String]("gid")
      val pid = row.getAs[String]("pid")
      val score = row.getAs[Double]("score")
      val pid1 = row.getAs[String]("pid1")
      val pid2 = row.getAs[String]("pid2")
      val cosim = row.getAs[Double]("cosim")
      (gid, pid, score, pid1, pid2, cosim)
    })
      .groupBy(tp => (tp._1, tp._2))
      .map { case ((gid, pid), rows) =>
        // The rated item itself is always one side of each (pid1, pid2) pair,
        // so insert both sides and drop the rated item afterwards, leaving
        // only relatedPid -> similarity. score is constant within the group.
        var simMap = Map.empty[String, Double]
        var score = 0.0
        rows.foreach { case (_, _, s, pidA, pidB, sim) =>
          score = s
          simMap += (pidA -> sim)
          simMap += (pidB -> sim)
        }
        // (gid, pid, score, candidate recommendations)
        (gid, pid, score, simMap - pid)
      }

    /**
      * Sample of recPerRatedItem rendered as a DataFrame:
      * |gid|pid|score|rec_map                                                                                             |
      * +---+---+-----+----------------------------------------------------------------------------------------------------+
      * |1  |p02|0.0  |Map(p05 -> 0.0, p01 -> 0.04340906744446256, p04 -> 0.07388112382856259, p03 -> 0.045187654883417874)|
      * |2  |p03|3.0  |Map(p05 -> 0.0, p02 -> 0.045187654883417874, p01 -> 0.03946911390929408, p04 -> 0.02382923741228304)|
      * |2  |p01|4.0  |Map(p05 -> 0.0, p02 -> 0.04340906744446256, p04 -> 0.011091958987710822, p03 -> 0.03946911390929408)|
      * |1  |p01|5.0  |Map(p05 -> 0.0, p02 -> 0.04340906744446256, p04 -> 0.011091958987710822, p03 -> 0.03946911390929408)|
      * |2  |p02|2.0  |Map(p05 -> 0.0, p01 -> 0.04340906744446256, p04 -> 0.07388112382856259, p03 -> 0.045187654883417874)|
      * |3  |p05|3.0  |Map(p02 -> 0.0, p01 -> 0.0, p04 -> 0.0, p03 -> 0.0)                                                 |
      * |3  |p02|1.0  |Map(p05 -> 0.0, p01 -> 0.04340906744446256, p04 -> 0.07388112382856259, p03 -> 0.045187654883417874)|
      * |3  |p04|2.0  |Map(p05 -> 0.0, p02 -> 0.07388112382856259, p01 -> 0.011091958987710822, p03 -> 0.02382923741228304)|
      * +---+---+-----+----------------------------------------------------------------------------------------------------+
      */

    recPerRatedItem
      .map { case (gid, _, score, recMap) =>
        // Weight each candidate's similarity by how much the user liked the
        // rated item. .map(identity) forces the lazy mapValues view into a
        // concrete (serializable) Map before it crosses a shuffle boundary.
        (gid, recMap.mapValues(_ * score).map(identity))
      }
      // Merge the candidate maps of all rated items per user, summing the
      // weighted scores of candidates that appear more than once.
      .reduceByKey(CollectionUtil.mergeMap)
      // Keep the topN candidates with the highest adjusted score per user.
      .map { case (gid, merged) =>
        (gid, merged.toList.sortBy(-_._2).take(topN))
      }
      .toDF("gid", "rec_map")
      .write.parquet(outputPath)

    spark.close()
  }

}
